Diffstat (limited to 'libpod')
47 files changed, 1530 insertions(+), 1222 deletions(-)
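Nearly every hunk in this diff makes the same mechanical change: the redundant "error " prefix is dropped from messages built with fmt.Errorf and the %w verb. Wrapped errors compose, so each layer should add only its own context; with the old prefix, callers wrapping these errors produced chains like "error checking DB schema: error opening database ...". A minimal standalone sketch of the convention (the bucket name and helpers here are illustrative, not libpod code):

package main

import (
	"errors"
	"fmt"
)

var errNoBucket = errors.New("bucket does not exist")

// openBucket adds only its own context; the caller adds the next layer.
func openBucket(name string) error {
	return fmt.Errorf("opening bucket %s: %w", name, errNoBucket)
}

func main() {
	err := fmt.Errorf("refreshing state: %w", openBucket("ctrs"))
	// Prints "refreshing state: opening bucket ctrs: bucket does not exist".
	// With the old style this would have read
	// "refreshing state: error opening bucket ctrs: bucket does not exist".
	fmt.Println(err)
	fmt.Println(errors.Is(err, errNoBucket)) // true: %w preserves the chain
}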
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index e5a7e20fc..4fd95a3cf 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -85,7 +85,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { db, err := bolt.Open(path, 0600, nil) if err != nil { - return nil, fmt.Errorf("error opening database %s: %w", path, err) + return nil, fmt.Errorf("opening database %s: %w", path, err) } // Everywhere else, we use s.deferredCloseDBCon(db) to ensure the state's DB // mutex is also unlocked. @@ -123,7 +123,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { return nil }) if err != nil { - return nil, fmt.Errorf("error checking DB schema: %w", err) + return nil, fmt.Errorf("checking DB schema: %w", err) } if !needsUpdate { @@ -135,13 +135,13 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { err = db.Update(func(tx *bolt.Tx) error { for _, bkt := range createBuckets { if _, err := tx.CreateBucketIfNotExists(bkt); err != nil { - return fmt.Errorf("error creating bucket %s: %w", string(bkt), err) + return fmt.Errorf("creating bucket %s: %w", string(bkt), err) } } return nil }) if err != nil { - return nil, fmt.Errorf("error creating buckets for DB: %w", err) + return nil, fmt.Errorf("creating buckets for DB: %w", err) } state.valid = true @@ -220,11 +220,11 @@ func (s *BoltState) Refresh() error { return nil }) if err != nil { - return fmt.Errorf("error reading exit codes bucket: %w", err) + return fmt.Errorf("reading exit codes bucket: %w", err) } for _, id := range toRemoveExitCodes { if err := exitCodeBucket.Delete([]byte(id)); err != nil { - return fmt.Errorf("error removing exit code for ID %s: %w", id, err) + return fmt.Errorf("removing exit code for ID %s: %w", id, err) } } @@ -276,7 +276,7 @@ func (s *BoltState) Refresh() error { state := new(podState) if err := json.Unmarshal(stateBytes, state); err != nil { - return fmt.Errorf("error unmarshalling state for pod %s: %w", string(id), err) + return fmt.Errorf("unmarshalling state for pod %s: %w", string(id), err) } // Clear the Cgroup path @@ -284,11 +284,11 @@ func (s *BoltState) Refresh() error { newStateBytes, err := json.Marshal(state) if err != nil { - return fmt.Errorf("error marshalling modified state for pod %s: %w", string(id), err) + return fmt.Errorf("marshalling modified state for pod %s: %w", string(id), err) } if err := podBkt.Put(stateKey, newStateBytes); err != nil { - return fmt.Errorf("error updating state for pod %s in DB: %w", string(id), err) + return fmt.Errorf("updating state for pod %s in DB: %w", string(id), err) } // It's not a container, nothing to do @@ -297,7 +297,7 @@ func (s *BoltState) Refresh() error { // First, delete the network namespace if err := ctrBkt.Delete(netNSKey); err != nil { - return fmt.Errorf("error removing network namespace for container %s: %w", string(id), err) + return fmt.Errorf("removing network namespace for container %s: %w", string(id), err) } stateBytes := ctrBkt.Get(stateKey) @@ -309,18 +309,18 @@ func (s *BoltState) Refresh() error { state := new(ContainerState) if err := json.Unmarshal(stateBytes, state); err != nil { - return fmt.Errorf("error unmarshalling state for container %s: %w", string(id), err) + return fmt.Errorf("unmarshalling state for container %s: %w", string(id), err) } resetState(state) newStateBytes, err := json.Marshal(state) if err != nil { - return fmt.Errorf("error marshalling modified state for container %s: %w", string(id), err) + return fmt.Errorf("marshalling modified state for 
container %s: %w", string(id), err) } if err := ctrBkt.Put(stateKey, newStateBytes); err != nil { - return fmt.Errorf("error updating state for container %s in DB: %w", string(id), err) + return fmt.Errorf("updating state for container %s in DB: %w", string(id), err) } // Delete all exec sessions, if there are any @@ -338,7 +338,7 @@ func (s *BoltState) Refresh() error { } for _, execID := range toRemove { if err := ctrExecBkt.Delete([]byte(execID)); err != nil { - return fmt.Errorf("error removing exec session %s from container %s: %w", execID, string(id), err) + return fmt.Errorf("removing exec session %s from container %s: %w", execID, string(id), err) } } } @@ -358,12 +358,12 @@ func (s *BoltState) Refresh() error { if testID := namesBucket.Get(name); testID != nil { logrus.Infof("Found dangling name %s (ID %s) in database", string(name), id) if err := namesBucket.Delete(name); err != nil { - return fmt.Errorf("error removing dangling name %s (ID %s) from database: %w", string(name), id, err) + return fmt.Errorf("removing dangling name %s (ID %s) from database: %w", string(name), id, err) } } } if err := idBucket.Delete([]byte(id)); err != nil { - return fmt.Errorf("error removing dangling ID %s from database: %w", id, err) + return fmt.Errorf("removing dangling ID %s from database: %w", id, err) } } @@ -384,7 +384,7 @@ func (s *BoltState) Refresh() error { oldState := new(VolumeState) if err := json.Unmarshal(volStateBytes, oldState); err != nil { - return fmt.Errorf("error unmarshalling state for volume %s: %w", string(id), err) + return fmt.Errorf("unmarshalling state for volume %s: %w", string(id), err) } // Reset mount count to 0 @@ -393,11 +393,11 @@ func (s *BoltState) Refresh() error { newState, err := json.Marshal(oldState) if err != nil { - return fmt.Errorf("error marshalling state for volume %s: %w", string(id), err) + return fmt.Errorf("marshalling state for volume %s: %w", string(id), err) } if err := dbVol.Put(stateKey, newState); err != nil { - return fmt.Errorf("error storing new state for volume %s: %w", string(id), err) + return fmt.Errorf("storing new state for volume %s: %w", string(id), err) } return nil @@ -421,7 +421,7 @@ func (s *BoltState) Refresh() error { for _, execSession := range toRemoveExec { if err := execBucket.Delete([]byte(execSession)); err != nil { - return fmt.Errorf("error deleting exec session %s registry from database: %w", execSession, err) + return fmt.Errorf("deleting exec session %s registry from database: %w", execSession, err) } } @@ -841,7 +841,7 @@ func (s *BoltState) UpdateContainer(ctr *Container) error { } if err := json.Unmarshal(newStateBytes, newState); err != nil { - return fmt.Errorf("error unmarshalling container %s state: %w", ctr.ID(), err) + return fmt.Errorf("unmarshalling container %s state: %w", ctr.ID(), err) } netNSBytes := ctrToUpdate.Get(netNSKey) @@ -886,7 +886,7 @@ func (s *BoltState) SaveContainer(ctr *Container) error { stateJSON, err := json.Marshal(ctr.state) if err != nil { - return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err) + return fmt.Errorf("marshalling container %s state to JSON: %w", ctr.ID(), err) } netNSPath := getNetNSPath(ctr) @@ -912,17 +912,17 @@ func (s *BoltState) SaveContainer(ctr *Container) error { // Update the state if err := ctrToSave.Put(stateKey, stateJSON); err != nil { - return fmt.Errorf("error updating container %s state in DB: %w", ctr.ID(), err) + return fmt.Errorf("updating container %s state in DB: %w", ctr.ID(), err) } if netNSPath != "" { if 
err := ctrToSave.Put(netNSKey, []byte(netNSPath)); err != nil { - return fmt.Errorf("error updating network namespace path for container %s in DB: %w", ctr.ID(), err) + return fmt.Errorf("updating network namespace path for container %s in DB: %w", ctr.ID(), err) } } else { // Delete the existing network namespace if err := ctrToSave.Delete(netNSKey); err != nil { - return fmt.Errorf("error removing network namespace path for container %s in DB: %w", ctr.ID(), err) + return fmt.Errorf("removing network namespace path for container %s in DB: %w", ctr.ID(), err) } } @@ -1142,7 +1142,7 @@ func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOpti if ctrNetworkBkt == nil { ctrNetworkBkt, err = dbCtr.CreateBucket(networksBkt) if err != nil { - return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err) + return fmt.Errorf("creating networks bucket for container %s: %w", ctr.ID(), err) } // the container has no networks in the db lookup config and write to the db networkList = ctr.config.NetworksDeprecated @@ -1249,7 +1249,7 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe optBytes, err := json.Marshal(opts) if err != nil { - return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err) + return fmt.Errorf("marshalling network options JSON for container %s: %w", ctr.ID(), err) } ctrID := []byte(ctr.ID()) @@ -1283,7 +1283,7 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.Pe // Add the network if err := ctrNetworksBkt.Put([]byte(network), optBytes); err != nil { - return fmt.Errorf("error adding container %s to network %s in DB: %w", ctr.ID(), network, err) + return fmt.Errorf("adding container %s to network %s in DB: %w", ctr.ID(), network, err) } return nil @@ -1340,7 +1340,7 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error { } if err := ctrNetworksBkt.Delete([]byte(network)); err != nil { - return fmt.Errorf("error removing container %s from network %s: %w", ctr.ID(), network, err) + return fmt.Errorf("removing container %s from network %s: %w", ctr.ID(), network, err) } if ctrAliasesBkt != nil { @@ -1350,7 +1350,7 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error { } if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil { - return fmt.Errorf("error removing container %s network aliases for network %s: %w", ctr.ID(), network, err) + return fmt.Errorf("removing container %s network aliases for network %s: %w", ctr.ID(), network, err) } } @@ -1626,7 +1626,7 @@ func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error { ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt) if err != nil { - return fmt.Errorf("error creating exec sessions bucket for container %s: %w", ctr.ID(), err) + return fmt.Errorf("creating exec sessions bucket for container %s: %w", ctr.ID(), err) } execExists := execBucket.Get(sessionID) @@ -1635,11 +1635,11 @@ func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error { } if err := execBucket.Put(sessionID, ctrID); err != nil { - return fmt.Errorf("error adding exec session %s to DB: %w", session.ID(), err) + return fmt.Errorf("adding exec session %s to DB: %w", session.ID(), err) } if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil { - return fmt.Errorf("error adding exec session %s to container %s in DB: %w", session.ID(), ctr.ID(), err) + return fmt.Errorf("adding exec session %s to 
container %s in DB: %w", session.ID(), ctr.ID(), err) } return nil @@ -1716,7 +1716,7 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error { } if err := execBucket.Delete(sessionID); err != nil { - return fmt.Errorf("error removing exec session %s from database: %w", session.ID(), err) + return fmt.Errorf("removing exec session %s from database: %w", session.ID(), err) } dbCtr := ctrBucket.Bucket(containerID) @@ -1739,7 +1739,7 @@ func (s *BoltState) RemoveExecSession(session *ExecSession) error { ctrSessionExists := ctrExecBucket.Get(sessionID) if ctrSessionExists != nil { if err := ctrExecBucket.Delete(sessionID); err != nil { - return fmt.Errorf("error removing exec session %s from container %s in database: %w", session.ID(), session.ContainerID(), err) + return fmt.Errorf("removing exec session %s from container %s in database: %w", session.ID(), session.ContainerID(), err) } } @@ -1847,7 +1847,7 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error { for _, session := range sessions { if err := ctrExecSessions.Delete([]byte(session)); err != nil { - return fmt.Errorf("error removing container %s exec session %s from database: %w", ctr.ID(), session, err) + return fmt.Errorf("removing container %s exec session %s from database: %w", ctr.ID(), session, err) } // Check if the session exists in the global table // before removing. It should, but in cases where the DB @@ -1861,7 +1861,7 @@ func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error { return fmt.Errorf("database mismatch: exec session %s is associated with containers %s and %s: %w", session, ctr.ID(), string(sessionExists), define.ErrInternal) } if err := execBucket.Delete([]byte(session)); err != nil { - return fmt.Errorf("error removing container %s exec session %s from exec sessions: %w", ctr.ID(), session, err) + return fmt.Errorf("removing container %s exec session %s from exec sessions: %w", ctr.ID(), session, err) } } @@ -1884,7 +1884,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err) + return fmt.Errorf("marshalling new configuration JSON for container %s: %w", ctr.ID(), err) } db, err := s.getDBCon() @@ -1906,7 +1906,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf } if err := ctrDB.Put(configKey, newCfgJSON); err != nil { - return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err) + return fmt.Errorf("updating container %s config JSON: %w", ctr.ID(), err) } return nil @@ -1937,7 +1937,7 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return fmt.Errorf("error marshalling new configuration JSON for container %s: %w", ctr.ID(), err) + return fmt.Errorf("marshalling new configuration JSON for container %s: %w", ctr.ID(), err) } db, err := s.getDBCon() @@ -1978,16 +1978,16 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName // buckets are ID-indexed so we just need to // overwrite the values there. 
if err := namesBkt.Delete([]byte(oldName)); err != nil { - return fmt.Errorf("error deleting container %s old name from DB for rename: %w", ctr.ID(), err) + return fmt.Errorf("deleting container %s old name from DB for rename: %w", ctr.ID(), err) } if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { - return fmt.Errorf("error renaming container %s in ID bucket in DB: %w", ctr.ID(), err) + return fmt.Errorf("renaming container %s in ID bucket in DB: %w", ctr.ID(), err) } if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil { - return fmt.Errorf("error adding new name %s for container %s in DB: %w", newName, ctr.ID(), err) + return fmt.Errorf("adding new name %s for container %s in DB: %w", newName, ctr.ID(), err) } if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { - return fmt.Errorf("error renaming container %s in all containers bucket in DB: %w", ctr.ID(), err) + return fmt.Errorf("renaming container %s in all containers bucket in DB: %w", ctr.ID(), err) } if ctr.config.Pod != "" { podsBkt, err := getPodBucket(tx) @@ -2003,7 +2003,7 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName return fmt.Errorf("pod %s does not have a containers bucket: %w", ctr.config.Pod, define.ErrInternal) } if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil { - return fmt.Errorf("error renaming container %s in pod %s members bucket: %w", ctr.ID(), ctr.config.Pod, err) + return fmt.Errorf("renaming container %s in pod %s members bucket: %w", ctr.ID(), ctr.config.Pod, err) } } } @@ -2021,7 +2021,7 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName } if err := ctrDB.Put(configKey, newCfgJSON); err != nil { - return fmt.Errorf("error updating container %s config JSON: %w", ctr.ID(), err) + return fmt.Errorf("updating container %s config JSON: %w", ctr.ID(), err) } return nil @@ -2043,7 +2043,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return fmt.Errorf("error marshalling new configuration JSON for pod %s: %w", pod.ID(), err) + return fmt.Errorf("marshalling new configuration JSON for pod %s: %w", pod.ID(), err) } db, err := s.getDBCon() @@ -2065,7 +2065,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { } if err := podDB.Put(configKey, newCfgJSON); err != nil { - return fmt.Errorf("error updating pod %s config JSON: %w", pod.ID(), err) + return fmt.Errorf("updating pod %s config JSON: %w", pod.ID(), err) } return nil @@ -2087,7 +2087,7 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return fmt.Errorf("error marshalling new configuration JSON for volume %q: %w", volume.Name(), err) + return fmt.Errorf("marshalling new configuration JSON for volume %q: %w", volume.Name(), err) } db, err := s.getDBCon() @@ -2109,7 +2109,7 @@ func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) er } if err := volDB.Put(configKey, newCfgJSON); err != nil { - return fmt.Errorf("error updating volume %q config JSON: %w", volume.Name(), err) + return fmt.Errorf("updating volume %q config JSON: %w", volume.Name(), err) } return nil @@ -2522,7 +2522,7 @@ func (s *BoltState) AddVolume(volume *Volume) error { volConfigJSON, err := json.Marshal(volume.config) if err != nil { - return fmt.Errorf("error marshalling volume %s config to JSON: %w", volume.Name(), err) + 
return fmt.Errorf("marshalling volume %s config to JSON: %w", volume.Name(), err) } // Volume state is allowed to not exist @@ -2530,7 +2530,7 @@ func (s *BoltState) AddVolume(volume *Volume) error { if volume.state != nil { volStateJSON, err = json.Marshal(volume.state) if err != nil { - return fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err) + return fmt.Errorf("marshalling volume %s state to JSON: %w", volume.Name(), err) } } @@ -2561,27 +2561,27 @@ func (s *BoltState) AddVolume(volume *Volume) error { // Make a bucket for it newVol, err := volBkt.CreateBucket(volName) if err != nil { - return fmt.Errorf("error creating bucket for volume %s: %w", volume.Name(), err) + return fmt.Errorf("creating bucket for volume %s: %w", volume.Name(), err) } // Make a subbucket for the containers using the volume. Dependent container IDs will be addedremoved to // this bucket in addcontainer/removeContainer if _, err := newVol.CreateBucket(volDependenciesBkt); err != nil { - return fmt.Errorf("error creating bucket for containers using volume %s: %w", volume.Name(), err) + return fmt.Errorf("creating bucket for containers using volume %s: %w", volume.Name(), err) } if err := newVol.Put(configKey, volConfigJSON); err != nil { - return fmt.Errorf("error storing volume %s configuration in DB: %w", volume.Name(), err) + return fmt.Errorf("storing volume %s configuration in DB: %w", volume.Name(), err) } if volStateJSON != nil { if err := newVol.Put(stateKey, volStateJSON); err != nil { - return fmt.Errorf("error storing volume %s state in DB: %w", volume.Name(), err) + return fmt.Errorf("storing volume %s state in DB: %w", volume.Name(), err) } } if err := allVolsBkt.Put(volName, volName); err != nil { - return fmt.Errorf("error storing volume %s in all volumes bucket in DB: %w", volume.Name(), err) + return fmt.Errorf("storing volume %s in all volumes bucket in DB: %w", volume.Name(), err) } return nil @@ -2650,7 +2650,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { return nil }) if err != nil { - return fmt.Errorf("error getting list of dependencies from dependencies bucket for volumes %q: %w", volume.Name(), err) + return fmt.Errorf("getting list of dependencies from dependencies bucket for volumes %q: %w", volume.Name(), err) } if len(deps) > 0 { return fmt.Errorf("volume %s is being used by container(s) %s: %w", volume.Name(), strings.Join(deps, ","), define.ErrVolumeBeingUsed) @@ -2660,10 +2660,10 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { // volume is ready for removal // Let's kick it out if err := allVolsBkt.Delete(volName); err != nil { - return fmt.Errorf("error removing volume %s from all volumes bucket in DB: %w", volume.Name(), err) + return fmt.Errorf("removing volume %s from all volumes bucket in DB: %w", volume.Name(), err) } if err := volBkt.DeleteBucket(volName); err != nil { - return fmt.Errorf("error removing volume %s from DB: %w", volume.Name(), err) + return fmt.Errorf("removing volume %s from DB: %w", volume.Name(), err) } return nil @@ -2710,7 +2710,7 @@ func (s *BoltState) UpdateVolume(volume *Volume) error { } if err := json.Unmarshal(stateBytes, newState); err != nil { - return fmt.Errorf("error unmarshalling volume %s state: %w", volume.Name(), err) + return fmt.Errorf("unmarshalling volume %s state: %w", volume.Name(), err) } return nil @@ -2740,7 +2740,7 @@ func (s *BoltState) SaveVolume(volume *Volume) error { if volume.state != nil { stateJSON, err := json.Marshal(volume.state) if err != nil { - return 
fmt.Errorf("error marshalling volume %s state to JSON: %w", volume.Name(), err) + return fmt.Errorf("marshalling volume %s state to JSON: %w", volume.Name(), err) } newStateJSON = stateJSON } @@ -3060,12 +3060,12 @@ func (s *BoltState) AddPod(pod *Pod) error { podConfigJSON, err := json.Marshal(pod.config) if err != nil { - return fmt.Errorf("error marshalling pod %s config to JSON: %w", pod.ID(), err) + return fmt.Errorf("marshalling pod %s config to JSON: %w", pod.ID(), err) } podStateJSON, err := json.Marshal(pod.state) if err != nil { - return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err) + return fmt.Errorf("marshalling pod %s state to JSON: %w", pod.ID(), err) } db, err := s.getDBCon() @@ -3122,40 +3122,40 @@ func (s *BoltState) AddPod(pod *Pod) error { // Make a bucket for it newPod, err := podBkt.CreateBucket(podID) if err != nil { - return fmt.Errorf("error creating bucket for pod %s: %w", pod.ID(), err) + return fmt.Errorf("creating bucket for pod %s: %w", pod.ID(), err) } // Make a subbucket for pod containers if _, err := newPod.CreateBucket(containersBkt); err != nil { - return fmt.Errorf("error creating bucket for pod %s containers: %w", pod.ID(), err) + return fmt.Errorf("creating bucket for pod %s containers: %w", pod.ID(), err) } if err := newPod.Put(configKey, podConfigJSON); err != nil { - return fmt.Errorf("error storing pod %s configuration in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s configuration in DB: %w", pod.ID(), err) } if err := newPod.Put(stateKey, podStateJSON); err != nil { - return fmt.Errorf("error storing pod %s state JSON in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s state JSON in DB: %w", pod.ID(), err) } if podNamespace != nil { if err := newPod.Put(namespaceKey, podNamespace); err != nil { - return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s namespace in DB: %w", pod.ID(), err) } if err := nsBkt.Put(podID, podNamespace); err != nil { - return fmt.Errorf("error storing pod %s namespace in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s namespace in DB: %w", pod.ID(), err) } } // Add us to the ID and names buckets if err := idsBkt.Put(podID, podName); err != nil { - return fmt.Errorf("error storing pod %s ID in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s ID in DB: %w", pod.ID(), err) } if err := namesBkt.Put(podName, podID); err != nil { - return fmt.Errorf("error storing pod %s name in DB: %w", pod.Name(), err) + return fmt.Errorf("storing pod %s name in DB: %w", pod.Name(), err) } if err := allPodsBkt.Put(podID, podName); err != nil { - return fmt.Errorf("error storing pod %s in all pods bucket in DB: %w", pod.ID(), err) + return fmt.Errorf("storing pod %s in all pods bucket in DB: %w", pod.ID(), err) } return nil @@ -3240,19 +3240,19 @@ func (s *BoltState) RemovePod(pod *Pod) error { // Pod is empty, and ready for removal // Let's kick it out if err := idsBkt.Delete(podID); err != nil { - return fmt.Errorf("error removing pod %s ID from DB: %w", pod.ID(), err) + return fmt.Errorf("removing pod %s ID from DB: %w", pod.ID(), err) } if err := namesBkt.Delete(podName); err != nil { - return fmt.Errorf("error removing pod %s name (%s) from DB: %w", pod.ID(), pod.Name(), err) + return fmt.Errorf("removing pod %s name (%s) from DB: %w", pod.ID(), pod.Name(), err) } if err := nsBkt.Delete(podID); err != nil { - return fmt.Errorf("error removing pod %s namespace from DB: %w", pod.ID(), err) + return 
fmt.Errorf("removing pod %s namespace from DB: %w", pod.ID(), err) } if err := allPodsBkt.Delete(podID); err != nil { - return fmt.Errorf("error removing pod %s ID from all pods bucket in DB: %w", pod.ID(), err) + return fmt.Errorf("removing pod %s ID from all pods bucket in DB: %w", pod.ID(), err) } if err := podBkt.DeleteBucket(podID); err != nil { - return fmt.Errorf("error removing pod %s from DB: %w", pod.ID(), err) + return fmt.Errorf("removing pod %s from DB: %w", pod.ID(), err) } return nil @@ -3353,19 +3353,19 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error { // Dependencies are set, we're clear to remove if err := ctrBkt.DeleteBucket(id); err != nil { - return fmt.Errorf("error deleting container %s from DB: %w", string(id), define.ErrInternal) + return fmt.Errorf("deleting container %s from DB: %w", string(id), define.ErrInternal) } if err := idsBkt.Delete(id); err != nil { - return fmt.Errorf("error deleting container %s ID in DB: %w", string(id), err) + return fmt.Errorf("deleting container %s ID in DB: %w", string(id), err) } if err := namesBkt.Delete(name); err != nil { - return fmt.Errorf("error deleting container %s name in DB: %w", string(id), err) + return fmt.Errorf("deleting container %s name in DB: %w", string(id), err) } if err := allCtrsBkt.Delete(id); err != nil { - return fmt.Errorf("error deleting container %s ID from all containers bucket in DB: %w", string(id), err) + return fmt.Errorf("deleting container %s ID from all containers bucket in DB: %w", string(id), err) } return nil @@ -3376,10 +3376,10 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error { // Delete and recreate the bucket to empty it if err := podDB.DeleteBucket(containersBkt); err != nil { - return fmt.Errorf("error removing pod %s containers bucket: %w", pod.ID(), err) + return fmt.Errorf("removing pod %s containers bucket: %w", pod.ID(), err) } if _, err := podDB.CreateBucket(containersBkt); err != nil { - return fmt.Errorf("error recreating pod %s containers bucket: %w", pod.ID(), err) + return fmt.Errorf("recreating pod %s containers bucket: %w", pod.ID(), err) } return nil @@ -3496,7 +3496,7 @@ func (s *BoltState) UpdatePod(pod *Pod) error { } if err := json.Unmarshal(podStateBytes, newState); err != nil { - return fmt.Errorf("error unmarshalling pod %s state JSON: %w", pod.ID(), err) + return fmt.Errorf("unmarshalling pod %s state JSON: %w", pod.ID(), err) } return nil @@ -3526,7 +3526,7 @@ func (s *BoltState) SavePod(pod *Pod) error { stateJSON, err := json.Marshal(pod.state) if err != nil { - return fmt.Errorf("error marshalling pod %s state to JSON: %w", pod.ID(), err) + return fmt.Errorf("marshalling pod %s state to JSON: %w", pod.ID(), err) } db, err := s.getDBCon() @@ -3551,7 +3551,7 @@ func (s *BoltState) SavePod(pod *Pod) error { // Set the pod state JSON if err := podDB.Put(stateKey, stateJSON); err != nil { - return fmt.Errorf("error updating pod %s state in database: %w", pod.ID(), err) + return fmt.Errorf("updating pod %s state in database: %w", pod.ID(), err) } return nil diff --git a/libpod/boltdb_state_freebsd.go b/libpod/boltdb_state_freebsd.go index d7f2736fc..d0a2d4f28 100644 --- a/libpod/boltdb_state_freebsd.go +++ b/libpod/boltdb_state_freebsd.go @@ -6,12 +6,20 @@ package libpod // replaceNetNS handle network namespace transitions after updating a // container's state. func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) error { - // On FreeBSD, we just record the network jail's name in our state. 
- newState.NetworkJail = netNSPath + if netNSPath != "" { + // On FreeBSD, we just record the network jail's name in our state. + newState.NetNS = &jailNetNS{Name: netNSPath} + } else { + newState.NetNS = nil + } return nil } // getNetNSPath retrieves the netns path to be stored in the database func getNetNSPath(ctr *Container) string { - return ctr.state.NetworkJail + if ctr.state.NetNS != nil { + return ctr.state.NetNS.Name + } else { + return "" + } } diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index f28fadfa9..87f1fa4eb 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -195,7 +195,7 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error { } if err := configBkt.Put(missing.key, dbValue); err != nil { - return fmt.Errorf("error updating %s in DB runtime config: %w", missing.name, err) + return fmt.Errorf("updating %s in DB runtime config: %w", missing.name, err) } } @@ -254,7 +254,7 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) { db, err := bolt.Open(s.dbPath, 0600, nil) if err != nil { - return nil, fmt.Errorf("error opening database %s: %w", s.dbPath, err) + return nil, fmt.Errorf("opening database %s: %w", s.dbPath, err) } return db, nil @@ -403,7 +403,7 @@ func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, } if err := json.Unmarshal(configBytes, config); err != nil { - return fmt.Errorf("error unmarshalling container %s config: %w", string(id), err) + return fmt.Errorf("unmarshalling container %s config: %w", string(id), err) } // convert ports to the new format if needed @@ -426,7 +426,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt. // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID) if err != nil { - return fmt.Errorf("error retrieving lock for container %s: %w", string(id), err) + return fmt.Errorf("retrieving lock for container %s: %w", string(id), err) } ctr.lock = lock @@ -489,13 +489,13 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error } if err := json.Unmarshal(podConfigBytes, pod.config); err != nil { - return fmt.Errorf("error unmarshalling pod %s config from DB: %w", string(id), err) + return fmt.Errorf("unmarshalling pod %s config from DB: %w", string(id), err) } // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID) if err != nil { - return fmt.Errorf("error retrieving lock for pod %s: %w", string(id), err) + return fmt.Errorf("retrieving lock for pod %s: %w", string(id), err) } pod.lock = lock @@ -517,14 +517,14 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu } if err := json.Unmarshal(volConfigBytes, volume.config); err != nil { - return fmt.Errorf("error unmarshalling volume %s config from DB: %w", string(name), err) + return fmt.Errorf("unmarshalling volume %s config from DB: %w", string(name), err) } // Volume state is allowed to be nil for legacy compatibility volStateBytes := volDB.Get(stateKey) if volStateBytes != nil { if err := json.Unmarshal(volStateBytes, volume.state); err != nil { - return fmt.Errorf("error unmarshalling volume %s state from DB: %w", string(name), err) + return fmt.Errorf("unmarshalling volume %s state from DB: %w", string(name), err) } } @@ -546,7 +546,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID) if err != nil { - return fmt.Errorf("error 
retrieving lock for volume %q: %w", string(name), err) + return fmt.Errorf("retrieving lock for volume %q: %w", string(name), err) } volume.lock = lock @@ -572,11 +572,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { // JSON container structs to insert into DB configJSON, err := json.Marshal(ctr.config) if err != nil { - return fmt.Errorf("error marshalling container %s config to JSON: %w", ctr.ID(), err) + return fmt.Errorf("marshalling container %s config to JSON: %w", ctr.ID(), err) } stateJSON, err := json.Marshal(ctr.state) if err != nil { - return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err) + return fmt.Errorf("marshalling container %s state to JSON: %w", ctr.ID(), err) } netNSPath := getNetNSPath(ctr) dependsCtrs := ctr.Dependencies() @@ -603,7 +603,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { opts.Aliases = append(opts.Aliases, ctr.config.ID[:12]) optBytes, err := json.Marshal(opts) if err != nil { - return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err) + return fmt.Errorf("marshalling network options JSON for container %s: %w", ctr.ID(), err) } networks[net] = optBytes } @@ -694,60 +694,60 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { // No overlapping containers // Add the new container to the DB if err := idsBucket.Put(ctrID, ctrName); err != nil { - return fmt.Errorf("error adding container %s ID to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s ID to DB: %w", ctr.ID(), err) } if err := namesBucket.Put(ctrName, ctrID); err != nil { - return fmt.Errorf("error adding container %s name (%s) to DB: %w", ctr.ID(), ctr.Name(), err) + return fmt.Errorf("adding container %s name (%s) to DB: %w", ctr.ID(), ctr.Name(), err) } if ctrNamespace != nil { if err := nsBucket.Put(ctrID, ctrNamespace); err != nil { - return fmt.Errorf("error adding container %s namespace (%q) to DB: %w", ctr.ID(), ctr.Namespace(), err) + return fmt.Errorf("adding container %s namespace (%q) to DB: %w", ctr.ID(), ctr.Namespace(), err) } } if err := allCtrsBucket.Put(ctrID, ctrName); err != nil { - return fmt.Errorf("error adding container %s to all containers bucket in DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s to all containers bucket in DB: %w", ctr.ID(), err) } newCtrBkt, err := ctrBucket.CreateBucket(ctrID) if err != nil { - return fmt.Errorf("error adding container %s bucket to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s bucket to DB: %w", ctr.ID(), err) } if err := newCtrBkt.Put(configKey, configJSON); err != nil { - return fmt.Errorf("error adding container %s config to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s config to DB: %w", ctr.ID(), err) } if err := newCtrBkt.Put(stateKey, stateJSON); err != nil { - return fmt.Errorf("error adding container %s state to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s state to DB: %w", ctr.ID(), err) } if ctrNamespace != nil { if err := newCtrBkt.Put(namespaceKey, ctrNamespace); err != nil { - return fmt.Errorf("error adding container %s namespace to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s namespace to DB: %w", ctr.ID(), err) } } if pod != nil { if err := newCtrBkt.Put(podIDKey, []byte(pod.ID())); err != nil { - return fmt.Errorf("error adding container %s pod to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s pod to DB: %w", ctr.ID(), err) } } if netNSPath != "" { if err := 
newCtrBkt.Put(netNSKey, []byte(netNSPath)); err != nil { - return fmt.Errorf("error adding container %s netns path to DB: %w", ctr.ID(), err) + return fmt.Errorf("adding container %s netns path to DB: %w", ctr.ID(), err) } } if len(networks) > 0 { ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt) if err != nil { - return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err) + return fmt.Errorf("creating networks bucket for container %s: %w", ctr.ID(), err) } for network, opts := range networks { if err := ctrNetworksBkt.Put([]byte(network), opts); err != nil { - return fmt.Errorf("error adding network %q to networks bucket for container %s: %w", network, ctr.ID(), err) + return fmt.Errorf("adding network %q to networks bucket for container %s: %w", network, ctr.ID(), err) } } } if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil { - return fmt.Errorf("error creating dependencies bucket for container %s: %w", ctr.ID(), err) + return fmt.Errorf("creating dependencies bucket for container %s: %w", ctr.ID(), err) } // Add dependencies for the container @@ -784,14 +784,14 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { return fmt.Errorf("container %s does not have a dependencies bucket: %w", dependsCtr, define.ErrInternal) } if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil { - return fmt.Errorf("error adding ctr %s as dependency of container %s: %w", ctr.ID(), dependsCtr, err) + return fmt.Errorf("adding ctr %s as dependency of container %s: %w", ctr.ID(), dependsCtr, err) } } // Add ctr to pod if pod != nil && podCtrs != nil { if err := podCtrs.Put(ctrID, ctrName); err != nil { - return fmt.Errorf("error adding container %s to pod %s: %w", ctr.ID(), pod.ID(), err) + return fmt.Errorf("adding container %s to pod %s: %w", ctr.ID(), pod.ID(), err) } } @@ -804,11 +804,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt) if err != nil { - return fmt.Errorf("error creating volume %s dependencies bucket to add container %s: %w", vol.Name, ctr.ID(), err) + return fmt.Errorf("creating volume %s dependencies bucket to add container %s: %w", vol.Name, ctr.ID(), err) } if depExists := ctrDepsBkt.Get(ctrID); depExists == nil { if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil { - return fmt.Errorf("error adding container %s to volume %s dependencies: %w", ctr.ID(), vol.Name, err) + return fmt.Errorf("adding container %s to volume %s dependencies: %w", ctr.ID(), vol.Name, err) } } } @@ -902,7 +902,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error return fmt.Errorf("container %s is not in pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr) } if err := podCtrs.Delete(ctrID); err != nil { - return fmt.Errorf("error removing container %s from pod %s: %w", ctr.ID(), pod.ID(), err) + return fmt.Errorf("removing container %s from pod %s: %w", ctr.ID(), pod.ID(), err) } } } @@ -943,21 +943,21 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } if err := ctrBucket.DeleteBucket(ctrID); err != nil { - return fmt.Errorf("error deleting container %s from DB: %w", ctr.ID(), define.ErrInternal) + return fmt.Errorf("deleting container %s from DB: %w", ctr.ID(), define.ErrInternal) } if err := idsBucket.Delete(ctrID); err != nil { - return fmt.Errorf("error deleting container %s ID in DB: %w", ctr.ID(), err) + return fmt.Errorf("deleting container %s ID in DB: %w", ctr.ID(), err) } if 
err := namesBucket.Delete(ctrName); err != nil { - return fmt.Errorf("error deleting container %s name in DB: %w", ctr.ID(), err) + return fmt.Errorf("deleting container %s name in DB: %w", ctr.ID(), err) } if err := nsBucket.Delete(ctrID); err != nil { - return fmt.Errorf("error deleting container %s namespace in DB: %w", ctr.ID(), err) + return fmt.Errorf("deleting container %s namespace in DB: %w", ctr.ID(), err) } if err := allCtrsBucket.Delete(ctrID); err != nil { - return fmt.Errorf("error deleting container %s from all containers bucket in DB: %w", ctr.ID(), err) + return fmt.Errorf("deleting container %s from all containers bucket in DB: %w", ctr.ID(), err) } depCtrs := ctr.Dependencies() @@ -986,7 +986,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } if err := depCtrDependsBkt.Delete(ctrID); err != nil { - return fmt.Errorf("error removing container %s as a dependency of container %s: %w", ctr.ID(), depCtr, err) + return fmt.Errorf("removing container %s as a dependency of container %s: %w", ctr.ID(), depCtr, err) } } @@ -1005,7 +1005,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } if depExists := ctrDepsBkt.Get(ctrID); depExists == nil { if err := ctrDepsBkt.Delete(ctrID); err != nil { - return fmt.Errorf("error deleting container %s dependency on volume %s: %w", ctr.ID(), vol.Name, err) + return fmt.Errorf("deleting container %s dependency on volume %s: %w", ctr.ID(), vol.Name, err) } } } diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go index 813afd8bf..3d9f5b6a2 100644 --- a/libpod/boltdb_state_linux.go +++ b/libpod/boltdb_state_linux.go @@ -30,7 +30,7 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er newState.NetNS = ns } else { if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { - return fmt.Errorf("error joining network namespace of container %s: %w", ctr.ID(), err) + return fmt.Errorf("joining network namespace of container %s: %w", ctr.ID(), err) } logrus.Errorf("Joining network namespace for container %s: %v", ctr.ID(), err) diff --git a/libpod/container.go b/libpod/container.go index 1891b124f..bdedafd22 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -353,14 +353,14 @@ func (c *Container) specFromState() (*spec.Spec, error) { returnSpec = new(spec.Spec) content, err := ioutil.ReadAll(f) if err != nil { - return nil, fmt.Errorf("error reading container config: %w", err) + return nil, fmt.Errorf("reading container config: %w", err) } if err := json.Unmarshal(content, &returnSpec); err != nil { - return nil, fmt.Errorf("error unmarshalling container config: %w", err) + return nil, fmt.Errorf("unmarshalling container config: %w", err) } } else if !os.IsNotExist(err) { // ignore when the file does not exist - return nil, fmt.Errorf("error opening container config: %w", err) + return nil, fmt.Errorf("opening container config: %w", err) } return returnSpec, nil @@ -703,7 +703,7 @@ func (c *Container) Mounted() (bool, string, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return false, "", fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return false, "", fmt.Errorf("updating container %s state: %w", c.ID(), err) } } // We cannot directly return c.state.Mountpoint as it is not guaranteed @@ -733,7 +733,7 @@ func (c *Container) StartedTime() (time.Time, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - 
return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return time.Time{}, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.state.StartedTime, nil @@ -745,7 +745,7 @@ func (c *Container) FinishedTime() (time.Time, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return time.Time{}, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.state.FinishedTime, nil @@ -760,7 +760,7 @@ func (c *Container) ExitCode() (int32, bool, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return 0, false, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return 0, false, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.state.ExitCode, c.state.Exited, nil @@ -772,7 +772,7 @@ func (c *Container) OOMKilled() (bool, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return false, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return false, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.state.OOMKilled, nil @@ -859,7 +859,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) { returnSession := new(ExecSession) if err := JSONDeepCopy(session, returnSession); err != nil { - return nil, fmt.Errorf("error copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err) + return nil, fmt.Errorf("copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err) } return returnSession, nil @@ -919,7 +919,7 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return "", fmt.Errorf("updating container %s state: %w", c.ID(), err) } } @@ -957,7 +957,7 @@ func (c *Container) CgroupPath() (string, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return "", fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.cGroupPath() @@ -1057,7 +1057,7 @@ func (c *Container) RootFsSize() (int64, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return -1, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.rootFsSize() @@ -1069,7 +1069,7 @@ func (c *Container) RWSize() (int64, error) { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err) + return -1, fmt.Errorf("updating container %s state: %w", c.ID(), err) } } return c.rwSize() @@ -1157,7 +1157,7 @@ func (c *Container) ContainerState() (*ContainerState, error) { } returnConfig := new(ContainerState) if err := JSONDeepCopy(c.state, returnConfig); err != nil { - return nil, fmt.Errorf("error copying container %s state: %w", c.ID(), err) + return nil, fmt.Errorf("copying container %s state: %w", c.ID(), err) } return c.state, nil } diff --git a/libpod/container_api.go b/libpod/container_api.go index f88e38ce1..dd47b4d12 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -684,7 +684,7 @@ func (c *Container) 
Cleanup(ctx context.Context) error { // When the container has already been removed, the OCI runtime directory remain. if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { if err := c.cleanupRuntime(ctx); err != nil { - return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err) + return fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err) } return nil } diff --git a/libpod/container_commit.go b/libpod/container_commit.go index c93c9c7bb..f447816e3 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -48,7 +48,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai if c.state.State == define.ContainerStateRunning && options.Pause { if err := c.pause(); err != nil { - return nil, fmt.Errorf("error pausing container %q to commit: %w", c.ID(), err) + return nil, fmt.Errorf("pausing container %q to commit: %w", c.ID(), err) } defer func() { if err := c.unpause(); err != nil { @@ -202,7 +202,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, resolvedImageName) if err != nil { - return nil, fmt.Errorf("error parsing target image name %q: %w", destImage, err) + return nil, fmt.Errorf("parsing target image name %q: %w", destImage, err) } commitRef = imageRef } diff --git a/libpod/container_exec.go b/libpod/container_exec.go index d3c80e896..ca3961d8a 100644 --- a/libpod/container_exec.go +++ b/libpod/container_exec.go @@ -205,7 +205,7 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) { session.State = define.ExecStateCreated session.Config = new(ExecConfig) if err := JSONDeepCopy(config, session.Config); err != nil { - return "", fmt.Errorf("error copying exec configuration into exec session: %w", err) + return "", fmt.Errorf("copying exec configuration into exec session: %w", err) } if len(session.Config.ExitCommand) > 0 { @@ -372,7 +372,7 @@ func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachS if lastErr != nil { logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) } - return fmt.Errorf("error syncing container %s state to update exec session %s: %w", c.ID(), sessionID, err) + return fmt.Errorf("syncing container %s state to update exec session %s: %w", c.ID(), sessionID, err) } // Now handle the error from readExecExitCode above. @@ -809,7 +809,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi // streaming. diedEvent, err := c.runtime.GetExecDiedEvent(context.Background(), c.ID(), sessionID) if err != nil { - return -1, fmt.Errorf("error retrieving exec session %s exit code: %w", sessionID, err) + return -1, fmt.Errorf("retrieving exec session %s exit code: %w", sessionID, err) } return diedEvent.ContainerExitCode, nil } @@ -911,7 +911,7 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) { if err := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err != nil { // The directory is allowed to exist if !os.IsExist(err) { - return fmt.Errorf("error creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err) + return fmt.Errorf("creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err) } } return nil @@ -1121,7 +1121,7 @@ func writeExecExitCode(c *Container, sessionID string, exitCode int) error { // need to. Exit without error. 
return nil } - return fmt.Errorf("error syncing container %s state to remove exec session %s: %w", c.ID(), sessionID, err) + return fmt.Errorf("syncing container %s state to remove exec session %s: %w", c.ID(), sessionID, err) } return justWriteExecExitCode(c, sessionID, exitCode) diff --git a/libpod/container_freebsd.go b/libpod/container_freebsd.go index 7292ba37a..87fb494dd 100644 --- a/libpod/container_freebsd.go +++ b/libpod/container_freebsd.go @@ -4,11 +4,20 @@ package libpod type containerPlatformState struct { - // NetworkJail is the name of the container's network VNET + // NetNS is the name of the container's network VNET // jail. Will only be set if config.CreateNetNS is true, or // the container was told to join another container's network // namespace. - NetworkJail string `json:"-"` + NetNS *jailNetNS `json:"-"` +} + +type jailNetNS struct { + Name string `json:"-"` +} + +func (ns *jailNetNS) Path() string { + // The jail name approximately corresponds to the Linux netns path + return ns.Name } func networkDisabled(c *Container) (bool, error) { @@ -16,7 +25,7 @@ func networkDisabled(c *Container) (bool, error) { return false, nil } if !c.config.PostConfigureNetNS { - return c.state.NetworkJail == "", nil + return c.state.NetNS != nil, nil } return false, nil } diff --git a/libpod/container_graph.go b/libpod/container_graph.go index 67b1abc34..96d61b756 100644 --- a/libpod/container_graph.go +++ b/libpod/container_graph.go @@ -160,7 +160,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) { // Popped item is no longer on the stack, mark as such topInfo, ok := nodes[topOfStack.id] if !ok { - return false, fmt.Errorf("error finding node info for %s: %w", topOfStack.id, define.ErrInternal) + return false, fmt.Errorf("finding node info for %s: %w", topOfStack.id, define.ErrInternal) } topInfo.onStack = false diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index ad8bae286..b72d843b6 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -24,15 +24,15 @@ import ( func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) { storeCtr, err := c.runtime.store.Container(c.ID()) if err != nil { - return nil, fmt.Errorf("error getting container from store %q: %w", c.ID(), err) + return nil, fmt.Errorf("getting container from store %q: %w", c.ID(), err) } layer, err := c.runtime.store.Layer(storeCtr.LayerID) if err != nil { - return nil, fmt.Errorf("error reading information about layer %q: %w", storeCtr.LayerID, err) + return nil, fmt.Errorf("reading information about layer %q: %w", storeCtr.LayerID, err) } driverData, err := driver.GetDriverData(c.runtime.store, layer.ID) if err != nil { - return nil, fmt.Errorf("error getting graph driver info %q: %w", c.ID(), err) + return nil, fmt.Errorf("getting graph driver info %q: %w", c.ID(), err) } return c.getContainerInspectData(size, driverData) } @@ -241,7 +241,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes // volume. 
volFromDB, err := c.runtime.state.Volume(volume.Name) if err != nil { - return nil, fmt.Errorf("error looking up volume %s in container %s config: %w", volume.Name, c.ID(), err) + return nil, fmt.Errorf("looking up volume %s in container %s config: %w", volume.Name, c.ID(), err) } mountStruct.Driver = volFromDB.Driver() diff --git a/libpod/container_internal.go b/libpod/container_internal.go index d61812cb7..994243805 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -207,7 +207,7 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { } statusCode, err := strconv.Atoi(string(statusCodeStr)) if err != nil { - return fmt.Errorf("error converting exit status code (%q, err) for container %s to int: %w", + return fmt.Errorf("converting exit status code (%q, err) for container %s to int: %w", c.ID(), statusCodeStr, err) } c.state.ExitCode = int32(statusCode) @@ -465,7 +465,7 @@ func (c *Container) setupStorage(ctx context.Context) error { defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions()) if err != nil { - return fmt.Errorf("error getting default mount options: %w", err) + return fmt.Errorf("getting default mount options: %w", err) } var newOptions []string for _, opt := range defOptions { @@ -500,7 +500,7 @@ func (c *Container) setupStorage(ctx context.Context) error { } } if containerInfoErr != nil { - return fmt.Errorf("error creating container storage: %w", containerInfoErr) + return fmt.Errorf("creating container storage: %w", containerInfoErr) } // Only reconfig IDMappings if layer was mounted from storage. @@ -542,7 +542,7 @@ func (c *Container) setupStorage(ctx context.Context) error { artifacts := filepath.Join(c.config.StaticDir, artifactsDir) if err := os.MkdirAll(artifacts, 0755); err != nil { - return fmt.Errorf("error creating artifacts directory: %w", err) + return fmt.Errorf("creating artifacts directory: %w", err) } return nil @@ -576,7 +576,7 @@ func (c *Container) teardownStorage() error { artifacts := filepath.Join(c.config.StaticDir, artifactsDir) if err := os.RemoveAll(artifacts); err != nil { - return fmt.Errorf("error removing container %s artifacts %q: %w", c.ID(), artifacts, err) + return fmt.Errorf("removing container %s artifacts %q: %w", c.ID(), artifacts, err) } if err := c.cleanupStorage(); err != nil { @@ -593,7 +593,7 @@ func (c *Container) teardownStorage() error { return nil } - return fmt.Errorf("error removing container %s root filesystem: %w", c.ID(), err) + return fmt.Errorf("removing container %s root filesystem: %w", c.ID(), err) } return nil @@ -644,7 +644,7 @@ func (c *Container) refresh() error { // It was lost in the reboot and must be recreated dir, err := c.runtime.storageService.GetRunDir(c.ID()) if err != nil { - return fmt.Errorf("error retrieving temporary directory for container %s: %w", c.ID(), err) + return fmt.Errorf("retrieving temporary directory for container %s: %w", c.ID(), err) } c.state.RunDir = dir @@ -658,7 +658,7 @@ func (c *Container) refresh() error { } root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID()) if err := os.MkdirAll(root, 0755); err != nil { - return fmt.Errorf("error creating userNS tmpdir for container %s: %w", c.ID(), err) + return fmt.Errorf("creating userNS tmpdir for container %s: %w", c.ID(), err) } if err := os.Chown(root, c.RootUID(), c.RootGID()); err != nil { return err @@ -668,7 +668,7 @@ func (c *Container) refresh() error { // We need to pick up a new lock lock, err 
:= c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID) if err != nil { - return fmt.Errorf("error acquiring lock %d for container %s: %w", c.config.LockID, c.ID(), err) + return fmt.Errorf("acquiring lock %d for container %s: %w", c.config.LockID, c.ID(), err) } c.lock = lock @@ -689,7 +689,7 @@ func (c *Container) refresh() error { } if err := c.save(); err != nil { - return fmt.Errorf("error refreshing state for container %s: %w", c.ID(), err) + return fmt.Errorf("refreshing state for container %s: %w", c.ID(), err) } // Remove ctl and attach files, which may persist across reboot @@ -710,22 +710,22 @@ func (c *Container) removeConmonFiles() error { } if err := os.Remove(attachFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing container %s attach file: %w", c.ID(), err) + return fmt.Errorf("removing container %s attach file: %w", c.ID(), err) } ctlFile := filepath.Join(c.bundlePath(), "ctl") if err := os.Remove(ctlFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing container %s ctl file: %w", c.ID(), err) + return fmt.Errorf("removing container %s ctl file: %w", c.ID(), err) } winszFile := filepath.Join(c.bundlePath(), "winsz") if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing container %s winsz file: %w", c.ID(), err) + return fmt.Errorf("removing container %s winsz file: %w", c.ID(), err) } oomFile := filepath.Join(c.bundlePath(), "oom") if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing container %s OOM file: %w", c.ID(), err) + return fmt.Errorf("removing container %s OOM file: %w", c.ID(), err) } // Remove the exit file so we don't leak memory in tmpfs @@ -734,7 +734,7 @@ func (c *Container) removeConmonFiles() error { return err } if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing container %s exit file: %w", c.ID(), err) + return fmt.Errorf("removing container %s exit file: %w", c.ID(), err) } return nil @@ -757,12 +757,12 @@ func (c *Container) export(path string) error { input, err := archive.Tar(mountPoint, archive.Uncompressed) if err != nil { - return fmt.Errorf("error reading container directory %q: %w", c.ID(), err) + return fmt.Errorf("reading container directory %q: %w", c.ID(), err) } outFile, err := os.Create(path) if err != nil { - return fmt.Errorf("error creating file %q: %w", path, err) + return fmt.Errorf("creating file %q: %w", path, err) } defer outFile.Close() @@ -778,7 +778,7 @@ func (c *Container) getArtifactPath(name string) string { // save container state to the database func (c *Container) save() error { if err := c.runtime.state.SaveContainer(c); err != nil { - return fmt.Errorf("error saving container %s state: %w", c.ID(), err) + return fmt.Errorf("saving container %s state: %w", c.ID(), err) } return nil } @@ -832,7 +832,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr func (c *Container) checkDependenciesAndHandleError() error { notRunning, err := c.checkDependenciesRunning() if err != nil { - return fmt.Errorf("error checking dependencies for container %s: %w", c.ID(), err) + return fmt.Errorf("checking dependencies for container %s: %w", c.ID(), err) } if len(notRunning) > 0 { depString := strings.Join(notRunning, ",") @@ -851,7 +851,7 @@ func (c *Container) startDependencies(ctx context.Context) error { depVisitedCtrs := make(map[string]*Container) if err := 
c.getAllDependencies(depVisitedCtrs); err != nil { - return fmt.Errorf("error starting dependency for container %s: %w", c.ID(), err) + return fmt.Errorf("starting dependency for container %s: %w", c.ID(), err) } // Because of how Go handles passing slices through functions, a slice cannot grow between function calls @@ -864,7 +864,7 @@ func (c *Container) startDependencies(ctx context.Context) error { // Build a dependency graph of containers graph, err := BuildContainerGraph(depCtrs) if err != nil { - return fmt.Errorf("error generating dependency graph for container %s: %w", c.ID(), err) + return fmt.Errorf("generating dependency graph for container %s: %w", c.ID(), err) } // If there are no containers without dependencies, we can't start @@ -890,7 +890,7 @@ func (c *Container) startDependencies(ctx context.Context) error { for _, e := range ctrErrors { logrus.Errorf("%q", e) } - return fmt.Errorf("error starting some containers: %w", define.ErrInternal) + return fmt.Errorf("starting some containers: %w", define.ErrInternal) } return nil } @@ -946,13 +946,13 @@ func (c *Container) checkDependenciesRunning() ([]string, error) { // Get the dependency container depCtr, err := c.runtime.state.Container(dep) if err != nil { - return nil, fmt.Errorf("error retrieving dependency %s of container %s from state: %w", dep, c.ID(), err) + return nil, fmt.Errorf("retrieving dependency %s of container %s from state: %w", dep, c.ID(), err) } // Check the status state, err := depCtr.State() if err != nil { - return nil, fmt.Errorf("error retrieving state of dependency %s of container %s: %w", dep, c.ID(), err) + return nil, fmt.Errorf("retrieving state of dependency %s of container %s: %w", dep, c.ID(), err) } if state != define.ContainerStateRunning && !depCtr.config.IsInfra { notRunning = append(notRunning, dep) @@ -1280,7 +1280,7 @@ func (c *Container) stop(timeout uint) error { // is held when busy-waiting for the container to be stopped. c.state.State = define.ContainerStateStopping if err := c.save(); err != nil { - return fmt.Errorf("error saving container %s state before stopping: %w", c.ID(), err) + return fmt.Errorf("saving container %s state before stopping: %w", c.ID(), err) } if !c.batched { c.lock.Unlock() @@ -1346,7 +1346,7 @@ func (c *Container) stop(timeout uint) error { } if err := c.save(); err != nil { - return fmt.Errorf("error saving container %s state after stopping: %w", c.ID(), err) + return fmt.Errorf("saving container %s state after stopping: %w", c.ID(), err) } // Wait until we have an exit file, and sync once we do @@ -1633,7 +1633,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) logrus.Debugf("Going to mount named volume %s", v.Name) vol, err := c.runtime.state.Volume(v.Name) if err != nil { - return nil, fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) + return nil, fmt.Errorf("retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) } if vol.config.LockID == c.config.LockID { @@ -1643,7 +1643,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) defer vol.lock.Unlock() if vol.needsMount() { if err := vol.mount(); err != nil { - return nil, fmt.Errorf("error mounting volume %s for container %s: %w", vol.Name(), c.ID(), err) + return nil, fmt.Errorf("mounting volume %s for container %s: %w", vol.Name(), c.ID(), err) } } // The volume may need a copy-up. Check the state. 
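All of the hunks above are the same mechanical change: the "error " prefix is dropped from wrapped fmt.Errorf messages. Because each caller wraps with %w and adds its own context, the old prefixes stacked into chains like "error starting some containers: error checking dependencies: ...". A minimal sketch of the difference, with invented function names (not part of the diff):

package main

import (
	"errors"
	"fmt"
)

func openDB() error {
	return errors.New("permission denied") // innermost failure
}

// Old style: every wrapping level repeats the "error " prefix.
func refreshOld() error {
	return fmt.Errorf("error opening database: %w", openDB())
}

// New style, as in these hunks: context only, no prefix.
func refreshNew() error {
	return fmt.Errorf("opening database: %w", openDB())
}

func main() {
	// Prints: Error: error refreshing state: error opening database: permission denied
	fmt.Println("Error:", fmt.Errorf("error refreshing state: %w", refreshOld()))
	// Prints: Error: refreshing state: opening database: permission denied
	fmt.Println("Error:", fmt.Errorf("refreshing state: %w", refreshNew()))
}

Matching with errors.Is and errors.As is unaffected either way; only the printed text changes.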
@@ -1656,7 +1656,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest) if err != nil { - return nil, fmt.Errorf("error calculating destination path to copy up container %s volume %s: %w", c.ID(), vol.Name(), err) + return nil, fmt.Errorf("calculating destination path to copy up container %s volume %s: %w", c.ID(), vol.Name(), err) } // Do a manual stat on the source directory to verify existence. // Skip the rest if it exists. @@ -1667,7 +1667,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) // up. return vol, nil } - return nil, fmt.Errorf("error identifying source directory for copy up into volume %s: %w", vol.Name(), err) + return nil, fmt.Errorf("identifying source directory for copy up into volume %s: %w", vol.Name(), err) } // If it's not a directory we're mounting over it. if !srcStat.IsDir() { @@ -1679,7 +1679,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) // RHBZ#1928643 srcContents, err := os.ReadDir(srcDir) if err != nil { - return nil, fmt.Errorf("error reading contents of source directory for copy up into volume %s: %w", vol.Name(), err) + return nil, fmt.Errorf("reading contents of source directory for copy up into volume %s: %w", vol.Name(), err) } if len(srcContents) == 0 { return vol, nil @@ -1689,7 +1689,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) volMount := vol.mountPoint() contents, err := os.ReadDir(volMount) if err != nil { - return nil, fmt.Errorf("error listing contents of volume %s mountpoint when copying up from container %s: %w", vol.Name(), c.ID(), err) + return nil, fmt.Errorf("listing contents of volume %s mountpoint when copying up from container %s: %w", vol.Name(), c.ID(), err) } if len(contents) > 0 { // The volume is not empty. It was likely modified @@ -1731,11 +1731,11 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) if err2 != nil { logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2) } - return nil, fmt.Errorf("error copying up to volume %s: %w", vol.Name(), err) + return nil, fmt.Errorf("copying up to volume %s: %w", vol.Name(), err) } if err := <-errChan; err != nil { - return nil, fmt.Errorf("error streaming container content for copy up into volume %s: %w", vol.Name(), err) + return nil, fmt.Errorf("streaming container content for copy up into volume %s: %w", vol.Name(), err) } } return vol, nil @@ -1818,7 +1818,7 @@ func (c *Container) cleanupStorage() error { if cleanupErr != nil { logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr) } - cleanupErr = fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) + cleanupErr = fmt.Errorf("retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) // We need to try and unmount every volume, so continue // if they fail. 
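The copy-up hunks above rely on two guards worth noting: securejoin.SecureJoin resolves the volume destination underneath the container mountpoint, so a symlink baked into the image cannot escape the rootfs, and the copy-up is skipped when the source is missing, is not a directory, or is empty. A rough standalone sketch of that precondition check, assuming the github.com/cyphar/filepath-securejoin module and hypothetical paths (not part of the diff):

package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// shouldCopyUp loosely mirrors the checks in mountNamedVolume: resolve dest
// under the mountpoint symlink-safely, then report true only when a
// non-empty directory exists there.
func shouldCopyUp(mountpoint, dest string) (string, bool, error) {
	srcDir, err := securejoin.SecureJoin(mountpoint, dest)
	if err != nil {
		return "", false, fmt.Errorf("calculating copy-up source: %w", err)
	}
	st, err := os.Stat(srcDir)
	if err != nil {
		if os.IsNotExist(err) {
			return srcDir, false, nil // nothing in the image at dest
		}
		return "", false, err
	}
	if !st.IsDir() {
		return srcDir, false, nil // mounting over a file: nothing to copy
	}
	entries, err := os.ReadDir(srcDir)
	if err != nil {
		return "", false, err
	}
	return srcDir, len(entries) > 0, nil
}

func main() {
	// Hypothetical overlay mountpoint and in-container destination.
	src, ok, err := shouldCopyUp("/tmp/merged", "/var/www/html")
	fmt.Println(src, ok, err)
}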
@@ -1831,7 +1831,7 @@ func (c *Container) cleanupStorage() error { if cleanupErr != nil { logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr) } - cleanupErr = fmt.Errorf("error unmounting volume %s for container %s: %w", vol.Name(), c.ID(), err) + cleanupErr = fmt.Errorf("unmounting volume %s for container %s: %w", vol.Name(), c.ID(), err) } vol.lock.Unlock() } @@ -1856,7 +1856,7 @@ func (c *Container) cleanup(ctx context.Context) error { // Clean up network namespace, if present if err := c.cleanupNetwork(); err != nil { - lastError = fmt.Errorf("error removing container %s network: %w", c.ID(), err) + lastError = fmt.Errorf("removing container %s network: %w", c.ID(), err) } // cleanup host entry if it is shared @@ -1894,7 +1894,7 @@ func (c *Container) cleanup(ctx context.Context) error { if lastError != nil { logrus.Errorf("Unmounting container %s storage: %v", c.ID(), err) } else { - lastError = fmt.Errorf("error unmounting container %s storage: %w", c.ID(), err) + lastError = fmt.Errorf("unmounting container %s storage: %w", c.ID(), err) } } @@ -1978,7 +1978,7 @@ func (c *Container) stopPodIfNeeded(ctx context.Context) error { // hooks. func (c *Container) delete(ctx context.Context) error { if err := c.ociRuntime.DeleteContainer(c); err != nil { - return fmt.Errorf("error removing container %s from runtime: %w", c.ID(), err) + return fmt.Errorf("removing container %s from runtime: %w", c.ID(), err) } if err := c.postDeleteHooks(ctx); err != nil { @@ -2041,7 +2041,7 @@ func (c *Container) writeStringToRundir(destFile, contents string) (string, erro destFileName := filepath.Join(c.state.RunDir, destFile) if err := os.Remove(destFileName); err != nil && !os.IsNotExist(err) { - return "", fmt.Errorf("error removing %s for container %s: %w", destFile, c.ID(), err) + return "", fmt.Errorf("removing %s for container %s: %w", destFile, c.ID(), err) } if err := writeStringToPath(destFileName, contents, c.config.MountLabel, c.RootUID(), c.RootGID()); err != nil { @@ -2075,22 +2075,22 @@ func (c *Container) saveSpec(spec *spec.Spec) error { jsonPath := filepath.Join(c.bundlePath(), "config.json") if _, err := os.Stat(jsonPath); err != nil { if !os.IsNotExist(err) { - return fmt.Errorf("error doing stat on container %s spec: %w", c.ID(), err) + return fmt.Errorf("doing stat on container %s spec: %w", c.ID(), err) } // The spec does not exist, we're fine } else { // The spec exists, need to remove it if err := os.Remove(jsonPath); err != nil { - return fmt.Errorf("error replacing runtime spec for container %s: %w", c.ID(), err) + return fmt.Errorf("replacing runtime spec for container %s: %w", c.ID(), err) } } fileJSON, err := json.Marshal(spec) if err != nil { - return fmt.Errorf("error exporting runtime spec for container %s to JSON: %w", c.ID(), err) + return fmt.Errorf("exporting runtime spec for container %s to JSON: %w", c.ID(), err) } if err := ioutil.WriteFile(jsonPath, fileJSON, 0644); err != nil { - return fmt.Errorf("error writing runtime spec JSON for container %s to disk: %w", c.ID(), err) + return fmt.Errorf("writing runtime spec JSON for container %s to disk: %w", c.ID(), err) } logrus.Debugf("Created OCI spec for container %s at %s", c.ID(), jsonPath) @@ -2158,11 +2158,11 @@ func (c *Container) mount() (string, error) { mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID()) if err != nil { - return "", fmt.Errorf("error mounting storage for container %s: %w", c.ID(), err) + return "", fmt.Errorf("mounting storage for container %s: %w", c.ID(), err) } 
mountPoint, err = filepath.EvalSymlinks(mountPoint) if err != nil { - return "", fmt.Errorf("error resolving storage path for container %s: %w", c.ID(), err) + return "", fmt.Errorf("resolving storage path for container %s: %w", c.ID(), err) } if err := os.Chown(mountPoint, c.RootUID(), c.RootGID()); err != nil { return "", fmt.Errorf("cannot chown %s to %d:%d: %w", mountPoint, c.RootUID(), c.RootGID(), err) @@ -2174,7 +2174,7 @@ func (c *Container) mount() (string, error) { func (c *Container) unmount(force bool) error { // Also unmount storage if _, err := c.runtime.storageService.UnmountContainerImage(c.ID(), force); err != nil { - return fmt.Errorf("error unmounting container %s root filesystem: %w", c.ID(), err) + return fmt.Errorf("unmounting container %s root filesystem: %w", c.ID(), err) } return nil @@ -2303,7 +2303,7 @@ func (c *Container) checkExitFile() error { return nil } - return fmt.Errorf("error running stat on container %s exit file: %w", c.ID(), err) + return fmt.Errorf("running stat on container %s exit file: %w", c.ID(), err) } // Alright, it exists. Transition to Stopped state. diff --git a/libpod/container_internal_common.go b/libpod/container_internal_common.go index 192a86b6a..c7f59aba5 100644 --- a/libpod/container_internal_common.go +++ b/libpod/container_internal_common.go @@ -147,7 +147,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { for _, namedVol := range c.config.NamedVolumes { volume, err := c.runtime.GetVolume(namedVol.Name) if err != nil { - return nil, fmt.Errorf("error retrieving volume %s to add to container %s: %w", namedVol.Name, c.ID(), err) + return nil, fmt.Errorf("retrieving volume %s to add to container %s: %w", namedVol.Name, c.ID(), err) } mountPoint, err := volume.MountPoint() if err != nil { @@ -308,11 +308,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // Mount the specified image. img, _, err := c.runtime.LibimageRuntime().LookupImage(volume.Source, nil) if err != nil { - return nil, fmt.Errorf("error creating image volume %q:%q: %w", volume.Source, volume.Dest, err) + return nil, fmt.Errorf("creating image volume %q:%q: %w", volume.Source, volume.Dest, err) } mountPoint, err := img.Mount(ctx, nil, "") if err != nil { - return nil, fmt.Errorf("error mounting image volume %q:%q: %w", volume.Source, volume.Dest, err) + return nil, fmt.Errorf("mounting image volume %q:%q: %w", volume.Source, volume.Dest, err) } contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID()) @@ -363,7 +363,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if len(c.config.Groups) > 0 { gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides) if err != nil { - return nil, fmt.Errorf("error looking up supplemental groups for container %s: %w", c.ID(), err) + return nil, fmt.Errorf("looking up supplemental groups for container %s: %w", c.ID(), err) } for _, gid := range gids { g.AddProcessAdditionalGid(gid) @@ -443,7 +443,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { } _, err := registry.InjectDevices(g.Config, c.config.CDIDevices...) 
if err != nil { - return nil, fmt.Errorf("error setting up CDI devices: %w", err) + return nil, fmt.Errorf("setting up CDI devices: %w", err) } } @@ -459,7 +459,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if m.Type == "tmpfs" { finalPath, err := securejoin.SecureJoin(c.state.Mountpoint, m.Destination) if err != nil { - return nil, fmt.Errorf("error resolving symlinks for mount destination %s: %w", m.Destination, err) + return nil, fmt.Errorf("resolving symlinks for mount destination %s: %w", m.Destination, err) } trimmedPath := strings.TrimPrefix(finalPath, strings.TrimSuffix(c.state.Mountpoint, "/")) m.Destination = trimmedPath @@ -473,7 +473,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // Warning: precreate hooks may alter g.Config in place. if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil { - return nil, fmt.Errorf("error setting up OCI Hooks: %w", err) + return nil, fmt.Errorf("setting up OCI Hooks: %w", err) } if len(c.config.EnvSecrets) > 0 { manager, err := c.runtime.SecretsManager() @@ -596,7 +596,7 @@ func (c *Container) resolveWorkDir() error { } // This might be a serious error (e.g., permission), so // we need to return the full error. - return fmt.Errorf("error detecting workdir %q on container %s: %w", workdir, c.ID(), err) + return fmt.Errorf("detecting workdir %q on container %s: %w", workdir, c.ID(), err) } return nil } @@ -604,16 +604,16 @@ func (c *Container) resolveWorkDir() error { if os.IsExist(err) { return nil } - return fmt.Errorf("error creating container %s workdir: %w", c.ID(), err) + return fmt.Errorf("creating container %s workdir: %w", c.ID(), err) } // Ensure container entrypoint is created (if required). uid, gid, _, err := chrootuser.GetUser(c.state.Mountpoint, c.User()) if err != nil { - return fmt.Errorf("error looking up %s inside of the container %s: %w", c.User(), c.ID(), err) + return fmt.Errorf("looking up %s inside of the container %s: %w", c.User(), c.ID(), err) } if err := os.Chown(resolvedWorkdir, int(uid), int(gid)); err != nil { - return fmt.Errorf("error chowning container %s workdir to container root: %w", c.ID(), err) + return fmt.Errorf("chowning container %s workdir to container root: %w", c.ID(), err) } return nil @@ -873,7 +873,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { // To correctly track deleted files, let's go through the output of 'podman diff' rootFsChanges, err := c.runtime.GetDiff("", c.ID(), define.DiffContainer) if err != nil { - return fmt.Errorf("error exporting root file-system diff for %q: %w", c.ID(), err) + return fmt.Errorf("exporting root file-system diff for %q: %w", c.ID(), err) } addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath()) @@ -890,7 +890,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { // Create an archive for each volume associated with the container if !options.IgnoreVolumes { if err := os.MkdirAll(expVolDir, 0700); err != nil { - return fmt.Errorf("error creating volumes export directory %q: %w", expVolDir, err) + return fmt.Errorf("creating volumes export directory %q: %w", expVolDir, err) } for _, v := range c.config.NamedVolumes { @@ -899,7 +899,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { volumeTarFile, err := os.Create(volumeTarFileFullPath) if err != nil { - return fmt.Errorf("error creating %q: %w", volumeTarFileFullPath, 
err) + return fmt.Errorf("creating %q: %w", volumeTarFileFullPath, err) } volume, err := c.runtime.GetVolume(v.Name) @@ -920,7 +920,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { IncludeSourceDir: true, }) if err != nil { - return fmt.Errorf("error reading volume directory %q: %w", v.Dest, err) + return fmt.Errorf("reading volume directory %q: %w", v.Dest, err) } _, err = io.Copy(volumeTarFile, input) @@ -940,12 +940,12 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error { }) if err != nil { - return fmt.Errorf("error reading checkpoint directory %q: %w", c.ID(), err) + return fmt.Errorf("reading checkpoint directory %q: %w", c.ID(), err) } outFile, err := os.Create(options.TargetFile) if err != nil { - return fmt.Errorf("error creating checkpoint export file %q: %w", options.TargetFile, err) + return fmt.Errorf("creating checkpoint export file %q: %w", options.TargetFile, err) } defer outFile.Close() @@ -1343,12 +1343,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti infraContainer.lock.Lock() if err := infraContainer.syncContainer(); err != nil { infraContainer.lock.Unlock() - return nil, 0, fmt.Errorf("error syncing infrastructure container %s status: %w", infraContainer.ID(), err) + return nil, 0, fmt.Errorf("syncing infrastructure container %s status: %w", infraContainer.ID(), err) } if infraContainer.state.State != define.ContainerStateRunning { if err := infraContainer.initAndStart(ctx); err != nil { infraContainer.lock.Unlock() - return nil, 0, fmt.Errorf("error starting infrastructure container %s status: %w", infraContainer.ID(), err) + return nil, 0, fmt.Errorf("starting infrastructure container %s status: %w", infraContainer.ID(), err) } } infraContainer.lock.Unlock() @@ -1591,7 +1591,7 @@ func (c *Container) getRootNetNsDepCtr() (depCtr *Container, err error) { depCtr, err = c.runtime.state.Container(nextCtr) if err != nil { - return nil, fmt.Errorf("error fetching dependency %s of container %s: %w", c.config.NetNsCtr, c.ID(), err) + return nil, fmt.Errorf("fetching dependency %s of container %s: %w", c.config.NetNsCtr, c.ID(), err) } // This should never happen without an error if depCtr == nil { @@ -1657,13 +1657,13 @@ func (c *Container) makeBindMounts() error { // them. 
depCtr, err := c.getRootNetNsDepCtr() if err != nil { - return fmt.Errorf("error fetching network namespace dependency container for container %s: %w", c.ID(), err) + return fmt.Errorf("fetching network namespace dependency container for container %s: %w", c.ID(), err) } // We need that container's bind mounts bindMounts, err := depCtr.BindMounts() if err != nil { - return fmt.Errorf("error fetching bind mounts from dependency %s of container %s: %w", depCtr.ID(), c.ID(), err) + return fmt.Errorf("fetching bind mounts from dependency %s of container %s: %w", depCtr.ID(), c.ID(), err) } // The other container may not have a resolv.conf or /etc/hosts @@ -1673,7 +1673,7 @@ func (c *Container) makeBindMounts() error { err := c.mountIntoRootDirs("/etc/resolv.conf", resolvPath) if err != nil { - return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err) + return fmt.Errorf("assigning mounts to container %s: %w", c.ID(), err) } } @@ -1693,13 +1693,13 @@ func (c *Container) makeBindMounts() error { err = etchosts.Add(hostsPath, getLocalhostHostEntry(c)) lock.Unlock() if err != nil { - return fmt.Errorf("error creating hosts file for container %s which depends on container %s: %w", c.ID(), depCtr.ID(), err) + return fmt.Errorf("creating hosts file for container %s which depends on container %s: %w", c.ID(), depCtr.ID(), err) } // finally, save it in the new container err = c.mountIntoRootDirs(config.DefaultHostsFile, hostsPath) if err != nil { - return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err) + return fmt.Errorf("assigning mounts to container %s: %w", c.ID(), err) } } @@ -1714,13 +1714,13 @@ func (c *Container) makeBindMounts() error { } else { if !c.config.UseImageResolvConf { if err := c.generateResolvConf(); err != nil { - return fmt.Errorf("error creating resolv.conf for container %s: %w", c.ID(), err) + return fmt.Errorf("creating resolv.conf for container %s: %w", c.ID(), err) } } if !c.config.UseImageHosts { if err := c.createHosts(); err != nil { - return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err) + return fmt.Errorf("creating hosts file for container %s: %w", c.ID(), err) } } } @@ -1738,7 +1738,7 @@ func (c *Container) makeBindMounts() error { } } else if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" { if err := c.createHosts(); err != nil { - return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err) + return fmt.Errorf("creating hosts file for container %s: %w", c.ID(), err) } } @@ -1750,7 +1750,7 @@ func (c *Container) makeBindMounts() error { if c.config.Passwd == nil || *c.config.Passwd { newPasswd, newGroup, err := c.generatePasswdAndGroup() if err != nil { - return fmt.Errorf("error creating temporary passwd file for container %s: %w", c.ID(), err) + return fmt.Errorf("creating temporary passwd file for container %s: %w", c.ID(), err) } if newPasswd != "" { // Make /etc/passwd @@ -1766,16 +1766,6 @@ func (c *Container) makeBindMounts() error { } } - // Make /etc/hostname - // This should never change, so no need to recreate if it exists - if _, ok := c.state.BindMounts["/etc/hostname"]; !ok { - hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname()) - if err != nil { - return fmt.Errorf("error creating hostname file for container %s: %w", c.ID(), err) - } - c.state.BindMounts["/etc/hostname"] = hostnamePath - } - // Make /etc/localtime ctrTimezone := c.Timezone() if ctrTimezone != "" { @@ -1783,7 +1773,7 @@ func (c *Container) makeBindMounts() error { 
if ctrTimezone != "local" { _, err = time.LoadLocation(ctrTimezone) if err != nil { - return fmt.Errorf("error finding timezone for container %s: %w", c.ID(), err) + return fmt.Errorf("finding timezone for container %s: %w", c.ID(), err) } } if _, ok := c.state.BindMounts["/etc/localtime"]; !ok { @@ -1791,18 +1781,18 @@ func (c *Container) makeBindMounts() error { if ctrTimezone == "local" { zonePath, err = filepath.EvalSymlinks("/etc/localtime") if err != nil { - return fmt.Errorf("error finding local timezone for container %s: %w", c.ID(), err) + return fmt.Errorf("finding local timezone for container %s: %w", c.ID(), err) } } else { zone := filepath.Join("/usr/share/zoneinfo", ctrTimezone) zonePath, err = filepath.EvalSymlinks(zone) if err != nil { - return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err) + return fmt.Errorf("setting timezone for container %s: %w", c.ID(), err) } } localtimePath, err := c.copyTimezoneFile(zonePath) if err != nil { - return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err) + return fmt.Errorf("setting timezone for container %s: %w", c.ID(), err) } c.state.BindMounts["/etc/localtime"] = localtimePath } @@ -1840,7 +1830,7 @@ rootless=%d } containerenvPath, err := c.writeStringToRundir(".containerenv", containerenv) if err != nil { - return fmt.Errorf("error creating containerenv file for container %s: %w", c.ID(), err) + return fmt.Errorf("creating containerenv file for container %s: %w", c.ID(), err) } c.state.BindMounts["/run/.containerenv"] = containerenvPath } @@ -1861,7 +1851,7 @@ rootless=%d if len(c.Secrets()) > 0 { // create /run/secrets if subscriptions did not create if err := c.createSecretMountDir(); err != nil { - return fmt.Errorf("error creating secrets mount: %w", err) + return fmt.Errorf("creating secrets mount: %w", err) } for _, secret := range c.Secrets() { secretFileName := secret.Name @@ -1879,7 +1869,7 @@ rootless=%d } } - return nil + return c.makePlatformBindMounts() } // generateResolvConf generates a containers resolv.conf @@ -1939,16 +1929,21 @@ func (c *Container) generateResolvConf() error { destPath := filepath.Join(c.state.RunDir, "resolv.conf") + var namespaces []spec.LinuxNamespace + if c.config.Spec.Linux != nil { + namespaces = c.config.Spec.Linux.Namespaces + } + if err := resolvconf.New(&resolvconf.Params{ IPv6Enabled: ipv6, KeepHostServers: keepHostServers, Nameservers: nameservers, - Namespaces: c.config.Spec.Linux.Namespaces, + Namespaces: namespaces, Options: options, Path: destPath, Searches: search, }); err != nil { - return fmt.Errorf("error building resolv.conf for container %s: %w", c.ID(), err) + return fmt.Errorf("building resolv.conf for container %s: %w", c.ID(), err) } return c.bindMountRootFile(destPath, resolvconf.DefaultResolvConf) @@ -2445,7 +2440,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { logrus.Debugf("Making /etc/passwd for container %s", c.ID()) originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") if err != nil { - return "", "", fmt.Errorf("error creating path to container %s /etc/passwd: %w", c.ID(), err) + return "", "", fmt.Errorf("creating path to container %s /etc/passwd: %w", c.ID(), err) } orig, err := ioutil.ReadFile(originPasswdFile) if err != nil && !os.IsNotExist(err) { @@ -2463,7 +2458,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { logrus.Debugf("Modifying container %s /etc/passwd", c.ID()) containerPasswd, err := 
securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") if err != nil { - return "", "", fmt.Errorf("error looking up location of container %s /etc/passwd: %w", c.ID(), err) + return "", "", fmt.Errorf("looking up location of container %s /etc/passwd: %w", c.ID(), err) } f, err := os.OpenFile(containerPasswd, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) @@ -2491,7 +2486,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { logrus.Debugf("Making /etc/group for container %s", c.ID()) originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group") if err != nil { - return "", "", fmt.Errorf("error creating path to container %s /etc/group: %w", c.ID(), err) + return "", "", fmt.Errorf("creating path to container %s /etc/group: %w", c.ID(), err) } orig, err := ioutil.ReadFile(originGroupFile) if err != nil && !os.IsNotExist(err) { @@ -2509,7 +2504,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { logrus.Debugf("Modifying container %s /etc/group", c.ID()) containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group") if err != nil { - return "", "", fmt.Errorf("error looking up location of container %s /etc/group: %w", c.ID(), err) + return "", "", fmt.Errorf("looking up location of container %s /etc/group: %w", c.ID(), err) } f, err := os.OpenFile(containerGroup, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) @@ -2593,7 +2588,7 @@ func (c *Container) createSecretMountDir() error { func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { vol, err := c.runtime.state.Volume(v.Name) if err != nil { - return fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) + return fmt.Errorf("retrieving named volume %s for container %s: %w", v.Name, c.ID(), err) } vol.lock.Lock() @@ -2620,7 +2615,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap) newPair, err := mappings.ToHost(p) if err != nil { - return fmt.Errorf("error mapping user %d:%d: %w", uid, gid, err) + return fmt.Errorf("mapping user %d:%d: %w", uid, gid, err) } uid = newPair.UID gid = newPair.GID diff --git a/libpod/container_internal_freebsd.go b/libpod/container_internal_freebsd.go index 40c6c5ebf..67f87a98d 100644 --- a/libpod/container_internal_freebsd.go +++ b/libpod/container_internal_freebsd.go @@ -4,7 +4,6 @@ package libpod import ( - "errors" "fmt" "os" "strings" @@ -24,20 +23,6 @@ var ( bindOptions = []string{} ) -// Network stubs to decouple container_internal_freebsd.go from -// networking_freebsd.go so they can be reviewed separately. 
-func (r *Runtime) createNetNS(ctr *Container) (netJail string, q map[string]types.StatusBlock, retErr error) { - return "", nil, errors.New("not implemented (*Runtime) createNetNS") -} - -func (r *Runtime) teardownNetNS(ctr *Container) error { - return errors.New("not implemented (*Runtime) teardownNetNS") -} - -func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) { - return nil, errors.New("not implemented (*Runtime) reloadContainerNetwork") -} - func (c *Container) mountSHM(shmOptions string) error { return nil } @@ -51,7 +36,7 @@ func (c *Container) unmountSHM(path string) error { func (c *Container) prepare() error { var ( wg sync.WaitGroup - jailName string + ctrNS *jailNetNS networkStatus map[string]types.StatusBlock createNetNSErr, mountStorageErr error mountPoint string @@ -63,9 +48,9 @@ func (c *Container) prepare() error { go func() { defer wg.Done() // Set up network namespace if not already set up - noNetNS := c.state.NetworkJail == "" + noNetNS := c.state.NetNS == nil if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS { - jailName, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + ctrNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) if createNetNSErr != nil { return } @@ -74,7 +59,7 @@ func (c *Container) prepare() error { defer tmpStateLock.Unlock() // Assign NetNS attributes to container - c.state.NetworkJail = jailName + c.state.NetNS = ctrNS c.state.NetworkStatus = networkStatus } }() @@ -162,9 +147,9 @@ func (c *Container) addNetworkContainer(g *generate.Generator, ctr string) error nsCtr, err := c.runtime.state.Container(ctr) c.runtime.state.UpdateContainer(nsCtr) if err != nil { - return fmt.Errorf("error retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err) + return fmt.Errorf("retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err) } - g.AddAnnotation("org.freebsd.parentJail", nsCtr.state.NetworkJail) + g.AddAnnotation("org.freebsd.parentJail", nsCtr.state.NetNS.Name) return nil } @@ -187,7 +172,7 @@ func openDirectory(path string) (fd int, err error) { func (c *Container) addNetworkNamespace(g *generate.Generator) error { if c.config.CreateNetNS { - g.AddAnnotation("org.freebsd.parentJail", c.state.NetworkJail) + g.AddAnnotation("org.freebsd.parentJail", c.state.NetNS.Name) } return nil } @@ -272,7 +257,7 @@ func (c *Container) isSlirp4netnsIPv6() (bool, error) { // check for net=none func (c *Container) hasNetNone() bool { - return c.state.NetworkJail == "" + return c.state.NetNS == nil } func setVolumeAtime(mountPoint string, st os.FileInfo) error { @@ -283,3 +268,7 @@ func setVolumeAtime(mountPoint string, st os.FileInfo) error { } return nil } + +func (c *Container) makePlatformBindMounts() error { + return nil +} diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 9b05a2d61..ef8649776 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -43,7 +43,7 @@ func (c *Container) mountSHM(shmOptions string) error { func (c *Container) unmountSHM(mount string) error { if err := unix.Unmount(mount, 0); err != nil { if err != syscall.EINVAL && err != syscall.ENOENT { - return fmt.Errorf("error unmounting container %s SHM mount %s: %w", c.ID(), mount, err) + return fmt.Errorf("unmounting container %s SHM mount %s: %w", c.ID(), mount, err) } // If it's just an EINVAL or ENOENT, debug logs only logrus.Debugf("Container %s failed to unmount %s : %v", c.ID(), mount, err) @@ -122,7 
+122,7 @@ func (c *Container) prepare() error { // createErr is guaranteed non-nil, so print // unconditionally logrus.Errorf("Preparing container %s: %v", c.ID(), createErr) - createErr = fmt.Errorf("error unmounting storage for container %s after network create failure: %w", c.ID(), err) + createErr = fmt.Errorf("unmounting storage for container %s after network create failure: %w", c.ID(), err) } } @@ -131,7 +131,7 @@ func (c *Container) prepare() error { if createErr != nil { if err := c.cleanupNetwork(); err != nil { logrus.Errorf("Preparing container %s: %v", c.ID(), createErr) - createErr = fmt.Errorf("error cleaning up container %s network after setup failure: %w", c.ID(), err) + createErr = fmt.Errorf("cleaning up container %s network after setup failure: %w", c.ID(), err) } } @@ -310,7 +310,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr string, specNS spec.LinuxNamespaceType) error { nsCtr, err := c.runtime.state.Container(ctr) if err != nil { - return fmt.Errorf("error retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err) + return fmt.Errorf("retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err) } if specNS == spec.UTSNamespace { @@ -441,7 +441,7 @@ func (c *Container) addNetworkNamespace(g *generate.Generator) error { func (c *Container) addSystemdMounts(g *generate.Generator) error { if c.Systemd() { if err := c.setupSystemd(g.Mounts(), *g); err != nil { - return fmt.Errorf("error adding systemd-specific mounts: %w", err) + return fmt.Errorf("adding systemd-specific mounts: %w", err) } } return nil @@ -652,3 +652,16 @@ func setVolumeAtime(mountPoint string, st os.FileInfo) error { } return nil } + +func (c *Container) makePlatformBindMounts() error { + // Make /etc/hostname + // This should never change, so no need to recreate if it exists + if _, ok := c.state.BindMounts["/etc/hostname"]; !ok { + hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname()) + if err != nil { + return fmt.Errorf("creating hostname file for container %s: %w", c.ID(), err) + } + c.state.BindMounts["/etc/hostname"] = hostnamePath + } + return nil +} diff --git a/libpod/container_stat_linux.go b/libpod/container_stat_linux.go index 72aabb516..dc3a524f5 100644 --- a/libpod/container_stat_linux.go +++ b/libpod/container_stat_linux.go @@ -168,7 +168,7 @@ func secureStat(root string, path string) (*copier.StatForItem, error) { if stat.IsSymlink { target, err := copier.Eval(root, path, copier.EvalOptions{}) if err != nil { - return nil, fmt.Errorf("error evaluating symlink in container: %w", err) + return nil, fmt.Errorf("evaluating symlink in container: %w", err) } // Need to make sure the symlink is relative to the root! 
target = strings.TrimPrefix(target, root) diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index 5571edf73..b9916c3a3 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -70,7 +70,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) { output, err = c.execPS(psDescriptors) if err != nil { - return nil, fmt.Errorf("error executing ps(1) in the container: %w", err) + return nil, fmt.Errorf("executing ps(1) in the container: %w", err) } // Trick: filter the ps command from the output instead of diff --git a/libpod/events.go b/libpod/events.go index 60142cb60..2f9799114 100644 --- a/libpod/events.go +++ b/libpod/events.go @@ -3,6 +3,7 @@ package libpod import ( "context" "fmt" + "path/filepath" "sync" "github.com/containers/podman/v4/libpod/events" @@ -11,6 +12,10 @@ import ( // newEventer returns an eventer that can be used to read/write events func (r *Runtime) newEventer() (events.Eventer, error) { + if r.config.Engine.EventsLogFilePath == "" { + // default, use path under tmpdir when none was explicitly set by the user + r.config.Engine.EventsLogFilePath = filepath.Join(r.config.Engine.TmpDir, "events", "events.log") + } options := events.EventerOptions{ EventerType: r.config.Engine.EventsLogger, LogFilePath: r.config.Engine.EventsLogFilePath, @@ -133,11 +138,7 @@ func (v *Volume) newVolumeEvent(status events.Status) { // Events is a wrapper function for everyone to begin tailing the events log // with options func (r *Runtime) Events(ctx context.Context, options events.ReadOptions) error { - eventer, err := r.newEventer() - if err != nil { - return err - } - return eventer.Read(ctx, options) + return r.eventer.Read(ctx, options) } // GetEvents reads the event log and returns events based on input filters @@ -149,10 +150,6 @@ func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Ev FromStart: true, Stream: false, } - eventer, err := r.newEventer() - if err != nil { - return nil, err - } logEvents := make([]*events.Event, 0, len(eventChannel)) readLock := sync.Mutex{} @@ -164,7 +161,7 @@ func (r *Runtime) GetEvents(ctx context.Context, filters []string) ([]*events.Ev readLock.Unlock() }() - readErr := eventer.Read(ctx, options) + readErr := r.eventer.Read(ctx, options) readLock.Lock() // Wait for the events to be consumed. 
return logEvents, readErr } diff --git a/libpod/events/events_freebsd.go b/libpod/events/events_freebsd.go index 17d410089..90933fa2c 100644 --- a/libpod/events/events_freebsd.go +++ b/libpod/events/events_freebsd.go @@ -14,7 +14,7 @@ func NewEventer(options EventerOptions) (Eventer, error) { case strings.ToUpper(LogFile.String()): return EventLogFile{options}, nil case strings.ToUpper(Null.String()): - return NewNullEventer(), nil + return newNullEventer(), nil case strings.ToUpper(Memory.String()): return NewMemoryEventer(), nil default: diff --git a/libpod/events/events_linux.go b/libpod/events/events_linux.go index e7801af5b..66b125dd5 100644 --- a/libpod/events/events_linux.go +++ b/libpod/events/events_linux.go @@ -18,9 +18,9 @@ func NewEventer(options EventerOptions) (Eventer, error) { } return eventer, nil case strings.ToUpper(LogFile.String()): - return EventLogFile{options}, nil + return newLogFileEventer(options) case strings.ToUpper(Null.String()): - return NewNullEventer(), nil + return newNullEventer(), nil case strings.ToUpper(Memory.String()): return NewMemoryEventer(), nil default: diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 16ef6504f..4986502a2 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -112,57 +112,16 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { } } - // the api requires a next|prev before getting a cursor - if _, err := j.Next(); err != nil { - return fmt.Errorf("failed to move journal cursor to next entry: %w", err) - } - - prevCursor, err := j.GetCursor() - if err != nil { - return fmt.Errorf("failed to get journal cursor: %w", err) - } for { - select { - case <-ctx.Done(): - // the consumer has cancelled - return nil - default: - // fallthrough - } - - if _, err := j.Next(); err != nil { - return fmt.Errorf("failed to move journal cursor to next entry: %w", err) - } - newCursor, err := j.GetCursor() + entry, err := getNextEntry(ctx, j, options.Stream, untilTime) if err != nil { - return fmt.Errorf("failed to get journal cursor: %w", err) + return err } - if prevCursor == newCursor { - if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) { - break - } - - // j.Wait() is blocking, this would cause the goroutine to hang forever - // if no more journal entries are generated and thus if the client - // has closed the connection in the meantime to leak memory. - // Waiting only 5 seconds makes sure we can check if the client closed in the - // meantime at least every 5 seconds. - t := 5 * time.Second - if len(options.Until) > 0 { - until := time.Until(untilTime) - if until < t { - t = until - } - } - _ = j.Wait(t) - continue + // no entry == we hit the end + if entry == nil { + return nil } - prevCursor = newCursor - entry, err := j.GetEntry() - if err != nil { - return fmt.Errorf("failed to read journal entry: %w", err) - } newEvent, err := newEventFromJournalEntry(entry) if err != nil { // We can't decode this event. @@ -177,7 +136,6 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { options.EventChannel <- newEvent } } - return nil } func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { @@ -238,3 +196,51 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { func (e EventJournalD) String() string { return Journald.String() } + +// getNextEntry returns the next entry in the journal. 
If the end of the +// journal is reached and stream is not set, or the current time is after +// the until time, this function returns nil, nil. +func getNextEntry(ctx context.Context, j *sdjournal.Journal, stream bool, untilTime time.Time) (*sdjournal.JournalEntry, error) { + for { + select { + case <-ctx.Done(): + // the consumer has cancelled + return nil, nil + default: + // fallthrough + } + // the api requires a next|prev before reading the event + ret, err := j.Next() + if err != nil { + return nil, fmt.Errorf("failed to move journal cursor to next entry: %w", err) + } + // ret == 0 equals EOF, see sd_journal_next(3) + if ret == 0 { + if !stream || (!untilTime.IsZero() && time.Now().After(untilTime)) { + // we hit the end and should not keep streaming + return nil, nil + } + // keep waiting for the next entry + // j.Wait() is blocking; waiting forever would hang this goroutine (and thus + // leak memory) if no more journal entries are generated and the client + // has closed the connection in the meantime. + // Waiting only 5 seconds makes sure we can check if the client closed at + // least every 5 seconds. + t := 5 * time.Second + if !untilTime.IsZero() { + until := time.Until(untilTime) + if until < t { + t = until + } + } + _ = j.Wait(t) + continue + } + + entry, err := j.GetEntry() + if err != nil { + return nil, fmt.Errorf("failed to read journal entry: %w", err) + } + return entry, nil + } +} diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go index 519e16629..d749a0d4d 100644 --- a/libpod/events/logfile.go +++ b/libpod/events/logfile.go @@ -12,6 +12,7 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "time" "github.com/containers/podman/v4/pkg/util" @@ -27,6 +28,21 @@ type EventLogFile struct { options EventerOptions } +// newLogFileEventer creates a new EventLogFile eventer +func newLogFileEventer(options EventerOptions) (*EventLogFile, error) { + // Create events log dir + if err := os.MkdirAll(filepath.Dir(options.LogFilePath), 0700); err != nil { + return nil, fmt.Errorf("creating events dirs: %w", err) + } + // We have to make sure the file is created, otherwise reading events will hang. + // https://github.com/containers/podman/issues/15688 + fd, err := os.OpenFile(options.LogFilePath, os.O_RDONLY|os.O_CREATE, 0700) + if err != nil { + return nil, fmt.Errorf("failed to create event log file: %w", err) + } + return &EventLogFile{options: options}, fd.Close() +} + // Writes to the log file func (e EventLogFile) Write(ee Event) error { // We need to lock events file @@ -108,6 +124,8 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { } }() } + logrus.Debugf("Reading events from file %q", e.options.LogFilePath) + var line *tail.Line + var ok bool for { diff --git a/libpod/events/nullout.go b/libpod/events/nullout.go index 587a1b98b..da3820c23 100644 --- a/libpod/events/nullout.go +++ b/libpod/events/nullout.go @@ -2,10 +2,11 @@ package events import ( "context" + "errors" ) -// EventToNull is an eventer type that only performs write operations -// and only writes to /dev/null. It is meant for unittests only +// EventToNull is an eventer type that does nothing. +// It is meant for unittests only type EventToNull struct{} // Write eats the event and always returns nil @@ -13,14 +14,14 @@ func (e EventToNull) Write(ee Event) error { return nil } -// Read does nothing. Do not use it. +// Read does nothing and returns an error. 
func (e EventToNull) Read(ctx context.Context, options ReadOptions) error { - return nil + return errors.New("cannot read events with the \"none\" backend") } -// NewNullEventer returns a new null eventer. You should only do this for +// newNullEventer returns a new null eventer. You should only do this for // the purposes of internal libpod testing. -func NewNullEventer() Eventer { +func newNullEventer() Eventer { return EventToNull{} } diff --git a/libpod/info.go b/libpod/info.go index 1990dc044..ad8f65432 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -28,20 +28,20 @@ func (r *Runtime) info() (*define.Info, error) { info := define.Info{} versionInfo, err := define.GetVersion() if err != nil { - return nil, fmt.Errorf("error getting version info: %w", err) + return nil, fmt.Errorf("getting version info: %w", err) } info.Version = versionInfo // get host information hostInfo, err := r.hostInfo() if err != nil { - return nil, fmt.Errorf("error getting host info: %w", err) + return nil, fmt.Errorf("getting host info: %w", err) } info.Host = hostInfo // get store information storeInfo, err := r.storeInfo() if err != nil { - return nil, fmt.Errorf("error getting store info: %w", err) + return nil, fmt.Errorf("getting store info: %w", err) } info.Store = storeInfo registries := make(map[string]interface{}) @@ -49,14 +49,14 @@ func (r *Runtime) info() (*define.Info, error) { sys := r.SystemContext() data, err := sysregistriesv2.GetRegistries(sys) if err != nil { - return nil, fmt.Errorf("error getting registries: %w", err) + return nil, fmt.Errorf("getting registries: %w", err) } for _, reg := range data { registries[reg.Prefix] = reg } regs, err := sysregistriesv2.UnqualifiedSearchRegistries(sys) if err != nil { - return nil, fmt.Errorf("error getting registries: %w", err) + return nil, fmt.Errorf("getting registries: %w", err) } if len(regs) > 0 { registries["search"] = regs @@ -80,19 +80,19 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) { // lets say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime mi, err := system.ReadMemInfo() if err != nil { - return nil, fmt.Errorf("error reading memory info: %w", err) + return nil, fmt.Errorf("reading memory info: %w", err) } hostDistributionInfo := r.GetHostDistributionInfo() kv, err := util.ReadKernelVersion() if err != nil { - return nil, fmt.Errorf("error reading kernel version: %w", err) + return nil, fmt.Errorf("reading kernel version: %w", err) } host, err := os.Hostname() if err != nil { - return nil, fmt.Errorf("error getting hostname: %w", err) + return nil, fmt.Errorf("getting hostname: %w", err) } cpuUtil, err := getCPUUtilization() @@ -131,7 +131,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) { duration, err := util.ReadUptime() if err != nil { - return nil, fmt.Errorf("error reading up time: %w", err) + return nil, fmt.Errorf("reading up time: %w", err) } uptime := struct { @@ -201,7 +201,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) { } images, err := r.store.Images() if err != nil { - return nil, fmt.Errorf("error getting number of images: %w", err) + return nil, fmt.Errorf("getting number of images: %w", err) } conInfo, err := r.getContainerStoreInfo() if err != nil { diff --git a/libpod/info_freebsd.go b/libpod/info_freebsd.go index ef7b6817c..1be988350 100644 --- a/libpod/info_freebsd.go +++ b/libpod/info_freebsd.go @@ -21,7 +21,7 @@ func timeToPercent(time uint64, total uint64) float64 { func getCPUUtilization() (*define.CPUUsage, 
error) { buf, err := unix.SysctlRaw("kern.cp_time") if err != nil { - return nil, fmt.Errorf("error reading sysctl kern.cp_time: %w", err) + return nil, fmt.Errorf("reading sysctl kern.cp_time: %w", err) } var total uint64 = 0 diff --git a/libpod/info_linux.go b/libpod/info_linux.go index 801dcdb43..44beafa8c 100644 --- a/libpod/info_linux.go +++ b/libpod/info_linux.go @@ -21,19 +21,19 @@ import ( func (r *Runtime) setPlatformHostInfo(info *define.HostInfo) error { seccompProfilePath, err := DefaultSeccompPath() if err != nil { - return fmt.Errorf("error getting Seccomp profile path: %w", err) + return fmt.Errorf("getting Seccomp profile path: %w", err) } // Cgroups version unified, err := cgroups.IsCgroup2UnifiedMode() if err != nil { - return fmt.Errorf("error reading cgroups mode: %w", err) + return fmt.Errorf("reading cgroups mode: %w", err) } // Get Map of all available controllers availableControllers, err := cgroups.GetAvailableControllers(nil, unified) if err != nil { - return fmt.Errorf("error getting available cgroup controllers: %w", err) + return fmt.Errorf("getting available cgroup controllers: %w", err) } info.CgroupManager = r.config.Engine.CgroupManager @@ -75,11 +75,11 @@ func (r *Runtime) setPlatformHostInfo(info *define.HostInfo) error { if rootless.IsRootless() { uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map") if err != nil { - return fmt.Errorf("error reading uid mappings: %w", err) + return fmt.Errorf("reading uid mappings: %w", err) } gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map") if err != nil { - return fmt.Errorf("error reading gid mappings: %w", err) + return fmt.Errorf("reading gid mappings: %w", err) } idmappings := define.IDMappings{ GIDMap: gidmappings, diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go index 55110fc0b..bcbaea5e6 100644 --- a/libpod/lock/file/file_lock.go +++ b/libpod/lock/file/file_lock.go @@ -102,7 +102,7 @@ func (locks *FileLocks) AllocateGivenLock(lck uint32) error { f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err != nil { - return fmt.Errorf("error creating lock %d: %w", lck, err) + return fmt.Errorf("creating lock %d: %w", lck, err) } f.Close() @@ -130,7 +130,7 @@ func (locks *FileLocks) DeallocateAllLocks() error { } files, err := os.ReadDir(locks.lockPath) if err != nil { - return fmt.Errorf("error reading directory %s: %w", locks.lockPath, err) + return fmt.Errorf("reading directory %s: %w", locks.lockPath, err) } var lastErr error for _, f := range files { @@ -152,7 +152,7 @@ func (locks *FileLocks) LockFileLock(lck uint32) error { l, err := storage.GetLockfile(locks.getLockPath(lck)) if err != nil { - return fmt.Errorf("error acquiring lock: %w", err) + return fmt.Errorf("acquiring lock: %w", err) } l.Lock() @@ -166,7 +166,7 @@ func (locks *FileLocks) UnlockFileLock(lck uint32) error { } l, err := storage.GetLockfile(locks.getLockPath(lck)) if err != nil { - return fmt.Errorf("error acquiring lock: %w", err) + return fmt.Errorf("acquiring lock: %w", err) } l.Unlock() diff --git a/libpod/networking_common.go b/libpod/networking_common.go new file mode 100644 index 000000000..fa444e26a --- /dev/null +++ b/libpod/networking_common.go @@ -0,0 +1,719 @@ +//go:build linux || freebsd +// +build linux freebsd + +package libpod + +import ( + "errors" + "fmt" + "regexp" + "sort" + + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + 
"github.com/containers/common/pkg/machine" + "github.com/containers/common/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/storage/pkg/lockfile" + "github.com/sirupsen/logrus" +) + +// convertPortMappings will remove the HostIP part from the ports when running inside podman machine. +// This is need because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. +// For machine the HostIP must only be used by gvproxy and never in the VM. +func (c *Container) convertPortMappings() []types.PortMapping { + if !machine.IsGvProxyBased() || len(c.config.PortMappings) == 0 { + return c.config.PortMappings + } + // if we run in a machine VM we have to ignore the host IP part + newPorts := make([]types.PortMapping, 0, len(c.config.PortMappings)) + for _, port := range c.config.PortMappings { + port.HostIP = "" + newPorts = append(newPorts, port) + } + return newPorts +} + +func (c *Container) getNetworkOptions(networkOpts map[string]types.PerNetworkOptions) types.NetworkOptions { + opts := types.NetworkOptions{ + ContainerID: c.config.ID, + ContainerName: getCNIPodName(c), + } + opts.PortMappings = c.convertPortMappings() + + // If the container requested special network options use this instead of the config. + // This is the case for container restore or network reload. + if c.perNetworkOpts != nil { + opts.Networks = c.perNetworkOpts + } else { + opts.Networks = networkOpts + } + return opts +} + +// setUpNetwork will set up the the networks, on error it will also tear down the cni +// networks. If rootless it will join/create the rootless network namespace. +func (r *Runtime) setUpNetwork(ns string, opts types.NetworkOptions) (map[string]types.StatusBlock, error) { + rootlessNetNS, err := r.GetRootlessNetNs(true) + if err != nil { + return nil, err + } + var results map[string]types.StatusBlock + setUpPod := func() error { + results, err = r.network.Setup(ns, types.SetupOptions{NetworkOptions: opts}) + return err + } + // rootlessNetNS is nil if we are root + if rootlessNetNS != nil { + // execute the setup in the rootless net ns + err = rootlessNetNS.Do(setUpPod) + rootlessNetNS.Lock.Unlock() + } else { + err = setUpPod() + } + return results, err +} + +// getCNIPodName return the pod name (hostname) used by CNI and the dnsname plugin. 
+// If we are in the pod network namespace, use the pod name; otherwise the container name +func getCNIPodName(c *Container) string { + if c.config.NetMode.IsPod() || c.IsInfra() { + pod, err := c.runtime.state.Pod(c.PodID()) + if err == nil { + return pod.Name() + } + } + return c.Name() +} + +// Tear down a container's network configuration and join the +// rootless net ns as a rootless user +func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error { + rootlessNetNS, err := r.GetRootlessNetNs(false) + if err != nil { + return err + } + tearDownPod := func() error { + if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil { + return fmt.Errorf("tearing down network namespace configuration for container %s: %w", opts.ContainerID, err) + } + return nil + } + + // rootlessNetNS is nil if we are root + if rootlessNetNS != nil { + // execute the teardown in the rootless net ns + err = rootlessNetNS.Do(tearDownPod) + if cerr := rootlessNetNS.Cleanup(r); cerr != nil { + logrus.WithError(err).Error("failed to clean up rootless netns") + } + rootlessNetNS.Lock.Unlock() + } else { + err = tearDownPod() + } + return err +} + +// Tear down a container's CNI network configuration, but do not tear down the +// namespace itself. +func (r *Runtime) teardownCNI(ctr *Container) error { + if ctr.state.NetNS == nil { + // The container has no network namespace, we're set + return nil + } + + logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) + + networks, err := ctr.networks() + if err != nil { + return err + } + + if !ctr.config.NetMode.IsSlirp4netns() && len(networks) > 0 { + netOpts := ctr.getNetworkOptions(networks) + return r.teardownNetwork(ctr.state.NetNS.Path(), netOpts) + } + return nil +} + +// isBridgeNetMode checks if the given network mode is bridge. +// It returns nil when it is set to bridge and an error otherwise. +func isBridgeNetMode(n namespaces.NetworkMode) error { + if !n.IsBridge() { + return fmt.Errorf("%q is not supported: %w", n, define.ErrNetworkModeInvalid) + } + return nil +} + +// Reload only works with containers with a configured network. +// It will tear down, and then reconfigure, the network of the container. +// This is mainly used when a reload of firewall rules wipes out existing +// firewall configuration. +// Efforts will be made to preserve MAC and IP addresses, but this only works if +// the container only joined a single CNI network, and was only assigned a +// single MAC or IP. +// Only works on root containers at present, though in the future we could +// extend this to stop + restart slirp4netns +func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) { + if ctr.state.NetNS == nil { + return nil, fmt.Errorf("container %s network is not configured, refusing to reload: %w", ctr.ID(), define.ErrCtrStateInvalid) + } + if err := isBridgeNetMode(ctr.config.NetMode); err != nil { + return nil, err + } + logrus.Infof("Going to reload container %s network", ctr.ID()) + + err := r.teardownCNI(ctr) + if err != nil { + // teardownCNI will error if the iptables rules do not exist, and this is the case after + // a firewall reload. The purpose of network reload is to recreate the rules if they do + // not exist, so we should not log this specific error as an error. This would confuse users otherwise. + // iptables-legacy and iptables-nft will create different errors; make sure to match both. 
+ b, rerr := regexp.MatchString("Couldn't load target `CNI-[a-f0-9]{24}':No such file or directory|Chain 'CNI-[a-f0-9]{24}' does not exist", err.Error()) + if rerr == nil && !b { + logrus.Error(err) + } else { + logrus.Info(err) + } + } + + networkOpts, err := ctr.networks() + if err != nil { + return nil, err + } + + // Set the same network settings as before. + netStatus := ctr.getNetworkStatus() + for network, perNetOpts := range networkOpts { + for name, netInt := range netStatus[network].Interfaces { + perNetOpts.InterfaceName = name + perNetOpts.StaticMAC = netInt.MacAddress + for _, netAddress := range netInt.Subnets { + perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.IPNet.IP) + } + // Normally there is only one interface; only some special CNI configs produce more. + // For now just use the first interface to get the IPs; this should be good enough for most cases. + break + } + networkOpts[network] = perNetOpts + } + ctr.perNetworkOpts = networkOpts + + return r.configureNetNS(ctr, ctr.state.NetNS) +} + +// Produce an InspectNetworkSettings containing information on the container +// network. +func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, error) { + if c.config.NetNsCtr != "" { + netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr) + if err != nil { + return nil, err + } + // see https://github.com/containers/podman/issues/10090 + // the container has to be locked for syncContainer() + netNsCtr.lock.Lock() + defer netNsCtr.lock.Unlock() + // Have to sync to ensure that state is populated + if err := netNsCtr.syncContainer(); err != nil { + return nil, err + } + logrus.Debugf("Container %s shares network namespace, retrieving network info of container %s", c.ID(), c.config.NetNsCtr) + + return netNsCtr.getContainerNetworkInfo() + } + + settings := new(define.InspectNetworkSettings) + settings.Ports = makeInspectPortBindings(c.config.PortMappings, c.config.ExposedPorts) + + networks, err := c.networks() + if err != nil { + return nil, err + } + + if c.state.NetNS == nil { + if networkNSPath := c.joinedNetworkNSPath(); networkNSPath != "" { + if result, err := c.inspectJoinedNetworkNS(networkNSPath); err == nil { + // fallback to dummy configuration + settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) + return settings, nil + } + // do not propagate error inspecting a joined network ns + logrus.Errorf("Inspecting network namespace: %s of container %s: %v", networkNSPath, c.ID(), err) + } + // We can't do more if the network is down. + + // We still want to make dummy configurations for each CNI net + // the container joined.
+ if len(networks) > 0 { + settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks)) + for net, opts := range networks { + cniNet := new(define.InspectAdditionalNetwork) + cniNet.NetworkID = net + cniNet.Aliases = opts.Aliases + settings.Networks[net] = cniNet + } + } + + return settings, nil + } + + // Set network namespace path + settings.SandboxKey = c.state.NetNS.Path() + + netStatus := c.getNetworkStatus() + // If this is empty, we're probably slirp4netns + if len(netStatus) == 0 { + return settings, nil + } + + // If we have networks - handle that here + if len(networks) > 0 { + if len(networks) != len(netStatus) { + return nil, fmt.Errorf("network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s): %w", len(networks), networks, len(netStatus), define.ErrInternal) + } + + settings.Networks = make(map[string]*define.InspectAdditionalNetwork) + + for name, opts := range networks { + result := netStatus[name] + addedNet := new(define.InspectAdditionalNetwork) + addedNet.NetworkID = name + addedNet.Aliases = opts.Aliases + addedNet.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) + + settings.Networks[name] = addedNet + } + + // if the container is connected to more than just the default network we can return here; + // otherwise we still have to populate the InspectBasicNetworkConfig settings below + _, isDefaultNet := networks[c.runtime.config.Network.DefaultNetwork] + if !(len(networks) == 1 && isDefaultNet) { + return settings, nil + } + } + + // If not joining networks, we should have at most 1 result + if len(netStatus) > 1 { + return nil, fmt.Errorf("should have at most 1 network status result if not joining networks, instead got %d: %w", len(netStatus), define.ErrInternal) + } + + if len(netStatus) == 1 { + for _, status := range netStatus { + settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(status) + } + } + return settings, nil +} + +// resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI +// result +func resultToBasicNetworkConfig(result types.StatusBlock) define.InspectBasicNetworkConfig { + config := define.InspectBasicNetworkConfig{} + interfaceNames := make([]string, 0, len(result.Interfaces)) + for interfaceName := range result.Interfaces { + interfaceNames = append(interfaceNames, interfaceName) + } + // ensure consistent inspect results by sorting + sort.Strings(interfaceNames) + for _, interfaceName := range interfaceNames { + netInt := result.Interfaces[interfaceName] + for _, netAddress := range netInt.Subnets { + size, _ := netAddress.IPNet.Mask.Size() + if netAddress.IPNet.IP.To4() != nil { + // ipv4 + if config.IPAddress == "" { + config.IPAddress = netAddress.IPNet.IP.String() + config.IPPrefixLen = size + config.Gateway = netAddress.Gateway.String() + } else { + config.SecondaryIPAddresses = append(config.SecondaryIPAddresses, define.Address{Addr: netAddress.IPNet.IP.String(), PrefixLength: size}) + } + } else { + // ipv6 + if config.GlobalIPv6Address == "" { + config.GlobalIPv6Address = netAddress.IPNet.IP.String() + config.GlobalIPv6PrefixLen = size + config.IPv6Gateway = netAddress.Gateway.String() + } else { + config.SecondaryIPv6Addresses = append(config.SecondaryIPv6Addresses, define.Address{Addr: netAddress.IPNet.IP.String(), PrefixLength: size}) + } + } + } + if config.MacAddress == "" { + config.MacAddress = netInt.MacAddress.String() + } else { + config.AdditionalMacAddresses = append(config.AdditionalMacAddresses, netInt.MacAddress.String()) + } + } + return config +} +
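Editorial aside, not part of the patch: a minimal sketch of how resultToBasicNetworkConfig fills the inspect fields for a single IPv4 interface. It would have to live in package libpod to reach the unexported helper; the addresses and MAC below are invented for illustration, and the field names follow github.com/containers/common/libnetwork/types as used above.

func exampleResultToBasicNetworkConfig() {
	// one interface with a single IPv4 subnet
	status := types.StatusBlock{
		Interfaces: map[string]types.NetInterface{
			"eth0": {
				MacAddress: types.HardwareAddr{0x52, 0x54, 0x00, 0x00, 0x00, 0x01},
				Subnets: []types.NetAddress{{
					IPNet:   types.IPNet{IPNet: net.IPNet{IP: net.ParseIP("10.88.0.2"), Mask: net.CIDRMask(16, 32)}},
					Gateway: net.ParseIP("10.88.0.1"),
				}},
			},
		},
	}
	cfg := resultToBasicNetworkConfig(status)
	// the first IPv4 subnet becomes IPAddress/IPPrefixLen/Gateway; further
	// subnets would land in SecondaryIPAddresses, IPv6 in the GlobalIPv6* fields
	fmt.Println(cfg.IPAddress, cfg.IPPrefixLen, cfg.Gateway, cfg.MacAddress)
	// expected: 10.88.0.2 16 10.88.0.1 52:54:00:00:00:01
}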
+// NetworkDisconnect removes a container from the network +func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) error { + // only bridge mode supports CNI networks + if err := isBridgeNetMode(c.config.NetMode); err != nil { + return err + } + + c.lock.Lock() + defer c.lock.Unlock() + + networks, err := c.networks() + if err != nil { + return err + } + + // check if the network exists; if the input is an ID, resolve it to the name + // CNI only uses names, so it is important that we only use the name + netName, err = c.runtime.normalizeNetworkName(netName) + if err != nil { + return err + } + + _, nameExists := networks[netName] + if !nameExists && len(networks) > 0 { + return fmt.Errorf("container %s is not connected to network %s", nameOrID, netName) + } + + if err := c.syncContainer(); err != nil { + return err + } + // get network status before we disconnect + networkStatus := c.getNetworkStatus() + + if err := c.runtime.state.NetworkDisconnect(c, netName); err != nil { + return err + } + + c.newNetworkEvent(events.NetworkDisconnect, netName) + if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { + return nil + } + + if c.state.NetNS == nil { + return fmt.Errorf("unable to disconnect %s from %s: %w", nameOrID, netName, define.ErrNoNetwork) + } + + opts := types.NetworkOptions{ + ContainerID: c.config.ID, + ContainerName: getCNIPodName(c), + } + opts.PortMappings = c.convertPortMappings() + opts.Networks = map[string]types.PerNetworkOptions{ + netName: networks[netName], + } + + if err := c.runtime.teardownNetwork(c.state.NetNS.Path(), opts); err != nil { + return err + } + + // update network status if container is running + oldStatus, statusExist := networkStatus[netName] + delete(networkStatus, netName) + c.state.NetworkStatus = networkStatus + err = c.save() + if err != nil { + return err + } + + // Reload ports when there are still connected networks; we may have removed the network interface with the child IP. + // Reloading without connected networks does not make sense, so we can skip this step. + if rootless.IsRootless() && len(networkStatus) > 0 { + if err := c.reloadRootlessRLKPortMapping(); err != nil { + return err + } + } + + // Update resolv.conf if required + if statusExist { + stringIPs := make([]string, 0, len(oldStatus.DNSServerIPs)) + for _, ip := range oldStatus.DNSServerIPs { + stringIPs = append(stringIPs, ip.String()) + } + if len(stringIPs) > 0 { + logrus.Debugf("Removing DNS Servers %v from resolv.conf", stringIPs) + if err := c.removeNameserver(stringIPs); err != nil { + return err + } + } + + // update /etc/hosts file + if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { + // sync the names with c.getHostsEntries() + names := []string{c.Hostname(), c.config.Name} + rm := etchosts.GetNetworkHostEntries(map[string]types.StatusBlock{netName: oldStatus}, names...)
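+ // rm now holds only the /etc/hosts entries that belonged to the disconnected network (its IPs paired with the container's names); entries owned by other networks are left in place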
+ if len(rm) > 0 { + // make sure to lock this file to prevent concurrent writes when + // this is used as a net dependency container + lock, err := lockfile.GetLockfile(file) + if err != nil { + return fmt.Errorf("failed to lock hosts file: %w", err) + } + logrus.Debugf("Remove /etc/hosts entries %v", rm) + lock.Lock() + err = etchosts.Remove(file, rm) + lock.Unlock() + if err != nil { + return err + } + } + } + } + return nil +} + +// NetworkConnect connects a container to a given network +func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNetworkOptions) error { + // only bridge mode supports CNI networks + if err := isBridgeNetMode(c.config.NetMode); err != nil { + return err + } + + c.lock.Lock() + defer c.lock.Unlock() + + networks, err := c.networks() + if err != nil { + return err + } + + // check if the network exists; if the input is an ID, resolve it to the name + // CNI only uses names, so it is important that we only use the name + netName, err = c.runtime.normalizeNetworkName(netName) + if err != nil { + return err + } + + if err := c.syncContainer(); err != nil { + return err + } + + // get network status before we connect + networkStatus := c.getNetworkStatus() + + // always add the short id as alias for docker compat + netOpts.Aliases = append(netOpts.Aliases, c.config.ID[:12]) + + if netOpts.InterfaceName == "" { + netOpts.InterfaceName = getFreeInterfaceName(networks) + if netOpts.InterfaceName == "" { + return errors.New("could not find free network interface name") + } + } + + if err := c.runtime.state.NetworkConnect(c, netName, netOpts); err != nil { + // Docker compat: treat requests to attach already attached networks as a no-op, ignoring opts + if errors.Is(err, define.ErrNetworkConnected) && c.ensureState(define.ContainerStateConfigured) { + return nil + } + + return err + } + c.newNetworkEvent(events.NetworkConnect, netName) + if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { + return nil + } + if c.state.NetNS == nil { + return fmt.Errorf("unable to connect %s to %s: %w", nameOrID, netName, define.ErrNoNetwork) + } + + opts := types.NetworkOptions{ + ContainerID: c.config.ID, + ContainerName: getCNIPodName(c), + } + opts.PortMappings = c.convertPortMappings() + opts.Networks = map[string]types.PerNetworkOptions{ + netName: netOpts, + } + + results, err := c.runtime.setUpNetwork(c.state.NetNS.Path(), opts) + if err != nil { + return err + } + if len(results) != 1 { + return errors.New("when adding aliases, results must be of length 1") + } + + // we need to get the old host entries before we add the new ones to the status; + // if we do not do it here we will get the wrong existing entries, which will throw off the logic. + // we could also copy the map but this does not seem worth it + // sync the hostNames with c.getHostsEntries() + hostNames := []string{c.Hostname(), c.config.Name} + oldHostEntries := etchosts.GetNetworkHostEntries(networkStatus, hostNames...) + + // update network status + if networkStatus == nil { + networkStatus = make(map[string]types.StatusBlock, 1) + } + networkStatus[netName] = results[netName] + c.state.NetworkStatus = networkStatus + + err = c.save() + if err != nil { + return err + } + + // The first network needs a port reload to set the correct child ip for the rootlessport process. + // Adding a second network does not require a port reload because the child ip is still valid.
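+ // note that networks was captured before the connect above, so an empty map means this was the container's first network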
+ if rootless.IsRootless() && len(networks) == 0 { + if err := c.reloadRootlessRLKPortMapping(); err != nil { + return err + } + } + + ipv6, err := c.checkForIPv6(networkStatus) + if err != nil { + return err + } + + // Update resolv.conf if required + stringIPs := make([]string, 0, len(results[netName].DNSServerIPs)) + for _, ip := range results[netName].DNSServerIPs { + if (ip.To4() == nil) && !ipv6 { + continue + } + stringIPs = append(stringIPs, ip.String()) + } + if len(stringIPs) > 0 { + logrus.Debugf("Adding DNS Servers %v to resolv.conf", stringIPs) + if err := c.addNameserver(stringIPs); err != nil { + return err + } + } + + // update /etc/hosts file + if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { + // make sure to lock this file to prevent concurrent writes when + // this is used a net dependency container + lock, err := lockfile.GetLockfile(file) + if err != nil { + return fmt.Errorf("failed to lock hosts file: %w", err) + } + new := etchosts.GetNetworkHostEntries(results, hostNames...) + logrus.Debugf("Add /etc/hosts entries %v", new) + // use special AddIfExists API to make sure we only add new entries if an old one exists + // see the AddIfExists() comment for more information + lock.Lock() + err = etchosts.AddIfExists(file, oldHostEntries, new) + lock.Unlock() + if err != nil { + return err + } + } + + return nil +} + +// get a free interface name for a new network +// return an empty string if no free name was found +func getFreeInterfaceName(networks map[string]types.PerNetworkOptions) string { + ifNames := make([]string, 0, len(networks)) + for _, opts := range networks { + ifNames = append(ifNames, opts.InterfaceName) + } + for i := 0; i < 100000; i++ { + ifName := fmt.Sprintf("eth%d", i) + if !util.StringInSlice(ifName, ifNames) { + return ifName + } + } + return "" +} + +// DisconnectContainerFromNetwork removes a container from its CNI network +func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error { + ctr, err := r.LookupContainer(nameOrID) + if err != nil { + return err + } + return ctr.NetworkDisconnect(nameOrID, netName, force) +} + +// ConnectContainerToNetwork connects a container to a CNI network +func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, netOpts types.PerNetworkOptions) error { + ctr, err := r.LookupContainer(nameOrID) + if err != nil { + return err + } + return ctr.NetworkConnect(nameOrID, netName, netOpts) +} + +// normalizeNetworkName takes a network name, a partial or a full network ID and returns the network name. +// If the network is not found a errors is returned. 
+func (r *Runtime) normalizeNetworkName(nameOrID string) (string, error) { + net, err := r.network.NetworkInspect(nameOrID) + if err != nil { + return "", err + } + return net.Name, nil +} + +// ocicniPortsToNetTypesPorts converts the old port format to the new one +// while deduplicating ports into ranges +func ocicniPortsToNetTypesPorts(ports []types.OCICNIPortMapping) []types.PortMapping { + if len(ports) == 0 { + return nil + } + + newPorts := make([]types.PortMapping, 0, len(ports)) + + // first sort the ports + sort.Slice(ports, func(i, j int) bool { + return compareOCICNIPorts(ports[i], ports[j]) + }) + + // we already checked that the slice is not empty, so we can use the first element + currentPort := types.PortMapping{ + HostIP: ports[0].HostIP, + HostPort: uint16(ports[0].HostPort), + ContainerPort: uint16(ports[0].ContainerPort), + Protocol: ports[0].Protocol, + Range: 1, + } + + for i := 1; i < len(ports); i++ { + if ports[i].HostIP == currentPort.HostIP && + ports[i].Protocol == currentPort.Protocol && + ports[i].HostPort-int32(currentPort.Range) == int32(currentPort.HostPort) && + ports[i].ContainerPort-int32(currentPort.Range) == int32(currentPort.ContainerPort) { + currentPort.Range++ + } else { + newPorts = append(newPorts, currentPort) + currentPort = types.PortMapping{ + HostIP: ports[i].HostIP, + HostPort: uint16(ports[i].HostPort), + ContainerPort: uint16(ports[i].ContainerPort), + Protocol: ports[i].Protocol, + Range: 1, + } + } + } + newPorts = append(newPorts, currentPort) + return newPorts +} + +// compareOCICNIPorts will sort the ocicni ports by +// 1) host ip +// 2) protocol +// 3) hostPort +// 4) container port +func compareOCICNIPorts(i, j types.OCICNIPortMapping) bool { + if i.HostIP != j.HostIP { + return i.HostIP < j.HostIP + } + + if i.Protocol != j.Protocol { + return i.Protocol < j.Protocol + } + + if i.HostPort != j.HostPort { + return i.HostPort < j.HostPort + } + + return i.ContainerPort < j.ContainerPort +} diff --git a/libpod/networking_freebsd.go b/libpod/networking_freebsd.go new file mode 100644 index 000000000..230efc99d --- /dev/null +++ b/libpod/networking_freebsd.go @@ -0,0 +1,268 @@ +//go:build freebsd +// +build freebsd + +package libpod + +import ( + "crypto/rand" + jdec "encoding/json" + "errors" + "fmt" + "net" + "os/exec" + "path/filepath" + + "github.com/containers/buildah/pkg/jail" + "github.com/containers/common/libnetwork/types" + "github.com/containers/storage/pkg/lockfile" + "github.com/sirupsen/logrus" +) + +type Netstat struct { + Statistics NetstatInterface `json:"statistics"` +} + +type NetstatInterface struct { + Interface []NetstatAddress `json:"interface"` +} + +type NetstatAddress struct { + Name string `json:"name"` + Flags string `json:"flags"` + Mtu int `json:"mtu"` + Network string `json:"network"` + Address string `json:"address"` + + ReceivedPackets uint64 `json:"received-packets"` + ReceivedBytes uint64 `json:"received-bytes"` + ReceivedErrors uint64 `json:"received-errors"` + + SentPackets uint64 `json:"sent-packets"` + SentBytes uint64 `json:"sent-bytes"` + SentErrors uint64 `json:"send-errors"` + + DroppedPackets uint64 `json:"dropped-packets"` + + Collisions uint64 `json:"collisions"` +} + +// copied from github.com/vishvananda/netlink which does not build on freebsd +type LinkStatistics64 struct { + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + RxErrors uint64 + TxErrors uint64 + RxDropped uint64 + TxDropped uint64 + Multicast uint64 + Collisions uint64 + RxLengthErrors uint64 + RxOverErrors
uint64 + RxCrcErrors uint64 + RxFrameErrors uint64 + RxFifoErrors uint64 + RxMissedErrors uint64 + TxAbortedErrors uint64 + TxCarrierErrors uint64 + TxFifoErrors uint64 + TxHeartbeatErrors uint64 + TxWindowErrors uint64 + RxCompressed uint64 + TxCompressed uint64 +} + +type RootlessNetNS struct { + dir string + Lock lockfile.Locker +} + +// getPath will join the given path to the rootless netns dir +func (r *RootlessNetNS) getPath(path string) string { + return filepath.Join(r.dir, path) +} + +// Do - run the given function in the rootless netns. +// It does not lock the rootlessCNI lock; the caller +// should only lock when needed, e.g. for cni operations. +func (r *RootlessNetNS) Do(toRun func() error) error { + return errors.New("not supported on freebsd") +} + +// Cleanup the rootless network namespace if needed. +// It checks if we have running containers with the bridge network mode. +// Cleanup() expects that r.Lock is locked +func (r *RootlessNetNS) Cleanup(runtime *Runtime) error { + return errors.New("not supported on freebsd") +} + +// GetRootlessNetNs returns the rootless netns object. If new is set to true, +// the rootless network namespace will be created if it does not exist already. +// If called as root it always returns nil. +// On success the returned RootlessCNI lock is locked and must be unlocked by the caller. +func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { + return nil, nil +} + +func GetSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) { + return nil, errors.New("not implemented GetSlirp4netnsIP") +} + +// While there is code in container_internal.go which calls this, in +// my testing network creation always seems to go through createNetNS. +func (r *Runtime) setupNetNS(ctr *Container) error { + return errors.New("not implemented (*Runtime) setupNetNS") +} + +// Configure the given vnet jail as the network namespace for a container +func (r *Runtime) configureNetNS(ctr *Container, ctrNS *jailNetNS) (status map[string]types.StatusBlock, rerr error) { + if err := r.exposeMachinePorts(ctr.config.PortMappings); err != nil { + return nil, err + } + defer func() { + // make sure to unexpose the gvproxy ports when an error happens + if rerr != nil { + if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { + logrus.Errorf("failed to free gvproxy machine ports: %v", err) + } + } + }() + networks, err := ctr.networks() + if err != nil { + return nil, err + } + // All networks have been removed from the container. + // This is effectively forcing net=none.
+ if len(networks) == 0 { + return nil, nil + } + + netOpts := ctr.getNetworkOptions(networks) + netStatus, err := r.setUpNetwork(ctrNS.Name, netOpts) + if err != nil { + return nil, err + } + + return netStatus, nil +} + +// Create and configure a new network namespace for a container +func (r *Runtime) createNetNS(ctr *Container) (n *jailNetNS, q map[string]types.StatusBlock, retErr error) { + b := make([]byte, 16) + _, err := rand.Reader.Read(b) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate random vnet name: %w", err) + } + ctrNS := &jailNetNS{Name: fmt.Sprintf("vnet-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])} + + jconf := jail.NewConfig() + jconf.Set("name", ctrNS.Name) + jconf.Set("vnet", jail.NEW) + jconf.Set("children.max", 1) + jconf.Set("persist", true) + jconf.Set("enforce_statfs", 0) + jconf.Set("devfs_ruleset", 4) + jconf.Set("allow.raw_sockets", true) + jconf.Set("allow.chflags", true) + jconf.Set("securelevel", -1) + if _, err := jail.Create(jconf); err != nil { + // do not continue with a half-configured network; everything below needs the vnet jail + return nil, nil, fmt.Errorf("creating vnet jail %s for container %s: %w", ctrNS.Name, ctr.ID(), err) + } + + logrus.Debugf("Created vnet jail %s for container %s", ctrNS.Name, ctr.ID()) + + var networkStatus map[string]types.StatusBlock + networkStatus, err = r.configureNetNS(ctr, ctrNS) + return ctrNS, networkStatus, err +} + +// Tear down a network namespace, undoing all state associated with it. +func (r *Runtime) teardownNetNS(ctr *Container) error { + if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { + // do not return an error, otherwise we would prevent network cleanup + logrus.Errorf("failed to free gvproxy machine ports: %v", err) + } + if err := r.teardownCNI(ctr); err != nil { + return err + } + + if ctr.state.NetNS != nil { + // Rather than destroying the jail immediately, reset the + // persist flag so that it will live until the container is + // done.
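+ // with persist off, the kernel removes the vnet jail once its last process exits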
+ netjail, err := jail.FindByName(ctr.state.NetNS.Name) + if err != nil { + return fmt.Errorf("finding network jail %s: %w", ctr.state.NetNS.Name, err) + } + jconf := jail.NewConfig() + jconf.Set("persist", false) + if err := netjail.Set(jconf); err != nil { + return fmt.Errorf("releasing network jail %s: %w", ctr.state.NetNS.Name, err) + } + + ctr.state.NetNS = nil + } + + return nil +} + +func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { + if ctr.state.NetNS == nil { + // If NetNS is nil, the network was set to none and no netNS + // was set up. This is a valid state, so return no error + // and no statistics. + return nil, nil + } + + // FIXME get the interface from the container netstatus + cmd := exec.Command("jexec", ctr.state.NetNS.Name, "netstat", "-bI", "eth0", "--libxo", "json") + out, err := cmd.Output() + if err != nil { + return nil, err + } + stats := Netstat{} + if err := jdec.Unmarshal(out, &stats); err != nil { + return nil, err + } + + // Find the link stats + for _, ifaddr := range stats.Statistics.Interface { + if ifaddr.Mtu > 0 { + return &LinkStatistics64{ + RxPackets: ifaddr.ReceivedPackets, + TxPackets: ifaddr.SentPackets, + RxBytes: ifaddr.ReceivedBytes, + TxBytes: ifaddr.SentBytes, + RxErrors: ifaddr.ReceivedErrors, + TxErrors: ifaddr.SentErrors, + RxDropped: ifaddr.DroppedPackets, + Collisions: ifaddr.Collisions, + }, nil + } + } + + return &LinkStatistics64{}, nil +} + +func (c *Container) joinedNetworkNSPath() string { + if c.state.NetNS != nil { + return c.state.NetNS.Name + } + return "" +} + +func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { + // TODO: extract interface information from the vnet jail + return types.StatusBlock{}, nil +} + +func (c *Container) reloadRootlessRLKPortMapping() error { + return errors.New("unsupported (*Container).reloadRootlessRLKPortMapping") +} + +func (c *Container) setupRootlessNetwork() error { + return nil +} diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index c10c3c0b2..e27ec8e9d 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -13,25 +13,17 @@ import ( "os" "os/exec" "path/filepath" - "regexp" - "sort" "strconv" "strings" "syscall" "time" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/common/libnetwork/etchosts" "github.com/containers/common/libnetwork/resolvconf" "github.com/containers/common/libnetwork/types" - "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/machine" "github.com/containers/common/pkg/netns" "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/namespaces" "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/utils" "github.com/containers/storage/pkg/lockfile" @@ -59,39 +51,6 @@ const ( persistentCNIDir = "/var/lib/cni" ) -// convertPortMappings will remove the HostIP part from the ports when running inside podman machine. -// This is need because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. -// For machine the HostIP must only be used by gvproxy and never in the VM.
-func (c *Container) convertPortMappings() []types.PortMapping { - if !machine.IsGvProxyBased() || len(c.config.PortMappings) == 0 { - return c.config.PortMappings - } - // if we run in a machine VM we have to ignore the host IP part - newPorts := make([]types.PortMapping, 0, len(c.config.PortMappings)) - for _, port := range c.config.PortMappings { - port.HostIP = "" - newPorts = append(newPorts, port) - } - return newPorts -} - -func (c *Container) getNetworkOptions(networkOpts map[string]types.PerNetworkOptions) types.NetworkOptions { - opts := types.NetworkOptions{ - ContainerID: c.config.ID, - ContainerName: getCNIPodName(c), - } - opts.PortMappings = c.convertPortMappings() - - // If the container requested special network options use this instead of the config. - // This is the case for container restore or network reload. - if c.perNetworkOpts != nil { - opts.Networks = c.perNetworkOpts - } else { - opts.Networks = networkOpts - } - return opts -} - type RootlessNetNS struct { ns ns.NetNS dir string @@ -354,7 +313,7 @@ func (r *RootlessNetNS) Cleanup(runtime *Runtime) error { } } if err != nil { - logrus.Errorf("Failed to kill slirp4netns process: %s", err) + logrus.Errorf("Failed to kill slirp4netns process: %v", err) } err = os.RemoveAll(r.dir) if err != nil { @@ -411,13 +370,13 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { if err != nil { if !new { // return a error if we could not get the namespace and should no create one - return nil, fmt.Errorf("error getting rootless network namespace: %w", err) + return nil, fmt.Errorf("getting rootless network namespace: %w", err) } // create a new namespace logrus.Debugf("creating rootless network namespace with name %q", netnsName) ns, err = netns.NewNSWithName(netnsName) if err != nil { - return nil, fmt.Errorf("error creating rootless network namespace: %w", err) + return nil, fmt.Errorf("creating rootless network namespace: %w", err) } // set up slirp4netns here path := r.config.Engine.NetworkCmdPath @@ -442,7 +401,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { } slirpFeatures, err := checkSlirpFlags(path) if err != nil { - return nil, fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err) + return nil, fmt.Errorf("checking slirp4netns binary %s: %q: %w", path, err, err) } cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures) if err != nil { @@ -589,41 +548,6 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { return rootlessNetNS, nil } -// setUpNetwork will set up the the networks, on error it will also tear down the cni -// networks. If rootless it will join/create the rootless network namespace. -func (r *Runtime) setUpNetwork(ns string, opts types.NetworkOptions) (map[string]types.StatusBlock, error) { - rootlessNetNS, err := r.GetRootlessNetNs(true) - if err != nil { - return nil, err - } - var results map[string]types.StatusBlock - setUpPod := func() error { - results, err = r.network.Setup(ns, types.SetupOptions{NetworkOptions: opts}) - return err - } - // rootlessNetNS is nil if we are root - if rootlessNetNS != nil { - // execute the setup in the rootless net ns - err = rootlessNetNS.Do(setUpPod) - rootlessNetNS.Lock.Unlock() - } else { - err = setUpPod() - } - return results, err -} - -// getCNIPodName return the pod name (hostname) used by CNI and the dnsname plugin. 
-// If we are in the pod network namespace use the pod name otherwise the container name -func getCNIPodName(c *Container) string { - if c.config.NetMode.IsPod() || c.IsInfra() { - pod, err := c.runtime.state.Pod(c.PodID()) - if err == nil { - return pod.Name() - } - } - return c.Name() -} - // Create and configure a new network namespace for a container func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[string]types.StatusBlock, rerr error) { if err := r.exposeMachinePorts(ctr.config.PortMappings); err != nil { @@ -675,7 +599,7 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.StatusBlock, retErr error) { ctrNS, err := netns.NewNS() if err != nil { - return nil, nil, fmt.Errorf("error creating network namespace for container %s: %w", ctr.ID(), err) + return nil, nil, fmt.Errorf("creating network namespace for container %s: %w", ctr.ID(), err) } defer func() { if retErr != nil { @@ -742,7 +666,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error { func joinNetNS(path string) (ns.NetNS, error) { netNS, err := ns.GetNS(path) if err != nil { - return nil, fmt.Errorf("error retrieving network namespace at %s: %w", path, err) + return nil, fmt.Errorf("retrieving network namespace at %s: %w", path, err) } return netNS, nil @@ -758,7 +682,7 @@ func (r *Runtime) closeNetNS(ctr *Container) error { } if err := ctr.state.NetNS.Close(); err != nil { - return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err) + return fmt.Errorf("closing network namespace for container %s: %w", ctr.ID(), err) } ctr.state.NetNS = nil @@ -766,56 +690,6 @@ func (r *Runtime) closeNetNS(ctr *Container) error { return nil } -// Tear down a container's network configuration and joins the -// rootless net ns as rootless user -func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error { - rootlessNetNS, err := r.GetRootlessNetNs(false) - if err != nil { - return err - } - tearDownPod := func() error { - if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil { - return fmt.Errorf("error tearing down network namespace configuration for container %s: %w", opts.ContainerID, err) - } - return nil - } - - // rootlessNetNS is nil if we are root - if rootlessNetNS != nil { - // execute the cni setup in the rootless net ns - err = rootlessNetNS.Do(tearDownPod) - if cerr := rootlessNetNS.Cleanup(r); cerr != nil { - logrus.WithError(err).Error("failed to clean up rootless netns") - } - rootlessNetNS.Lock.Unlock() - } else { - err = tearDownPod() - } - return err -} - -// Tear down a container's CNI network configuration, but do not tear down the -// namespace itself. -func (r *Runtime) teardownCNI(ctr *Container) error { - if ctr.state.NetNS == nil { - // The container has no network namespace, we're set - return nil - } - - logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) - - networks, err := ctr.networks() - if err != nil { - return err - } - - if !ctr.config.NetMode.IsSlirp4netns() && len(networks) > 0 { - netOpts := ctr.getNetworkOptions(networks) - return r.teardownNetwork(ctr.state.NetNS.Path(), netOpts) - } - return nil -} - // Tear down a network namespace, undoing all state associated with it. 
func (r *Runtime) teardownNetNS(ctr *Container) error { if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { @@ -828,12 +702,12 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { // First unmount the namespace if err := netns.UnmountNS(ctr.state.NetNS); err != nil { - return fmt.Errorf("error unmounting network namespace for container %s: %w", ctr.ID(), err) + return fmt.Errorf("unmounting network namespace for container %s: %w", ctr.ID(), err) } // Now close the open file descriptor if err := ctr.state.NetNS.Close(); err != nil { - return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err) + return fmt.Errorf("closing network namespace for container %s: %w", ctr.ID(), err) } ctr.state.NetNS = nil @@ -862,72 +736,6 @@ func getContainerNetNS(ctr *Container) (string, *Container, error) { return "", nil, nil } -// isBridgeNetMode checks if the given network mode is bridge. -// It returns nil when it is set to bridge and an error otherwise. -func isBridgeNetMode(n namespaces.NetworkMode) error { - if !n.IsBridge() { - return fmt.Errorf("%q is not supported: %w", n, define.ErrNetworkModeInvalid) - } - return nil -} - -// Reload only works with containers with a configured network. -// It will tear down, and then reconfigure, the network of the container. -// This is mainly used when a reload of firewall rules wipes out existing -// firewall configuration. -// Efforts will be made to preserve MAC and IP addresses, but this only works if -// the container only joined a single CNI network, and was only assigned a -// single MAC or IP. -// Only works on root containers at present, though in the future we could -// extend this to stop + restart slirp4netns -func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) { - if ctr.state.NetNS == nil { - return nil, fmt.Errorf("container %s network is not configured, refusing to reload: %w", ctr.ID(), define.ErrCtrStateInvalid) - } - if err := isBridgeNetMode(ctr.config.NetMode); err != nil { - return nil, err - } - logrus.Infof("Going to reload container %s network", ctr.ID()) - - err := r.teardownCNI(ctr) - if err != nil { - // teardownCNI will error if the iptables rules do not exists and this is the case after - // a firewall reload. The purpose of network reload is to recreate the rules if they do - // not exists so we should not log this specific error as error. This would confuse users otherwise. - // iptables-legacy and iptables-nft will create different errors make sure to match both. - b, rerr := regexp.MatchString("Couldn't load target `CNI-[a-f0-9]{24}':No such file or directory|Chain 'CNI-[a-f0-9]{24}' does not exist", err.Error()) - if rerr == nil && !b { - logrus.Error(err) - } else { - logrus.Info(err) - } - } - - networkOpts, err := ctr.networks() - if err != nil { - return nil, err - } - - // Set the same network settings as before.. - netStatus := ctr.getNetworkStatus() - for network, perNetOpts := range networkOpts { - for name, netInt := range netStatus[network].Interfaces { - perNetOpts.InterfaceName = name - perNetOpts.StaticMAC = netInt.MacAddress - for _, netAddress := range netInt.Subnets { - perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.IPNet.IP) - } - // Normally interfaces have a length of 1, only for some special cni configs we could get more. - // For now just use the first interface to get the ips this should be good enough for most cases. 
- break - } - networkOpts[network] = perNetOpts - } - ctr.perNetworkOpts = networkOpts - - return r.configureNetNS(ctr, ctr.state.NetNS) -} - // TODO (5.0): return the statistics per network interface // This would allow better compat with docker. func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { @@ -981,110 +789,6 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { return netStats, err } -// Produce an InspectNetworkSettings containing information on the container -// network. -func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, error) { - if c.config.NetNsCtr != "" { - netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr) - if err != nil { - return nil, err - } - // see https://github.com/containers/podman/issues/10090 - // the container has to be locked for syncContainer() - netNsCtr.lock.Lock() - defer netNsCtr.lock.Unlock() - // Have to sync to ensure that state is populated - if err := netNsCtr.syncContainer(); err != nil { - return nil, err - } - logrus.Debugf("Container %s shares network namespace, retrieving network info of container %s", c.ID(), c.config.NetNsCtr) - - return netNsCtr.getContainerNetworkInfo() - } - - settings := new(define.InspectNetworkSettings) - settings.Ports = makeInspectPortBindings(c.config.PortMappings, c.config.ExposedPorts) - - networks, err := c.networks() - if err != nil { - return nil, err - } - - if c.state.NetNS == nil { - if networkNSPath := c.joinedNetworkNSPath(); networkNSPath != "" { - if result, err := c.inspectJoinedNetworkNS(networkNSPath); err == nil { - // fallback to dummy configuration - settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) - return settings, nil - } - // do not propagate error inspecting a joined network ns - logrus.Errorf("Inspecting network namespace: %s of container %s: %v", networkNSPath, c.ID(), err) - } - // We can't do more if the network is down. - - // We still want to make dummy configurations for each CNI net - // the container joined. 
- if len(networks) > 0 { - settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks)) - for net, opts := range networks { - cniNet := new(define.InspectAdditionalNetwork) - cniNet.NetworkID = net - cniNet.Aliases = opts.Aliases - settings.Networks[net] = cniNet - } - } - - return settings, nil - } - - // Set network namespace path - settings.SandboxKey = c.state.NetNS.Path() - - netStatus := c.getNetworkStatus() - // If this is empty, we're probably slirp4netns - if len(netStatus) == 0 { - return settings, nil - } - - // If we have networks - handle that here - if len(networks) > 0 { - if len(networks) != len(netStatus) { - return nil, fmt.Errorf("network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s): %w", len(networks), networks, len(netStatus), define.ErrInternal) - } - - settings.Networks = make(map[string]*define.InspectAdditionalNetwork) - - for name, opts := range networks { - result := netStatus[name] - addedNet := new(define.InspectAdditionalNetwork) - addedNet.NetworkID = name - addedNet.Aliases = opts.Aliases - addedNet.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) - - settings.Networks[name] = addedNet - } - - // if not only the default network is connected we can return here - // otherwise we have to populate the InspectBasicNetworkConfig settings - _, isDefaultNet := networks[c.runtime.config.Network.DefaultNetwork] - if !(len(networks) == 1 && isDefaultNet) { - return settings, nil - } - } - - // If not joining networks, we should have at most 1 result - if len(netStatus) > 1 { - return nil, fmt.Errorf("should have at most 1 network status result if not joining networks, instead got %d: %w", len(netStatus), define.ErrInternal) - } - - if len(netStatus) == 1 { - for _, status := range netStatus { - settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(status) - } - } - return settings, nil -} - func (c *Container) joinedNetworkNSPath() string { for _, namespace := range c.config.Spec.Linux.Namespaces { if namespace.Type == specs.NetworkNamespace { @@ -1151,49 +855,6 @@ func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBloc return result, err } -// resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI -// result -func resultToBasicNetworkConfig(result types.StatusBlock) define.InspectBasicNetworkConfig { - config := define.InspectBasicNetworkConfig{} - interfaceNames := make([]string, 0, len(result.Interfaces)) - for interfaceName := range result.Interfaces { - interfaceNames = append(interfaceNames, interfaceName) - } - // ensure consistent inspect results by sorting - sort.Strings(interfaceNames) - for _, interfaceName := range interfaceNames { - netInt := result.Interfaces[interfaceName] - for _, netAddress := range netInt.Subnets { - size, _ := netAddress.IPNet.Mask.Size() - if netAddress.IPNet.IP.To4() != nil { - // ipv4 - if config.IPAddress == "" { - config.IPAddress = netAddress.IPNet.IP.String() - config.IPPrefixLen = size - config.Gateway = netAddress.Gateway.String() - } else { - config.SecondaryIPAddresses = append(config.SecondaryIPAddresses, define.Address{Addr: netAddress.IPNet.IP.String(), PrefixLength: size}) - } - } else { - // ipv6 - if config.GlobalIPv6Address == "" { - config.GlobalIPv6Address = netAddress.IPNet.IP.String() - config.GlobalIPv6PrefixLen = size - config.IPv6Gateway = netAddress.Gateway.String() - } else { - config.SecondaryIPv6Addresses = append(config.SecondaryIPv6Addresses, define.Address{Addr: 
netAddress.IPNet.IP.String(), PrefixLength: size}) - } - } - } - if config.MacAddress == "" { - config.MacAddress = netInt.MacAddress.String() - } else { - config.AdditionalMacAddresses = append(config.AdditionalMacAddresses, netInt.MacAddress.String()) - } - } - return config -} - type logrusDebugWriter struct { prefix string } @@ -1202,368 +863,3 @@ func (w *logrusDebugWriter) Write(p []byte) (int, error) { logrus.Debugf("%s%s", w.prefix, string(p)) return len(p), nil } - -// NetworkDisconnect removes a container from the network -func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) error { - // only the bridge mode supports cni networks - if err := isBridgeNetMode(c.config.NetMode); err != nil { - return err - } - - c.lock.Lock() - defer c.lock.Unlock() - - networks, err := c.networks() - if err != nil { - return err - } - - // check if network exists and if the input is a ID we get the name - // CNI only uses names so it is important that we only use the name - netName, err = c.runtime.normalizeNetworkName(netName) - if err != nil { - return err - } - - _, nameExists := networks[netName] - if !nameExists && len(networks) > 0 { - return fmt.Errorf("container %s is not connected to network %s", nameOrID, netName) - } - - if err := c.syncContainer(); err != nil { - return err - } - // get network status before we disconnect - networkStatus := c.getNetworkStatus() - - if err := c.runtime.state.NetworkDisconnect(c, netName); err != nil { - return err - } - - c.newNetworkEvent(events.NetworkDisconnect, netName) - if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { - return nil - } - - if c.state.NetNS == nil { - return fmt.Errorf("unable to disconnect %s from %s: %w", nameOrID, netName, define.ErrNoNetwork) - } - - opts := types.NetworkOptions{ - ContainerID: c.config.ID, - ContainerName: getCNIPodName(c), - } - opts.PortMappings = c.convertPortMappings() - opts.Networks = map[string]types.PerNetworkOptions{ - netName: networks[netName], - } - - if err := c.runtime.teardownNetwork(c.state.NetNS.Path(), opts); err != nil { - return err - } - - // update network status if container is running - oldStatus, statusExist := networkStatus[netName] - delete(networkStatus, netName) - c.state.NetworkStatus = networkStatus - err = c.save() - if err != nil { - return err - } - - // Reload ports when there are still connected networks, maybe we removed the network interface with the child ip. - // Reloading without connected networks does not make sense, so we can skip this step. - if rootless.IsRootless() && len(networkStatus) > 0 { - if err := c.reloadRootlessRLKPortMapping(); err != nil { - return err - } - } - - // Update resolv.conf if required - if statusExist { - stringIPs := make([]string, 0, len(oldStatus.DNSServerIPs)) - for _, ip := range oldStatus.DNSServerIPs { - stringIPs = append(stringIPs, ip.String()) - } - if len(stringIPs) > 0 { - logrus.Debugf("Removing DNS Servers %v from resolv.conf", stringIPs) - if err := c.removeNameserver(stringIPs); err != nil { - return err - } - } - - // update /etc/hosts file - if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { - // sync the names with c.getHostsEntries() - names := []string{c.Hostname(), c.config.Name} - rm := etchosts.GetNetworkHostEntries(map[string]types.StatusBlock{netName: oldStatus}, names...) 
- if len(rm) > 0 { - // make sure to lock this file to prevent concurrent writes when - // this is used a net dependency container - lock, err := lockfile.GetLockfile(file) - if err != nil { - return fmt.Errorf("failed to lock hosts file: %w", err) - } - logrus.Debugf("Remove /etc/hosts entries %v", rm) - lock.Lock() - err = etchosts.Remove(file, rm) - lock.Unlock() - if err != nil { - return err - } - } - } - } - return nil -} - -// ConnectNetwork connects a container to a given network -func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNetworkOptions) error { - // only the bridge mode supports cni networks - if err := isBridgeNetMode(c.config.NetMode); err != nil { - return err - } - - c.lock.Lock() - defer c.lock.Unlock() - - networks, err := c.networks() - if err != nil { - return err - } - - // check if network exists and if the input is a ID we get the name - // CNI only uses names so it is important that we only use the name - netName, err = c.runtime.normalizeNetworkName(netName) - if err != nil { - return err - } - - if err := c.syncContainer(); err != nil { - return err - } - - // get network status before we connect - networkStatus := c.getNetworkStatus() - - // always add the short id as alias for docker compat - netOpts.Aliases = append(netOpts.Aliases, c.config.ID[:12]) - - if netOpts.InterfaceName == "" { - netOpts.InterfaceName = getFreeInterfaceName(networks) - if netOpts.InterfaceName == "" { - return errors.New("could not find free network interface name") - } - } - - if err := c.runtime.state.NetworkConnect(c, netName, netOpts); err != nil { - // Docker compat: treat requests to attach already attached networks as a no-op, ignoring opts - if errors.Is(err, define.ErrNetworkConnected) && c.ensureState(define.ContainerStateConfigured) { - return nil - } - - return err - } - c.newNetworkEvent(events.NetworkConnect, netName) - if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { - return nil - } - if c.state.NetNS == nil { - return fmt.Errorf("unable to connect %s to %s: %w", nameOrID, netName, define.ErrNoNetwork) - } - - opts := types.NetworkOptions{ - ContainerID: c.config.ID, - ContainerName: getCNIPodName(c), - } - opts.PortMappings = c.convertPortMappings() - opts.Networks = map[string]types.PerNetworkOptions{ - netName: netOpts, - } - - results, err := c.runtime.setUpNetwork(c.state.NetNS.Path(), opts) - if err != nil { - return err - } - if len(results) != 1 { - return errors.New("when adding aliases, results must be of length 1") - } - - // we need to get the old host entries before we add the new one to the status - // if we do not add do it here we will get the wrong existing entries which will throw of the logic - // we could also copy the map but this does not seem worth it - // sync the hostNames with c.getHostsEntries() - hostNames := []string{c.Hostname(), c.config.Name} - oldHostEntries := etchosts.GetNetworkHostEntries(networkStatus, hostNames...) - - // update network status - if networkStatus == nil { - networkStatus = make(map[string]types.StatusBlock, 1) - } - networkStatus[netName] = results[netName] - c.state.NetworkStatus = networkStatus - - err = c.save() - if err != nil { - return err - } - - // The first network needs a port reload to set the correct child ip for the rootlessport process. - // Adding a second network does not require a port reload because the child ip is still valid. 
- if rootless.IsRootless() && len(networks) == 0 { - if err := c.reloadRootlessRLKPortMapping(); err != nil { - return err - } - } - - ipv6, err := c.checkForIPv6(networkStatus) - if err != nil { - return err - } - - // Update resolv.conf if required - stringIPs := make([]string, 0, len(results[netName].DNSServerIPs)) - for _, ip := range results[netName].DNSServerIPs { - if (ip.To4() == nil) && !ipv6 { - continue - } - stringIPs = append(stringIPs, ip.String()) - } - if len(stringIPs) > 0 { - logrus.Debugf("Adding DNS Servers %v to resolv.conf", stringIPs) - if err := c.addNameserver(stringIPs); err != nil { - return err - } - } - - // update /etc/hosts file - if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { - // make sure to lock this file to prevent concurrent writes when - // this is used a net dependency container - lock, err := lockfile.GetLockfile(file) - if err != nil { - return fmt.Errorf("failed to lock hosts file: %w", err) - } - new := etchosts.GetNetworkHostEntries(results, hostNames...) - logrus.Debugf("Add /etc/hosts entries %v", new) - // use special AddIfExists API to make sure we only add new entries if an old one exists - // see the AddIfExists() comment for more information - lock.Lock() - err = etchosts.AddIfExists(file, oldHostEntries, new) - lock.Unlock() - if err != nil { - return err - } - } - - return nil -} - -// get a free interface name for a new network -// return an empty string if no free name was found -func getFreeInterfaceName(networks map[string]types.PerNetworkOptions) string { - ifNames := make([]string, 0, len(networks)) - for _, opts := range networks { - ifNames = append(ifNames, opts.InterfaceName) - } - for i := 0; i < 100000; i++ { - ifName := fmt.Sprintf("eth%d", i) - if !util.StringInSlice(ifName, ifNames) { - return ifName - } - } - return "" -} - -// DisconnectContainerFromNetwork removes a container from its CNI network -func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error { - ctr, err := r.LookupContainer(nameOrID) - if err != nil { - return err - } - return ctr.NetworkDisconnect(nameOrID, netName, force) -} - -// ConnectContainerToNetwork connects a container to a CNI network -func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, netOpts types.PerNetworkOptions) error { - ctr, err := r.LookupContainer(nameOrID) - if err != nil { - return err - } - return ctr.NetworkConnect(nameOrID, netName, netOpts) -} - -// normalizeNetworkName takes a network name, a partial or a full network ID and returns the network name. -// If the network is not found a errors is returned. 
-func (r *Runtime) normalizeNetworkName(nameOrID string) (string, error) { - net, err := r.network.NetworkInspect(nameOrID) - if err != nil { - return "", err - } - return net.Name, nil -} - -// ocicniPortsToNetTypesPorts convert the old port format to the new one -// while deduplicating ports into ranges -func ocicniPortsToNetTypesPorts(ports []types.OCICNIPortMapping) []types.PortMapping { - if len(ports) == 0 { - return nil - } - - newPorts := make([]types.PortMapping, 0, len(ports)) - - // first sort the ports - sort.Slice(ports, func(i, j int) bool { - return compareOCICNIPorts(ports[i], ports[j]) - }) - - // we already check if the slice is empty so we can use the first element - currentPort := types.PortMapping{ - HostIP: ports[0].HostIP, - HostPort: uint16(ports[0].HostPort), - ContainerPort: uint16(ports[0].ContainerPort), - Protocol: ports[0].Protocol, - Range: 1, - } - - for i := 1; i < len(ports); i++ { - if ports[i].HostIP == currentPort.HostIP && - ports[i].Protocol == currentPort.Protocol && - ports[i].HostPort-int32(currentPort.Range) == int32(currentPort.HostPort) && - ports[i].ContainerPort-int32(currentPort.Range) == int32(currentPort.ContainerPort) { - currentPort.Range++ - } else { - newPorts = append(newPorts, currentPort) - currentPort = types.PortMapping{ - HostIP: ports[i].HostIP, - HostPort: uint16(ports[i].HostPort), - ContainerPort: uint16(ports[i].ContainerPort), - Protocol: ports[i].Protocol, - Range: 1, - } - } - } - newPorts = append(newPorts, currentPort) - return newPorts -} - -// compareOCICNIPorts will sort the ocicni ports by -// 1) host ip -// 2) protocol -// 3) hostPort -// 4) container port -func compareOCICNIPorts(i, j types.OCICNIPortMapping) bool { - if i.HostIP != j.HostIP { - return i.HostIP < j.HostIP - } - - if i.Protocol != j.Protocol { - return i.Protocol < j.Protocol - } - - if i.HostPort != j.HostPort { - return i.HostPort < j.HostPort - } - - return i.ContainerPort < j.ContainerPort -} diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go index 4a6462d46..d4ec9082b 100644 --- a/libpod/networking_slirp4netns.go +++ b/libpod/networking_slirp4netns.go @@ -243,7 +243,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error { } slirpFeatures, err := checkSlirpFlags(path) if err != nil { - return fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err) + return fmt.Errorf("checking slirp4netns binary %s: %q: %w", path, err, err) } cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures) if err != nil { @@ -405,7 +405,7 @@ func GetSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) { } expectedIP, err := addToIP(slirpSubnet, uint32(100)) if err != nil { - return nil, fmt.Errorf("error calculating expected ip for slirp4netns: %w", err) + return nil, fmt.Errorf("calculating expected ip for slirp4netns: %w", err) } return expectedIP, nil } @@ -419,7 +419,7 @@ func GetSlirp4netnsGateway(subnet *net.IPNet) (*net.IP, error) { } expectedGatewayIP, err := addToIP(slirpSubnet, uint32(2)) if err != nil { - return nil, fmt.Errorf("error calculating expected gateway ip for slirp4netns: %w", err) + return nil, fmt.Errorf("calculating expected gateway ip for slirp4netns: %w", err) } return expectedGatewayIP, nil } @@ -433,7 +433,7 @@ func GetSlirp4netnsDNS(subnet *net.IPNet) (*net.IP, error) { } expectedDNSIP, err := addToIP(slirpSubnet, uint32(3)) if err != nil { - return nil, fmt.Errorf("error calculating expected dns ip for slirp4netns: %w", err) + return nil, 
fmt.Errorf("calculating expected dns ip for slirp4netns: %w", err) } return expectedDNSIP, nil } @@ -465,7 +465,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t b := make([]byte, 16) for { if err := syncR.SetDeadline(time.Now().Add(timeout)); err != nil { - return fmt.Errorf("error setting %s pipe timeout: %w", prog, err) + return fmt.Errorf("setting %s pipe timeout: %w", prog, err) } // FIXME: return err as soon as proc exits, without waiting for timeout if _, err := syncR.Read(b); err == nil { @@ -676,7 +676,7 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui // successful. var y map[string]interface{} if err := json.Unmarshal(buf[0:readLength], &y); err != nil { - return fmt.Errorf("error parsing error status from slirp4netns: %w", err) + return fmt.Errorf("parsing error status from slirp4netns: %w", err) } if e, found := y["error"]; found { return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e) diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go index 9429287f9..e5a6d1456 100644 --- a/libpod/networking_unsupported.go +++ b/libpod/networking_unsupported.go @@ -1,5 +1,5 @@ -//go:build !linux -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package libpod diff --git a/libpod/oci_conmon_common.go b/libpod/oci_conmon_common.go index 8ef8ae721..53dddd064 100644 --- a/libpod/oci_conmon_common.go +++ b/libpod/oci_conmon_common.go @@ -150,7 +150,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil { // The directory is allowed to exist if !os.IsExist(err) { - return nil, fmt.Errorf("error creating OCI runtime exit files directory: %w", err) + return nil, fmt.Errorf("creating OCI runtime exit files directory: %w", err) } } return runtime, nil @@ -234,7 +234,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { if err := cmd.Start(); err != nil { out, err2 := ioutil.ReadAll(errPipe) if err2 != nil { - return fmt.Errorf("error getting container %s state: %w", ctr.ID(), err) + return fmt.Errorf("getting container %s state: %w", ctr.ID(), err) } if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") { if err := ctr.removeConmonFiles(); err != nil { @@ -245,7 +245,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { ctr.state.State = define.ContainerStateExited return ctr.runtime.state.AddContainerExitCode(ctr.ID(), ctr.state.ExitCode) } - return fmt.Errorf("error getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err) + return fmt.Errorf("getting container %s state. 
stderr/out: %s: %w", ctr.ID(), out, err) } defer func() { _ = cmd.Wait() @@ -256,10 +256,10 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { } out, err := ioutil.ReadAll(outPipe) if err != nil { - return fmt.Errorf("error reading stdout: %s: %w", ctr.ID(), err) + return fmt.Errorf("reading stdout: %s: %w", ctr.ID(), err) } if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil { - return fmt.Errorf("error decoding container status for container %s: %w", ctr.ID(), err) + return fmt.Errorf("decoding container status for container %s: %w", ctr.ID(), err) } ctr.state.PID = state.Pid @@ -379,7 +379,7 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) { return define.ErrCtrStateInvalid } - return fmt.Errorf("error sending signal to container %s: %w", ctr.ID(), err) + return fmt.Errorf("sending signal to container %s: %w", ctr.ID(), err) } return nil @@ -434,7 +434,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) if aliveErr := unix.Kill(ctr.state.PID, 0); errors.Is(aliveErr, unix.ESRCH) { return nil } - return fmt.Errorf("error sending SIGKILL to container %s: %w", ctr.ID(), err) + return fmt.Errorf("sending SIGKILL to container %s: %w", ctr.ID(), err) } // Give runtime a few seconds to make it happen @@ -554,7 +554,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http. httpCon, httpBuf, err := hijacker.Hijack() if err != nil { - return fmt.Errorf("error hijacking connection: %w", err) + return fmt.Errorf("hijacking connection: %w", err) } hijackDone <- true @@ -563,7 +563,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http. // Force a flush after the header is written. 
diff --git a/libpod/oci_conmon_common.go b/libpod/oci_conmon_common.go
index 8ef8ae721..53dddd064 100644
--- a/libpod/oci_conmon_common.go
+++ b/libpod/oci_conmon_common.go
@@ -150,7 +150,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
 	if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
 		// The directory is allowed to exist
 		if !os.IsExist(err) {
-			return nil, fmt.Errorf("error creating OCI runtime exit files directory: %w", err)
+			return nil, fmt.Errorf("creating OCI runtime exit files directory: %w", err)
 		}
 	}
 	return runtime, nil
@@ -234,7 +234,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
 	if err := cmd.Start(); err != nil {
 		out, err2 := ioutil.ReadAll(errPipe)
 		if err2 != nil {
-			return fmt.Errorf("error getting container %s state: %w", ctr.ID(), err)
+			return fmt.Errorf("getting container %s state: %w", ctr.ID(), err)
 		}
 		if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") {
 			if err := ctr.removeConmonFiles(); err != nil {
@@ -245,7 +245,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
 			ctr.state.State = define.ContainerStateExited
 			return ctr.runtime.state.AddContainerExitCode(ctr.ID(), ctr.state.ExitCode)
 		}
-		return fmt.Errorf("error getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err)
+		return fmt.Errorf("getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err)
 	}
 	defer func() {
 		_ = cmd.Wait()
@@ -256,10 +256,10 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
 	}
 	out, err := ioutil.ReadAll(outPipe)
 	if err != nil {
-		return fmt.Errorf("error reading stdout: %s: %w", ctr.ID(), err)
+		return fmt.Errorf("reading stdout: %s: %w", ctr.ID(), err)
 	}
 	if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil {
-		return fmt.Errorf("error decoding container status for container %s: %w", ctr.ID(), err)
+		return fmt.Errorf("decoding container status for container %s: %w", ctr.ID(), err)
 	}
 	ctr.state.PID = state.Pid
@@ -379,7 +379,7 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
 		if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
 			return define.ErrCtrStateInvalid
 		}
-		return fmt.Errorf("error sending signal to container %s: %w", ctr.ID(), err)
+		return fmt.Errorf("sending signal to container %s: %w", ctr.ID(), err)
 	}
 
 	return nil
@@ -434,7 +434,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
 			if aliveErr := unix.Kill(ctr.state.PID, 0); errors.Is(aliveErr, unix.ESRCH) {
 				return nil
 			}
-			return fmt.Errorf("error sending SIGKILL to container %s: %w", ctr.ID(), err)
+			return fmt.Errorf("sending SIGKILL to container %s: %w", ctr.ID(), err)
 		}
 
 		// Give runtime a few seconds to make it happen
@@ -554,7 +554,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
 
 	httpCon, httpBuf, err := hijacker.Hijack()
 	if err != nil {
-		return fmt.Errorf("error hijacking connection: %w", err)
+		return fmt.Errorf("hijacking connection: %w", err)
 	}
 
 	hijackDone <- true
@@ -563,7 +563,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
 	// Force a flush after the header is written.
 	if err := httpBuf.Flush(); err != nil {
-		return fmt.Errorf("error flushing HTTP hijack header: %w", err)
+		return fmt.Errorf("flushing HTTP hijack header: %w", err)
 	}
 
 	defer func() {
@@ -838,7 +838,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
 		if err == unix.ESRCH {
 			return false, nil
 		}
-		return false, fmt.Errorf("error pinging container %s conmon with signal 0: %w", ctr.ID(), err)
+		return false, fmt.Errorf("pinging container %s conmon with signal 0: %w", ctr.ID(), err)
 	}
 	return true, nil
 }
@@ -890,11 +890,11 @@ func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntime
 	conmonPackage := packageVersion(r.conmonPath)
 	runtimeVersion, err := r.getOCIRuntimeVersion()
 	if err != nil {
-		return nil, nil, fmt.Errorf("error getting version of OCI runtime %s: %w", r.name, err)
+		return nil, nil, fmt.Errorf("getting version of OCI runtime %s: %w", r.name, err)
 	}
 	conmonVersion, err := r.getConmonVersion()
 	if err != nil {
-		return nil, nil, fmt.Errorf("error getting conmon version: %w", err)
+		return nil, nil, fmt.Errorf("getting conmon version: %w", err)
 	}
 
 	conmon := define.ConmonInfo{
@@ -1001,13 +1001,13 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
 
 	parentSyncPipe, childSyncPipe, err := newPipe()
 	if err != nil {
-		return 0, fmt.Errorf("error creating socket pair: %w", err)
+		return 0, fmt.Errorf("creating socket pair: %w", err)
 	}
 	defer errorhandling.CloseQuiet(parentSyncPipe)
 
 	childStartPipe, parentStartPipe, err := newPipe()
 	if err != nil {
-		return 0, fmt.Errorf("error creating socket pair for start pipe: %w", err)
+		return 0, fmt.Errorf("creating socket pair for start pipe: %w", err)
 	}
 	defer errorhandling.CloseQuiet(parentStartPipe)
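The hunks in this file show the rationale for the whole change set: the leading "error " is dropped from `fmt.Errorf` messages because `%w` chains wrap one another and the CLI already prints an "Error:" prefix, so the old style stuttered. A small demonstration (the sentinel and message texts are illustrative, not libpod's):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSuchCtr = errors.New("no such container")

// Old style: the "error " prefix repeats once a caller wraps the error.
func openDBOld() error {
	return fmt.Errorf("error opening database: %w", errNoSuchCtr)
}

// New style: a plain gerund phrase; the word "error" appears exactly once,
// supplied by whoever finally prints the chain.
func openDBNew() error {
	return fmt.Errorf("opening database: %w", errNoSuchCtr)
}

func main() {
	old := fmt.Errorf("error starting runtime: %w", openDBOld())
	fmt.Println(old) // error starting runtime: error opening database: no such container

	cur := fmt.Errorf("starting runtime: %w", openDBNew())
	fmt.Println(cur) // starting runtime: opening database: no such container

	// %w keeps the chain intact either way; only the text changes.
	fmt.Println(errors.Is(cur, errNoSuchCtr)) // true
}
```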
diff --git a/libpod/oci_conmon_exec_common.go b/libpod/oci_conmon_exec_common.go
index 735dbb9c4..e5080942b 100644
--- a/libpod/oci_conmon_exec_common.go
+++ b/libpod/oci_conmon_exec_common.go
@@ -225,7 +225,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
 		if err == unix.ESRCH {
 			return nil
 		}
-		return fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
+		return fmt.Errorf("pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
 	}
 
 	if timeout > 0 {
@@ -235,7 +235,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
 			if err == unix.ESRCH {
 				return nil
 			}
-			return fmt.Errorf("error killing container %s exec session %s PID %d with SIGTERM: %w", ctr.ID(), sessionID, pid, err)
+			return fmt.Errorf("killing container %s exec session %s PID %d with SIGTERM: %w", ctr.ID(), sessionID, pid, err)
 		}
 
 		// Wait for the PID to stop
@@ -253,7 +253,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
 			if err == unix.ESRCH {
 				return nil
 			}
-			return fmt.Errorf("error killing container %s exec session %s PID %d with SIGKILL: %w", ctr.ID(), sessionID, pid, err)
+			return fmt.Errorf("killing container %s exec session %s PID %d with SIGKILL: %w", ctr.ID(), sessionID, pid, err)
 		}
 
 		// Wait for the PID to stop
@@ -279,7 +279,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
 		if err == unix.ESRCH {
 			return false, nil
 		}
-		return false, fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
+		return false, fmt.Errorf("pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
 	}
 
 	return true, nil
@@ -338,7 +338,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
 	// create sync pipe to receive the pid
 	parentSyncPipe, childSyncPipe, err := newPipe()
 	if err != nil {
-		return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
+		return nil, nil, fmt.Errorf("creating socket pair: %w", err)
 	}
 	pipes.syncPipe = parentSyncPipe
@@ -352,7 +352,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
 	// attachToExec is responsible for closing parentStartPipe
 	childStartPipe, parentStartPipe, err := newPipe()
 	if err != nil {
-		return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
+		return nil, nil, fmt.Errorf("creating socket pair: %w", err)
 	}
 	pipes.startPipe = parentStartPipe
@@ -362,7 +362,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
 	// attachToExec is responsible for closing parentAttachPipe
 	parentAttachPipe, childAttachPipe, err := newPipe()
 	if err != nil {
-		return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
+		return nil, nil, fmt.Errorf("creating socket pair: %w", err)
 	}
 	pipes.attachPipe = parentAttachPipe
@@ -564,7 +564,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
 	httpCon, httpBuf, err := hijacker.Hijack()
 	if err != nil {
 		conmonPipeDataChan <- conmonPipeData{-1, err}
-		return fmt.Errorf("error hijacking connection: %w", err)
+		return fmt.Errorf("hijacking connection: %w", err)
 	}
 
 	hijackDone <- true
@@ -575,7 +575,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
 	// Force a flush after the header is written.
 	if err := httpBuf.Flush(); err != nil {
 		conmonPipeDataChan <- conmonPipeData{-1, err}
-		return fmt.Errorf("error flushing HTTP hijack header: %w", err)
+		return fmt.Errorf("flushing HTTP hijack header: %w", err)
 	}
 
 	go func() {
@@ -723,7 +723,7 @@ func (c *Container) prepareProcessExec(options *ExecOptions, env []string, sessi
 	if len(addGroups) > 0 {
 		sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides)
 		if err != nil {
-			return nil, fmt.Errorf("error looking up supplemental groups for container %s exec session %s: %w", c.ID(), sessionID, err)
+			return nil, fmt.Errorf("looking up supplemental groups for container %s exec session %s: %w", c.ID(), sessionID, err)
 		}
 	}
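`ExecStopContainer` and `ExecUpdateStatus` above both probe liveness with signal 0: it performs all the permission checks of a real kill(2) without delivering anything, and ESRCH means the process is gone. A minimal sketch of that probe; `pidAlive` is a hypothetical helper, not a libpod function:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// pidAlive mirrors the signal-0 ping used above.
func pidAlive(pid int) (bool, error) {
	if err := unix.Kill(pid, 0); err != nil {
		if errors.Is(err, unix.ESRCH) {
			return false, nil // no such process
		}
		return false, fmt.Errorf("pinging PID %d with signal 0: %w", pid, err)
	}
	return true, nil
}

func main() {
	alive, err := pidAlive(os.Getpid())
	fmt.Println(alive, err) // true <nil>
}
```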
diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index b13578388..522895798 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -76,7 +76,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
 	// Hit the Activate endpoint to find out if it is, and if so what kind
 	req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
 	if err != nil {
-		return fmt.Errorf("error making request to volume plugin %s activation endpoint: %w", newPlugin.Name, err)
+		return fmt.Errorf("making request to volume plugin %s activation endpoint: %w", newPlugin.Name, err)
 	}
 
 	req.Header.Set("Host", newPlugin.getURI())
@@ -84,7 +84,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
 	resp, err := newPlugin.Client.Do(req)
 	if err != nil {
-		return fmt.Errorf("error sending request to plugin %s activation endpoint: %w", newPlugin.Name, err)
+		return fmt.Errorf("sending request to plugin %s activation endpoint: %w", newPlugin.Name, err)
 	}
 	defer resp.Body.Close()
@@ -97,12 +97,12 @@ func validatePlugin(newPlugin *VolumePlugin) error {
 	// Read and decode the body so we can tell if this is a volume plugin.
 	respBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return fmt.Errorf("error reading activation response body from plugin %s: %w", newPlugin.Name, err)
+		return fmt.Errorf("reading activation response body from plugin %s: %w", newPlugin.Name, err)
 	}
 
 	respStruct := new(activateResponse)
 	if err := json.Unmarshal(respBytes, respStruct); err != nil {
-		return fmt.Errorf("error unmarshalling plugin %s activation response: %w", newPlugin.Name, err)
+		return fmt.Errorf("unmarshalling plugin %s activation response: %w", newPlugin.Name, err)
 	}
 
 	foundVolume := false
@@ -196,7 +196,7 @@ func (p *VolumePlugin) verifyReachable() error {
 			return fmt.Errorf("%s: %w", p.Name, ErrPluginRemoved)
 		}
 
-		return fmt.Errorf("error accessing plugin %s: %w", p.Name, err)
+		return fmt.Errorf("accessing plugin %s: %w", p.Name, err)
 	}
 	return nil
 }
@@ -212,13 +212,13 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.R
 	if toJSON != nil {
 		reqJSON, err = json.Marshal(toJSON)
 		if err != nil {
-			return nil, fmt.Errorf("error marshalling request JSON for volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
+			return nil, fmt.Errorf("marshalling request JSON for volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
 		}
 	}
 
 	req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
 	if err != nil {
-		return nil, fmt.Errorf("error making request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
+		return nil, fmt.Errorf("making request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
 	}
 
 	req.Header.Set("Host", p.getURI())
@@ -226,7 +226,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.R
 	resp, err := p.Client.Do(req)
 	if err != nil {
-		return nil, fmt.Errorf("error sending request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
+		return nil, fmt.Errorf("sending request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
 	}
 	// We are *deliberately not closing* response here. It is the
 	// responsibility of the caller to do so after reading the response.
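Note the recurring `"http://plugin"+endpoint` URL above: the host portion is a dummy, because the plugin client's dialer is pinned to the plugin's unix socket and ignores the address entirely. A sketch of how such a client can be built; the socket path and endpoint are illustrative, and the real wiring lives elsewhere in this package:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"net/http"
)

// newPluginClient returns an http.Client that speaks HTTP over a unix
// socket; the URL host passed to requests is a placeholder.
func newPluginClient(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
}

func main() {
	// Hypothetical socket; real plugins register under /run/docker/plugins/.
	client := newPluginClient("/run/docker/plugins/myplugin.sock")
	req, err := http.NewRequest("POST", "http://plugin/VolumeDriver.List",
		bytes.NewReader([]byte("{}")))
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("sending request to plugin:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```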
@@ -240,9 +240,9 @@ func (p *VolumePlugin) makeErrorResponse(err, endpoint, volName string) error {
 		err = "empty error from plugin"
 	}
 	if volName != "" {
-		return fmt.Errorf("error on %s on volume %s in volume plugin %s: %w", endpoint, volName, p.Name, errors.New(err))
+		return fmt.Errorf("on %s on volume %s in volume plugin %s: %w", endpoint, volName, p.Name, errors.New(err))
 	}
-	return fmt.Errorf("error on %s in volume plugin %s: %w", endpoint, p.Name, errors.New(err))
+	return fmt.Errorf("on %s in volume plugin %s: %w", endpoint, p.Name, errors.New(err))
 }
 
 // Handle error responses from plugin
@@ -254,12 +254,12 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
 	if resp.StatusCode != 200 {
 		errResp, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
-			return fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
+			return fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err)
 		}
 
 		errStruct := new(volume.ErrorResponse)
 		if err := json.Unmarshal(errResp, errStruct); err != nil {
-			return fmt.Errorf("error unmarshalling JSON response from volume plugin %s: %w", p.Name, err)
+			return fmt.Errorf("unmarshalling JSON response from volume plugin %s: %w", p.Name, err)
 		}
 
 		return p.makeErrorResponse(errStruct.Err, endpoint, volName)
@@ -309,12 +309,12 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
 	volumeRespBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
+		return nil, fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err)
 	}
 
 	volumeResp := new(volume.ListResponse)
 	if err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {
-		return nil, fmt.Errorf("error unmarshalling volume plugin %s list response: %w", p.Name, err)
+		return nil, fmt.Errorf("unmarshalling volume plugin %s list response: %w", p.Name, err)
 	}
 
 	return volumeResp.Volumes, nil
@@ -344,12 +344,12 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
 	getRespBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
+		return nil, fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err)
 	}
 
 	getResp := new(volume.GetResponse)
 	if err := json.Unmarshal(getRespBytes, getResp); err != nil {
-		return nil, fmt.Errorf("error unmarshalling volume plugin %s get response: %w", p.Name, err)
+		return nil, fmt.Errorf("unmarshalling volume plugin %s get response: %w", p.Name, err)
 	}
 
 	return getResp.Volume, nil
@@ -400,12 +400,12 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
 	pathRespBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
+		return "", fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err)
 	}
 
 	pathResp := new(volume.PathResponse)
 	if err := json.Unmarshal(pathRespBytes, pathResp); err != nil {
-		return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
+		return "", fmt.Errorf("unmarshalling volume plugin %s path response: %w", p.Name, err)
 	}
 
 	return pathResp.Mountpoint, nil
@@ -437,12 +437,12 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
 	mountRespBytes, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
+		return "", fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err)
 	}
 
 	mountResp := new(volume.MountResponse)
 	if err := json.Unmarshal(mountRespBytes, mountResp); err != nil {
-		return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
+		return "", fmt.Errorf("unmarshalling volume plugin %s path response: %w", p.Name, err)
 	}
 
 	return mountResp.Mountpoint, nil
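The pod operations in the next file all follow one pattern: collect per-container failures into a map keyed by container ID, then return the map together with a wrapped `define.ErrPodPartialFail` sentinel so callers can detect a partial failure with `errors.Is`. A compact sketch of the pattern with a local stand-in sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// errPodPartialFail stands in for libpod's define.ErrPodPartialFail.
var errPodPartialFail = errors.New("some containers failed")

// stopAll collects per-container errors and wraps the sentinel when any occur.
func stopAll(ids []string, stop func(string) error) (map[string]error, error) {
	ctrErrors := make(map[string]error)
	for _, id := range ids {
		if err := stop(id); err != nil {
			ctrErrors[id] = err
		}
	}
	if len(ctrErrors) > 0 {
		return ctrErrors, fmt.Errorf("stopping some containers: %w", errPodPartialFail)
	}
	return nil, nil
}

func main() {
	stop := func(id string) error {
		if id == "bad" {
			return errors.New("container refused to stop")
		}
		return nil
	}
	ctrErrors, err := stopAll([]string{"good", "bad"}, stop)
	fmt.Println(errors.Is(err, errPodPartialFail)) // true
	for id, e := range ctrErrors {
		fmt.Println(id, e) // bad container refused to stop
	}
}
```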
%w", p.Name, err) + return "", fmt.Errorf("reading response body from volume plugin %s: %w", p.Name, err) } mountResp := new(volume.MountResponse) if err := json.Unmarshal(mountRespBytes, mountResp); err != nil { - return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err) + return "", fmt.Errorf("unmarshalling volume plugin %s path response: %w", p.Name, err) } return mountResp.Mountpoint, nil diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 29964ae95..1bd686ddc 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -92,7 +92,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { // Build a dependency graph of containers in the pod graph, err := BuildContainerGraph(allCtrs) if err != nil { - return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err) + return nil, fmt.Errorf("generating dependency graph for pod %s: %w", p.ID(), err) } // If there are no containers without dependencies, we can't start // Error out @@ -109,7 +109,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error starting some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("starting some containers: %w", define.ErrPodPartialFail) } defer p.newPodEvent(events.Start) return nil, nil @@ -201,7 +201,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("stopping some containers: %w", define.ErrPodPartialFail) } if err := p.maybeStopServiceContainer(); err != nil { @@ -305,7 +305,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error cleaning up some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("cleaning up some containers: %w", define.ErrPodPartialFail) } if err := p.maybeStopServiceContainer(); err != nil { @@ -376,7 +376,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error pausing some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("pausing some containers: %w", define.ErrPodPartialFail) } return nil, nil } @@ -432,7 +432,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error unpausing some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("unpausing some containers: %w", define.ErrPodPartialFail) } return nil, nil } @@ -470,7 +470,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { // Build a dependency graph of containers in the pod graph, err := BuildContainerGraph(allCtrs) if err != nil { - return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err) + return nil, fmt.Errorf("generating dependency graph for pod %s: %w", p.ID(), err) } ctrErrors := make(map[string]error) @@ -488,7 +488,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail) + return ctrErrors, fmt.Errorf("stopping some containers: %w", define.ErrPodPartialFail) } p.newPodEvent(events.Stop) p.newPodEvent(events.Start) @@ -547,7 +547,7 @@ func (p *Pod) 
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index a86cd6d21..9f89fd55d 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -38,7 +38,7 @@ func (p *Pod) updatePod() error {
 // Save pod state to database
 func (p *Pod) save() error {
 	if err := p.runtime.state.SavePod(p); err != nil {
-		return fmt.Errorf("error saving pod %s state: %w", p.ID(), err)
+		return fmt.Errorf("saving pod %s state: %w", p.ID(), err)
 	}
 
 	return nil
@@ -60,7 +60,7 @@ func (p *Pod) refresh() error {
 	// Retrieve the pod's lock
 	lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
 	if err != nil {
-		return fmt.Errorf("error retrieving lock %d for pod %s: %w", p.config.LockID, p.ID(), err)
+		return fmt.Errorf("retrieving lock %d for pod %s: %w", p.config.LockID, p.ID(), err)
 	}
 	p.lock = lock
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 1503b2344..83c9f53e2 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -207,7 +207,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
 	// Overwrite config with user-given configuration options
 	for _, opt := range options {
 		if err := opt(runtime); err != nil {
-			return nil, fmt.Errorf("error configuring runtime: %w", err)
+			return nil, fmt.Errorf("configuring runtime: %w", err)
 		}
 	}
@@ -223,7 +223,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
 	}
 
 	if err := shutdown.Start(); err != nil {
-		return nil, fmt.Errorf("error starting shutdown signal handler: %w", err)
+		return nil, fmt.Errorf("starting shutdown signal handler: %w", err)
 	}
 
 	if err := makeRuntime(runtime); err != nil {
@@ -280,7 +280,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
 			// Since we're renumbering, this is not fatal.
 			// Remove the earlier set of locks and recreate.
 			if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
-				return nil, fmt.Errorf("error removing libpod locks file %s: %w", lockPath, err)
+				return nil, fmt.Errorf("removing libpod locks file %s: %w", lockPath, err)
 			}
 
 			manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
@@ -318,7 +318,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
 		// The directory is allowed to exist
 		if !errors.Is(err, os.ErrExist) {
-			return fmt.Errorf("error creating runtime static files directory: %w", err)
+			return fmt.Errorf("creating runtime static files directory: %w", err)
 		}
 	}
@@ -362,7 +362,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 			}
 		}
 
-		return fmt.Errorf("error retrieving runtime configuration from database: %w", err)
+		return fmt.Errorf("retrieving runtime configuration from database: %w", err)
 	}
 
 	runtime.mergeDBConfig(dbConfig)
@@ -405,7 +405,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	}
 
 	if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil {
-		return fmt.Errorf("error setting libpod namespace in state: %w", err)
+		return fmt.Errorf("setting libpod namespace in state: %w", err)
 	}
 	logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace)
@@ -462,15 +462,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
 		// The directory is allowed to exist
 		if !errors.Is(err, os.ErrExist) {
-			return fmt.Errorf("error creating tmpdir: %w", err)
-		}
-	}
-
-	// Create events log dir
-	if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil {
-		// The directory is allowed to exist
-		if !errors.Is(err, os.ErrExist) {
-			return fmt.Errorf("error creating events dirs: %w", err)
+			return fmt.Errorf("creating tmpdir: %w", err)
 		}
 	}
@@ -528,7 +520,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil {
 		// The directory is allowed to exist
 		if !errors.Is(err, os.ErrExist) {
-			return fmt.Errorf("error creating runtime temporary files directory: %w", err)
+			return fmt.Errorf("creating runtime temporary files directory: %w", err)
 		}
 	}
@@ -549,7 +541,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
 	aliveLock, err := storage.GetLockfile(runtimeAliveLock)
 	if err != nil {
-		return fmt.Errorf("error acquiring runtime init lock: %w", err)
+		return fmt.Errorf("acquiring runtime init lock: %w", err)
 	}
 	// Acquire the lock and hold it until we return
 	// This ensures that no two processes will be in runtime.refresh at once
@@ -603,7 +595,7 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 		if errors.Is(err, os.ErrNotExist) {
 			doRefresh = true
 		} else {
-			return fmt.Errorf("error reading runtime status file %s: %w", runtimeAliveFile, err)
+			return fmt.Errorf("reading runtime status file %s: %w", runtimeAliveFile, err)
 		}
 	}
@@ -695,7 +687,7 @@ func (r *Runtime) GetConfig() (*config.Config, error) {
 	// Copy so the caller won't be able to modify the actual config
 	if err := JSONDeepCopy(rtConfig, config); err != nil {
-		return nil, fmt.Errorf("error copying config: %w", err)
+		return nil, fmt.Errorf("copying config: %w", err)
 	}
 
 	return config, nil
@@ -806,7 +798,7 @@ func (r *Runtime) Shutdown(force bool) error {
 		// Note that the libimage runtime shuts down the store.
 		if err := r.libimageRuntime.Shutdown(force); err != nil {
-			lastError = fmt.Errorf("error shutting down container storage: %w", err)
+			lastError = fmt.Errorf("shutting down container storage: %w", err)
 		}
 	}
 	if err := r.state.Close(); err != nil {
@@ -838,15 +830,15 @@ func (r *Runtime) refresh(alivePath string) error {
 	// Containers, pods, and volumes must also reacquire their locks.
 	ctrs, err := r.state.AllContainers()
 	if err != nil {
-		return fmt.Errorf("error retrieving all containers from state: %w", err)
+		return fmt.Errorf("retrieving all containers from state: %w", err)
 	}
 	pods, err := r.state.AllPods()
 	if err != nil {
-		return fmt.Errorf("error retrieving all pods from state: %w", err)
+		return fmt.Errorf("retrieving all pods from state: %w", err)
 	}
 	vols, err := r.state.AllVolumes()
 	if err != nil {
-		return fmt.Errorf("error retrieving all volumes from state: %w", err)
+		return fmt.Errorf("retrieving all volumes from state: %w", err)
 	}
 	// No locks are taken during pod, volume, and container refresh.
 	// Furthermore, the pod/volume/container refresh() functions are not
@@ -874,7 +866,7 @@ func (r *Runtime) refresh(alivePath string) error {
 	// Create a file indicating the runtime is alive and ready
 	file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
 	if err != nil {
-		return fmt.Errorf("error creating runtime status file: %w", err)
+		return fmt.Errorf("creating runtime status file: %w", err)
 	}
 	defer file.Close()
@@ -1038,9 +1030,6 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
 			logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
 		}
 		c.TmpDir = dbConfig.LibpodTmp
-		if c.EventsLogFilePath == "" {
-			c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
-		}
 	}
 
 	if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
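The alive-file logic above is how the runtime decides whether a refresh is needed: the file lives in the runtime tmp dir (a tmpfs), so after a reboot it is gone and per-boot state must be rebuilt. A simplified sketch of just the check; the real code also serializes this behind the init lock taken via storage.GetLockfile:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// needsRefresh reports whether the runtime's alive file is missing,
// which signals that locks, network namespaces, etc. must be refreshed.
func needsRefresh(tmpDir string) (bool, error) {
	aliveFile := filepath.Join(tmpDir, "alive")
	if _, err := os.Stat(aliveFile); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return true, nil // tmpfs was cleared, e.g. by a reboot
		}
		return false, fmt.Errorf("reading runtime status file %s: %w", aliveFile, err)
	}
	return false, nil
}

func main() {
	refresh, err := needsRefresh(os.TempDir())
	fmt.Println(refresh, err)
}
```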
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 047375628..372434b49 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -39,7 +39,7 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) {
 		// Look up if container is in state
 		hasCtr, err := r.state.HasContainer(ctr.ID)
 		if err != nil {
-			return nil, fmt.Errorf("error looking up container %s in state: %w", ctr.ID, err)
+			return nil, fmt.Errorf("looking up container %s in state: %w", ctr.ID, err)
 		}
 
 		storageCtr.PresentInLibpod = hasCtr
@@ -64,7 +64,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
 		if errors.Is(err, storage.ErrLayerUnknown) {
 			return fmt.Errorf("no container with ID or name %q found: %w", idOrName, define.ErrNoSuchCtr)
 		}
-		return fmt.Errorf("error looking up container %q: %w", idOrName, err)
+		return fmt.Errorf("looking up container %q: %w", idOrName, err)
 	}
 
 	// Lookup returns an ID but it's not guaranteed to be a container ID.
@@ -74,7 +74,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
 		if errors.Is(err, storage.ErrContainerUnknown) {
 			return fmt.Errorf("%q does not refer to a container: %w", idOrName, define.ErrNoSuchCtr)
 		}
-		return fmt.Errorf("error retrieving container %q: %w", idOrName, err)
+		return fmt.Errorf("retrieving container %q: %w", idOrName, err)
 	}
 
 	// Error out if the container exists in libpod
@@ -115,7 +115,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error {
 			logrus.Infof("Storage for container %s already removed", ctr.ID)
 			return nil
 		}
-		return fmt.Errorf("error removing storage for container %q: %w", idOrName, err)
+		return fmt.Errorf("removing storage for container %q: %w", idOrName, err)
 	}
 
 	return nil
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index fb4f80aa6..c9dc74403 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -86,7 +86,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
 	ctr, err := r.initContainerVariables(rSpec, config)
 	if err != nil {
-		return nil, fmt.Errorf("error initializing container variables: %w", err)
+		return nil, fmt.Errorf("initializing container variables: %w", err)
 	}
 	// For an imported checkpoint no one has ever set the StartedTime. Set it now.
 	ctr.state.StartedTime = time.Now()
@@ -126,7 +126,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
 	// the config was re-written.
 	newConf, err := r.state.GetContainerConfig(ctr.ID())
 	if err != nil {
-		return nil, fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
+		return nil, fmt.Errorf("retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
 	}
 	ctr.config = newConf
@@ -143,7 +143,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
 		// Set config back to the old name so reflect what is actually
 		// present in the DB.
 		ctr.config.Name = oldName
-		return nil, fmt.Errorf("error renaming container %s: %w", ctr.ID(), err)
+		return nil, fmt.Errorf("renaming container %s: %w", ctr.ID(), err)
 	}
 
 	// Step 3: rename the container in c/storage.
@@ -189,7 +189,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
 		// This is a restore from an imported checkpoint
 		ctr.restoreFromCheckpoint = true
 		if err := JSONDeepCopy(config, ctr.config); err != nil {
-			return nil, fmt.Errorf("error copying container config for restore: %w", err)
+			return nil, fmt.Errorf("copying container config for restore: %w", err)
 		}
 		// If the ID is empty a new name for the restored container was requested
 		if ctr.config.ID == "" {
@@ -229,12 +229,12 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
 	ctr, err = r.initContainerVariables(rSpec, nil)
 	if err != nil {
-		return nil, fmt.Errorf("error initializing container variables: %w", err)
+		return nil, fmt.Errorf("initializing container variables: %w", err)
 	}
 
 	for _, option := range options {
 		if err := option(ctr); err != nil {
-			return nil, fmt.Errorf("error running container create option: %w", err)
+			return nil, fmt.Errorf("running container create option: %w", err)
 		}
 	}
@@ -301,7 +301,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 	// Allocate a lock for the container
 	lock, err := r.lockManager.AllocateLock()
 	if err != nil {
-		return nil, fmt.Errorf("error allocating lock for new container: %w", err)
+		return nil, fmt.Errorf("allocating lock for new container: %w", err)
 	}
 	ctr.lock = lock
 	ctr.config.LockID = ctr.lock.ID()
@@ -355,7 +355,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 	if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
 		podCgroup, err := pod.CgroupPath()
 		if err != nil {
-			return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
+			return nil, fmt.Errorf("retrieving pod %s cgroup: %w", pod.ID(), err)
 		}
 		expectPodCgroup, err := ctr.expectPodCgroup()
 		if err != nil {
@@ -380,7 +380,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 		case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
 			podCgroup, err := pod.CgroupPath()
 			if err != nil {
-				return nil, fmt.Errorf("error retrieving pod %s cgroup: %w", pod.ID(), err)
+				return nil, fmt.Errorf("retrieving pod %s cgroup: %w", pod.ID(), err)
 			}
 			ctr.config.CgroupParent = podCgroup
 		case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
@@ -432,7 +432,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 	defer func() {
 		if retErr != nil {
 			if err := ctr.teardownStorage(); err != nil {
-				logrus.Errorf("Removing partially-created container root filesystem: %s", err)
+				logrus.Errorf("Removing partially-created container root filesystem: %v", err)
 			}
 		}
 	}()
@@ -476,7 +476,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 			// The volume exists, we're good
 			continue
 		} else if !errors.Is(err, define.ErrNoSuchVolume) {
-			return nil, fmt.Errorf("error retrieving named volume %s for new container: %w", vol.Name, err)
+			return nil, fmt.Errorf("retrieving named volume %s for new container: %w", vol.Name, err)
 		}
 	}
 	if vol.IsAnonymous {
@@ -514,7 +514,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 		}
 		newVol, err := r.newVolume(false, volOptions...)
 		if err != nil {
-			return nil, fmt.Errorf("error creating named volume %q: %w", vol.Name, err)
+			return nil, fmt.Errorf("creating named volume %q: %w", vol.Name, err)
 		}
 
 		ctrNamedVolumes = append(ctrNamedVolumes, newVol)
@@ -606,7 +606,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
 	// exist once we're done.
 	newConf, err := r.state.GetContainerConfig(c.ID())
 	if err != nil {
-		return fmt.Errorf("error retrieving container %s configuration from DB to remove: %w", c.ID(), err)
+		return fmt.Errorf("retrieving container %s configuration from DB to remove: %w", c.ID(), err)
 	}
 	c.config = newConf
@@ -727,7 +727,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
 	if ok, _ := r.state.HasContainer(c.ID()); !ok {
 		// When the container has already been removed, the OCI runtime directory remain.
 		if err := c.cleanupRuntime(ctx); err != nil {
-			return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
+			return fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err)
 		}
 		return nil
 	}
@@ -739,7 +739,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
 	// Do this before we set ContainerStateRemoving, to ensure that we can
 	// actually remove from the OCI runtime.
 	if err := c.cleanup(ctx); err != nil {
-		cleanupErr = fmt.Errorf("error cleaning up container %s: %w", c.ID(), err)
+		cleanupErr = fmt.Errorf("cleaning up container %s: %w", c.ID(), err)
 	}
 
 	// Set ContainerStateRemoving
@@ -799,7 +799,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
 	// Deallocate the container's lock
 	if err := c.lock.Free(); err != nil {
 		if cleanupErr == nil && !os.IsNotExist(err) {
-			cleanupErr = fmt.Errorf("error freeing lock for container %s: %w", c.ID(), err)
+			cleanupErr = fmt.Errorf("freeing lock for container %s: %w", c.ID(), err)
 		} else {
 			logrus.Errorf("Free container lock: %v", err)
 		}
@@ -1227,7 +1227,7 @@ func (r *Runtime) MountStorageContainer(id string) (string, error) {
 	}
 	mountPoint, err := r.store.Mount(container.ID, "")
 	if err != nil {
-		return "", fmt.Errorf("error mounting storage for container %s: %w", id, err)
+		return "", fmt.Errorf("mounting storage for container %s: %w", id, err)
 	}
 	return mountPoint, nil
 }
@@ -1275,7 +1275,7 @@ func (r *Runtime) StorageContainers() ([]storage.Container, error) {
 	storeContainers, err := r.store.Containers()
 	if err != nil {
-		return nil, fmt.Errorf("error reading list of all storage containers: %w", err)
+		return nil, fmt.Errorf("reading list of all storage containers: %w", err)
 	}
 	retCtrs := []storage.Container{}
 	for _, container := range storeContainers {
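`setupContainer` above relies on a named return error plus a deferred closure to tear down partially-created storage only when the constructor is about to fail. The idiom in isolation; `createThing` is a toy stand-in for the container constructor:

```go
package main

import (
	"errors"
	"fmt"
)

// createThing uses the named-return cleanup idiom: the deferred closure
// inspects retErr and rolls back only on the failure paths.
func createThing(fail bool) (_ *string, retErr error) {
	storage := "allocated storage"
	defer func() {
		if retErr != nil {
			// In libpod this is ctr.teardownStorage(), and a teardown
			// failure is only logged, never allowed to mask retErr.
			fmt.Println("rolling back:", storage)
		}
	}()

	if fail {
		return nil, errors.New("later setup step failed")
	}
	return &storage, nil
}

func main() {
	_, err := createThing(true)
	fmt.Println("err:", err)
}
```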
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index d04607d2e..5510b2af6 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -107,7 +107,7 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
 func DownloadFromFile(reader *os.File) (string, error) {
 	outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
 	if err != nil {
-		return "", fmt.Errorf("error creating file: %w", err)
+		return "", fmt.Errorf("creating file: %w", err)
 	}
 	defer outFile.Close()
@@ -115,7 +115,7 @@ func DownloadFromFile(reader *os.File) (string, error) {
 	_, err = io.Copy(outFile, reader)
 	if err != nil {
-		return "", fmt.Errorf("error saving %s to %s: %w", reader.Name(), outFile.Name(), err)
+		return "", fmt.Errorf("saving %s to %s: %w", reader.Name(), outFile.Name(), err)
 	}
 
 	return outFile.Name(), nil
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index 139638a6b..36901d4d0 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -92,7 +92,7 @@ func (r *Runtime) migrate() error {
 		if needsWrite {
 			if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
-				return fmt.Errorf("error rewriting config for container %s: %w", ctr.ID(), err)
+				return fmt.Errorf("rewriting config for container %s: %w", ctr.ID(), err)
 			}
 		}
 	}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 57c0b5c48..3eeef69d8 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -36,14 +36,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
 	for _, option := range options {
 		if err := option(pod); err != nil {
-			return nil, fmt.Errorf("error running pod create option: %w", err)
+			return nil, fmt.Errorf("running pod create option: %w", err)
 		}
 	}
 
 	// Allocate a lock for the pod
 	lock, err := r.lockManager.AllocateLock()
 	if err != nil {
-		return nil, fmt.Errorf("error allocating lock for new pod: %w", err)
+		return nil, fmt.Errorf("allocating lock for new pod: %w", err)
 	}
 	pod.lock = lock
 	pod.config.LockID = pod.lock.ID()
@@ -160,7 +160,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
 		}
 	}
 	if addPodErr != nil {
-		return nil, fmt.Errorf("error adding pod to state: %w", addPodErr)
+		return nil, fmt.Errorf("adding pod to state: %w", addPodErr)
 	}
 
 	return pod, nil
@@ -286,7 +286,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 	case config.SystemdCgroupsManager:
 		if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
 			if removalErr == nil {
-				removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
+				removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
 			} else {
 				logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
 			}
@@ -300,7 +300,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		conmonCgroup, err := cgroups.Load(conmonCgroupPath)
 		if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
 			if removalErr == nil {
-				removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err)
+				removalErr = fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err)
 			} else {
 				logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
 			}
@@ -308,7 +308,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		if err == nil {
 			if err = conmonCgroup.Delete(); err != nil {
 				if removalErr == nil {
-					removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err)
+					removalErr = fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err)
 				} else {
 					logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
 				}
@@ -317,7 +317,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		cgroup, err := cgroups.Load(p.state.CgroupPath)
 		if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
 			if removalErr == nil {
-				removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err)
+				removalErr = fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err)
 			} else {
 				logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
 			}
@@ -325,7 +325,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		if err == nil {
 			if err := cgroup.Delete(); err != nil {
 				if removalErr == nil {
-					removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err)
+					removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
 				} else {
 					logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
 				}
@@ -362,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 	// Deallocate the pod lock
 	if err := p.lock.Free(); err != nil {
 		if removalErr == nil {
-			removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err)
+			removalErr = fmt.Errorf("freeing pod %s lock: %w", p.ID(), err)
 		} else {
 			logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
 		}
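`removePod` above aggregates cleanup failures consistently: the first error becomes the returned `removalErr`, later ones are only logged, so every resource still gets visited. The same pattern in miniature; the step names and errors here are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// cleanupAll runs every step even after a failure; the first error is
// returned, the rest are logged.
func cleanupAll(steps []struct {
	name string
	fn   func() error
}) error {
	var removalErr error
	for _, step := range steps {
		if err := step.fn(); err != nil {
			if removalErr == nil {
				removalErr = fmt.Errorf("%s: %w", step.name, err)
			} else {
				log.Printf("%s: %v", step.name, err)
			}
		}
	}
	return removalErr
}

func main() {
	err := cleanupAll([]struct {
		name string
		fn   func() error
	}{
		{"removing pod cgroup", func() error { return errors.New("device busy") }},
		{"freeing pod lock", func() error { return nil }},
	})
	fmt.Println(err) // removing pod cgroup: device busy
}
```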
return nil, fmt.Errorf("running pod create option: %w", err) } } // Allocate a lock for the pod lock, err := r.lockManager.AllocateLock() if err != nil { - return nil, fmt.Errorf("error allocating lock for new pod: %w", err) + return nil, fmt.Errorf("allocating lock for new pod: %w", err) } pod.lock = lock pod.config.LockID = pod.lock.ID() @@ -160,7 +160,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option } } if addPodErr != nil { - return nil, fmt.Errorf("error adding pod to state: %w", addPodErr) + return nil, fmt.Errorf("adding pod to state: %w", addPodErr) } return pod, nil @@ -286,7 +286,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, case config.SystemdCgroupsManager: if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil { if removalErr == nil { - removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err) + removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) } else { logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } @@ -300,7 +300,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, conmonCgroup, err := cgroups.Load(conmonCgroupPath) if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { if removalErr == nil { - removalErr = fmt.Errorf("error retrieving pod %s conmon cgroup: %w", p.ID(), err) + removalErr = fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err) } else { logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) } @@ -308,7 +308,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, if err == nil { if err = conmonCgroup.Delete(); err != nil { if removalErr == nil { - removalErr = fmt.Errorf("error removing pod %s conmon cgroup: %w", p.ID(), err) + removalErr = fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err) } else { logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) } @@ -317,7 +317,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, cgroup, err := cgroups.Load(p.state.CgroupPath) if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { if removalErr == nil { - removalErr = fmt.Errorf("error retrieving pod %s cgroup: %w", p.ID(), err) + removalErr = fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err) } else { logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } @@ -325,7 +325,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, if err == nil { if err := cgroup.Delete(); err != nil { if removalErr == nil { - removalErr = fmt.Errorf("error removing pod %s cgroup: %w", p.ID(), err) + removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) } else { logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } @@ -362,7 +362,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, // Deallocate the pod lock if err := p.lock.Free(); err != nil { if removalErr == nil { - removalErr = fmt.Errorf("error freeing pod %s lock: %w", p.ID(), err) + removalErr = fmt.Errorf("freeing pod %s lock: %w", p.ID(), err) } else { logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err) } diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go index 9149dd72f..ff70081d8 100644 --- a/libpod/runtime_renumber.go +++ b/libpod/runtime_renumber.go @@ -27,7 
diff --git a/libpod/util.go b/libpod/util.go
index a6e6a4f3e..c5a2b81bd 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -231,7 +231,7 @@ func DefaultSeccompPath() (string, error) {
 func checkDependencyContainer(depCtr, ctr *Container) error {
 	state, err := depCtr.State()
 	if err != nil {
-		return fmt.Errorf("error accessing dependency container %s state: %w", depCtr.ID(), err)
+		return fmt.Errorf("accessing dependency container %s state: %w", depCtr.ID(), err)
 	}
 	if state == define.ContainerStateRemoving {
 		return fmt.Errorf("cannot use container %s as a dependency as it is being removed: %w", depCtr.ID(), define.ErrCtrStateInvalid)
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index 7c79e6ce4..efc11710f 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -29,7 +29,7 @@ func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (
 	logrus.Debugf("Created cgroup path %s for parent %s and name %s", cgroupPath, parent, name)
 
 	if err := makeSystemdCgroup(cgroupPath, resources); err != nil {
-		return "", fmt.Errorf("error creating cgroup %s: %w", cgroupPath, err)
+		return "", fmt.Errorf("creating cgroup %s: %w", cgroupPath, err)
 	}
 
 	logrus.Debugf("Created cgroup %s", cgroupPath)
@@ -112,17 +112,17 @@ var lvpReleaseLabel = label.ReleaseLabel
 func LabelVolumePath(path string) error {
 	_, mountLabel, err := lvpInitLabels([]string{})
 	if err != nil {
-		return fmt.Errorf("error getting default mountlabels: %w", err)
+		return fmt.Errorf("getting default mountlabels: %w", err)
 	}
 	if err := lvpReleaseLabel(mountLabel); err != nil {
-		return fmt.Errorf("error releasing label %q: %w", mountLabel, err)
+		return fmt.Errorf("releasing label %q: %w", mountLabel, err)
 	}
 
 	if err := lvpRelabel(path, mountLabel, true); err != nil {
 		if err == syscall.ENOTSUP {
 			logrus.Debugf("Labeling not supported on %q", path)
 		} else {
-			return fmt.Errorf("error setting selinux label for %s to %q as shared: %w", path, mountLabel, err)
+			return fmt.Errorf("setting selinux label for %s to %q as shared: %w", path, mountLabel, err)
 		}
 	}
 	return nil
plugin %s: %w", v.Name(), v.Driver(), err) } if resp != nil { data.Status = resp.Status |