Diffstat (limited to 'libpod')
71 files changed, 2165 insertions, 1389 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 12c364993..4dda3a7f0 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -5,6 +5,7 @@ import (
    "strings"
    "sync"
+   "github.com/containers/libpod/libpod/define"
    bolt "github.com/etcd-io/bbolt"
    jsoniter "github.com/json-iterator/go"
    "github.com/pkg/errors"
@@ -133,7 +134,7 @@ func (s *BoltState) Close() error {
// Refresh clears container and pod states after a reboot
func (s *BoltState) Refresh() error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    db, err := s.getDBCon()
@@ -171,13 +172,13 @@ func (s *BoltState) Refresh() error {
        if podBkt == nil {
            // This is neither a pod nor a container
            // Error out on the dangling ID
-           return errors.Wrapf(ErrInternal, "id %s is not a pod or a container", string(id))
+           return errors.Wrapf(define.ErrInternal, "id %s is not a pod or a container", string(id))
        }
        // Get the state
        stateBytes := podBkt.Get(stateKey)
        if stateBytes == nil {
-           return errors.Wrapf(ErrInternal, "pod %s missing state key", string(id))
+           return errors.Wrapf(define.ErrInternal, "pod %s missing state key", string(id))
        }
        state := new(podState)
@@ -210,7 +211,7 @@ func (s *BoltState) Refresh() error {
        stateBytes := ctrBkt.Get(stateKey)
        if stateBytes == nil {
            // Badly formatted container bucket
-           return errors.Wrapf(ErrInternal, "container %s missing state in DB", string(id))
+           return errors.Wrapf(define.ErrInternal, "container %s missing state in DB", string(id))
        }
        state := new(ContainerState)
@@ -243,7 +244,7 @@ func (s *BoltState) Refresh() error {
// the database was first initialized
func (s *BoltState) GetDBConfig() (*DBConfig, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    cfg := new(DBConfig)
@@ -290,7 +291,7 @@ func (s *BoltState) GetDBConfig() (*DBConfig, error) {
// ValidateDBConfig validates paths in the given runtime against the database
func (s *BoltState) ValidateDBConfig(runtime *Runtime) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    db, err := s.getDBCon()
@@ -324,11 +325,11 @@ func (s *BoltState) SetNamespace(ns string) error {
// Container retrieves a single container from the state by its full ID
func (s *BoltState) Container(id string) (*Container, error) {
    if id == "" {
-       return nil, ErrEmptyID
+       return nil, define.ErrEmptyID
    }
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    ctrID := []byte(id)
@@ -362,11 +363,11 @@ func (s *BoltState) Container(id string) (*Container, error) {
// partial ID or name
func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
    if idOrName == "" {
-       return nil, ErrEmptyID
+       return nil, define.ErrEmptyID
    }
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    ctr := new(Container)
@@ -380,11 +381,6 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
    defer s.closeDBCon(db)
    err = db.View(func(tx *bolt.Tx) error {
-       idBucket, err := getIDBucket(tx)
-       if err != nil {
-           return err
-       }
-
        ctrBucket, err := getCtrBucket(tx)
        if err != nil {
            return err
        }
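The change repeated throughout this file (and most of the commit) is mechanical: sentinel errors such as ErrDBClosed, ErrInternal and ErrEmptyID now live in libpod/define, so every bare return and every errors.Wrapf call is requalified. The wrapping style is what keeps this safe: github.com/pkg/errors preserves the cause, so callers can still compare against the sentinel after the move. A minimal self-contained sketch of the pattern; the sentinel here is a local stand-in, not the real define package:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // errDBClosed stands in for define.ErrDBClosed.
    var errDBClosed = errors.New("database connection is closed")

    func refresh(valid bool) error {
        if !valid {
            // Wrapf adds context but keeps errDBClosed as the cause.
            return errors.Wrapf(errDBClosed, "cannot refresh state")
        }
        return nil
    }

    func main() {
        if err := refresh(false); errors.Cause(err) == errDBClosed {
            fmt.Println("still matches the sentinel:", err)
        }
    }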
@@ -435,7 +431,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
        // We were not given a full container ID or name.
        // Search for partial ID matches.
        exists := false
-       err = idBucket.ForEach(func(checkID, checkName []byte) error {
+       err = ctrBucket.ForEach(func(checkID, checkName []byte) error {
            // If the container isn't in our namespace, we
            // can't match it
            if s.namespaceBytes != nil {
@@ -446,7 +442,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
            }
            if strings.HasPrefix(string(checkID), idOrName) {
                if exists {
-                   return errors.Wrapf(ErrCtrExists, "more than one result for container ID %s", idOrName)
+                   return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
                }
                id = checkID
                exists = true
@@ -458,9 +454,9 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
            return err
        } else if !exists {
            if isPod {
-               return errors.Wrapf(ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
+               return errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
            }
-           return errors.Wrapf(ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
+           return errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
        }
        return s.getContainerFromDB(id, ctr, ctrBucket)
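Besides the error requalification, this hunk changes where partial-ID matching iterates: with the id registry bucket gone, LookupContainer walks the containers bucket itself, and a second prefix hit aborts the lookup as ambiguous. A sketch of that logic against a bbolt bucket (the bucket name and error messages are illustrative):

    package sketch

    import (
        "strings"

        bolt "github.com/etcd-io/bbolt"
        "github.com/pkg/errors"
    )

    // lookupByPrefix resolves a unique ID prefix among the top-level keys
    // of the "ctrs" bucket, mirroring the ForEach in the hunk above.
    func lookupByPrefix(tx *bolt.Tx, prefix string) (string, error) {
        bkt := tx.Bucket([]byte("ctrs")) // illustrative bucket name
        if bkt == nil {
            return "", errors.New("containers bucket missing")
        }
        match := ""
        err := bkt.ForEach(func(id, _ []byte) error {
            if strings.HasPrefix(string(id), prefix) {
                if match != "" {
                    return errors.Errorf("more than one result for ID %s", prefix)
                }
                match = string(id)
            }
            return nil
        })
        if err == nil && match == "" {
            err = errors.Errorf("no container with ID prefix %s", prefix)
        }
        return match, err
    }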
@@ -475,11 +471,11 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
// HasContainer checks if a container is present in the state
func (s *BoltState) HasContainer(id string) (bool, error) {
    if id == "" {
-       return false, ErrEmptyID
+       return false, define.ErrEmptyID
    }
    if !s.valid {
-       return false, ErrDBClosed
+       return false, define.ErrDBClosed
    }
    ctrID := []byte(id)
@@ -523,15 +519,15 @@ func (s *BoltState) HasContainer(id string) (bool, error) {
// The container being added cannot belong to a pod
func (s *BoltState) AddContainer(ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !ctr.valid {
-       return ErrCtrRemoved
+       return define.ErrCtrRemoved
    }
    if ctr.config.Pod != "" {
-       return errors.Wrapf(ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
+       return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that belongs to a pod with AddContainer - use AddContainerToPod")
    }
    return s.addContainer(ctr, nil)
@@ -542,11 +538,11 @@ func (s *BoltState) AddContainer(ctr *Container) error {
// pod, use RemoveContainerFromPod
func (s *BoltState) RemoveContainer(ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if ctr.config.Pod != "" {
-       return errors.Wrapf(ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
+       return errors.Wrapf(define.ErrPodExists, "container %s is part of a pod, use RemoveContainerFromPod instead", ctr.ID())
    }
    db, err := s.getDBCon()
@@ -564,15 +560,15 @@ func (s *BoltState) RemoveContainer(ctr *Container) error {
// UpdateContainer updates a container's state from the database
func (s *BoltState) UpdateContainer(ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !ctr.valid {
-       return ErrCtrRemoved
+       return define.ErrCtrRemoved
    }
    if s.namespace != "" && s.namespace != ctr.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
    }
    newState := new(ContainerState)
@@ -595,12 +591,12 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
        ctrToUpdate := ctrBucket.Bucket(ctrID)
        if ctrToUpdate == nil {
            ctr.valid = false
-           return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
+           return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID())
        }
        newStateBytes := ctrToUpdate.Get(stateKey)
        if newStateBytes == nil {
-           return errors.Wrapf(ErrInternal, "container %s does not have a state key in DB", ctr.ID())
+           return errors.Wrapf(define.ErrInternal, "container %s does not have a state key in DB", ctr.ID())
        }
        if err := json.Unmarshal(newStateBytes, newState); err != nil {
@@ -632,15 +628,15 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
// SaveContainer saves a container's current state in the database
func (s *BoltState) SaveContainer(ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !ctr.valid {
-       return ErrCtrRemoved
+       return define.ErrCtrRemoved
    }
    if s.namespace != "" && s.namespace != ctr.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
    }
    stateJSON, err := json.Marshal(ctr.state)
@@ -666,7 +662,7 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
        ctrToSave := ctrBucket.Bucket(ctrID)
        if ctrToSave == nil {
            ctr.valid = false
-           return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
+           return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in DB", ctr.ID())
        }
        // Update the state
@@ -695,15 +691,15 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
// container. If the slice is empty, no containers depend on the given container
func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    if !ctr.valid {
-       return nil, ErrCtrRemoved
+       return nil, define.ErrCtrRemoved
    }
    if s.namespace != "" && s.namespace != ctr.config.Namespace {
-       return nil, errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+       return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
    }
    depCtrs := []string{}
@@ -723,12 +719,12 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
        ctrDB := ctrBucket.Bucket([]byte(ctr.ID()))
        if ctrDB == nil {
            ctr.valid = false
-           return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+           return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
        }
        dependsBkt := ctrDB.Bucket(dependenciesBkt)
        if dependsBkt == nil {
-           return errors.Wrapf(ErrInternal, "container %s has no dependencies bucket", ctr.ID())
+           return errors.Wrapf(define.ErrInternal, "container %s has no dependencies bucket", ctr.ID())
        }
        // Iterate through and add dependencies
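ContainerInUse shows the reverse-dependency layout used all over this state: each container owns a dependencies sub-bucket whose keys are the IDs of containers that depend on it, so the query is a single nested-bucket iteration. A compact sketch with illustrative bucket names:

    package sketch

    import (
        bolt "github.com/etcd-io/bbolt"
        "github.com/pkg/errors"
    )

    // collectDeps returns the keys of ctrs/<id>/deps, i.e. the IDs of
    // containers that depend on ctrID.
    func collectDeps(tx *bolt.Tx, ctrID string) ([]string, error) {
        ctrs := tx.Bucket([]byte("ctrs"))
        if ctrs == nil {
            return nil, errors.New("containers bucket missing")
        }
        ctr := ctrs.Bucket([]byte(ctrID))
        if ctr == nil {
            return nil, errors.Errorf("no container %s in DB", ctrID)
        }
        deps := ctr.Bucket([]byte("deps"))
        if deps == nil {
            return nil, errors.Errorf("container %s has no dependencies bucket", ctrID)
        }
        out := []string{}
        err := deps.ForEach(func(id, _ []byte) error {
            out = append(out, string(id))
            return nil
        })
        return out, err
    }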
@@ -754,7 +750,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
// AllContainers retrieves all the containers in the database
func (s *BoltState) AllContainers() ([]*Container, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    ctrs := []*Container{}
@@ -782,7 +778,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
            // be much less helpful.
            ctrExists := ctrBucket.Bucket(id)
            if ctrExists == nil {
-               return errors.Wrapf(ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
+               return errors.Wrapf(define.ErrInternal, "state is inconsistent - container ID %s in all containers, but container not found", string(id))
            }
            ctr := new(Container)
@@ -794,7 +790,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
                // ignore it safely.
                // We just won't include the container in the
                // results.
-               if errors.Cause(err) != ErrNSMismatch {
+               if errors.Cause(err) != define.ErrNSMismatch {
                    // Even if it's not an NS mismatch, it's
                    // not worth erroring over.
                    // If we do, a single bad container JSON
@@ -821,11 +817,11 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// comment on this function in state.go.
func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !ctr.valid {
-       return ErrCtrRemoved
+       return define.ErrCtrRemoved
    }
    newCfgJSON, err := json.Marshal(newCfg)
@@ -848,7 +844,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
        ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
        if ctrDB == nil {
            ctr.valid = false
-           return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+           return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
        }
        if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
@@ -865,11 +861,11 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
// comment on this function in state.go.
func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    newCfgJSON, err := json.Marshal(newCfg)
@@ -892,7 +888,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
        podDB := podBkt.Bucket([]byte(pod.ID()))
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
        }
        if err := podDB.Put(configKey, newCfgJSON); err != nil {
@@ -907,11 +903,11 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
// Pod retrieves a pod given its full ID
func (s *BoltState) Pod(id string) (*Pod, error) {
    if id == "" {
-       return nil, ErrEmptyID
+       return nil, define.ErrEmptyID
    }
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    podID := []byte(id)
@@ -944,11 +940,11 @@ func (s *BoltState) Pod(id string) (*Pod, error) {
// LookupPod retrieves a pod from full or unique partial ID or name
func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
    if idOrName == "" {
-       return nil, ErrEmptyID
+       return nil, define.ErrEmptyID
    }
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    pod := new(Pod)
@@ -962,11 +958,6 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
    defer s.closeDBCon(db)
    err = db.View(func(tx *bolt.Tx) error {
-       idBucket, err := getIDBucket(tx)
-       if err != nil {
-           return err
-       }
-
        podBkt, err := getPodBucket(tx)
        if err != nil {
            return err
        }
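RewriteContainerConfig and RewritePodConfig share one shape: marshal the replacement config, open a write transaction, locate the object's bucket, and overwrite the config key. A hedged sketch of that shape (bucket and key names are illustrative, not libpod's constants):

    package sketch

    import (
        "encoding/json"

        bolt "github.com/etcd-io/bbolt"
        "github.com/pkg/errors"
    )

    // rewriteConfig overwrites the "config" key stored under root/<id>.
    func rewriteConfig(db *bolt.DB, root, id string, newCfg interface{}) error {
        blob, err := json.Marshal(newCfg)
        if err != nil {
            return errors.Wrapf(err, "error marshalling new configuration")
        }
        return db.Update(func(tx *bolt.Tx) error {
            bkt := tx.Bucket([]byte(root))
            if bkt == nil {
                return errors.Errorf("bucket %s not found in DB", root)
            }
            obj := bkt.Bucket([]byte(id))
            if obj == nil {
                return errors.Errorf("no entry with ID %s found in DB", id)
            }
            return obj.Put([]byte("config"), blob)
        })
    }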
@@ -1014,7 +1005,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
        // They did not give us a full pod name or ID.
        // Search for partial ID matches.
        exists := false
-       err = idBucket.ForEach(func(checkID, checkName []byte) error {
+       err = podBkt.ForEach(func(checkID, checkName []byte) error {
            // If the pod isn't in our namespace, we
            // can't match it
            if s.namespaceBytes != nil {
@@ -1025,7 +1016,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
            }
            if strings.HasPrefix(string(checkID), idOrName) {
                if exists {
-                   return errors.Wrapf(ErrPodExists, "more than one result for ID or name %s", idOrName)
+                   return errors.Wrapf(define.ErrPodExists, "more than one result for ID or name %s", idOrName)
                }
                id = checkID
                exists = true
@@ -1037,9 +1028,9 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
            return err
        } else if !exists {
            if isCtr {
-               return errors.Wrapf(ErrNoSuchPod, "%s is a container, not a pod", idOrName)
+               return errors.Wrapf(define.ErrNoSuchPod, "%s is a container, not a pod", idOrName)
            }
-           return errors.Wrapf(ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
+           return errors.Wrapf(define.ErrNoSuchPod, "no pod with name or ID %s found", idOrName)
        }
        // We might have found a container ID, but it's OK
@@ -1056,11 +1047,11 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
// HasPod checks if a pod with the given ID exists in the state
func (s *BoltState) HasPod(id string) (bool, error) {
    if id == "" {
-       return false, ErrEmptyID
+       return false, define.ErrEmptyID
    }
    if !s.valid {
-       return false, ErrDBClosed
+       return false, define.ErrDBClosed
    }
    podID := []byte(id)
@@ -1103,19 +1094,19 @@ func (s *BoltState) HasPod(id string) (bool, error) {
// PodHasContainer checks if the given pod has a container with the given ID
func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
    if id == "" {
-       return false, ErrEmptyID
+       return false, define.ErrEmptyID
    }
    if !s.valid {
-       return false, ErrDBClosed
+       return false, define.ErrDBClosed
    }
    if !pod.valid {
-       return false, ErrPodRemoved
+       return false, define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return false, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return false, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    ctrID := []byte(id)
@@ -1139,13 +1130,13 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
        podDB := podBkt.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID())
        }
        // Get pod containers bucket
        podCtrs := podDB.Bucket(containersBkt)
        if podCtrs == nil {
-           return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
+           return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID())
        }
        // Don't bother with a namespace check on the container -
@@ -1170,15 +1161,15 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
// PodContainersByID returns the IDs of all containers present in the given pod
func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    if !pod.valid {
-       return nil, ErrPodRemoved
+       return nil, define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return nil, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    podID := []byte(pod.ID())
%q", pod.ID(), pod.config.Namespace, s.namespace) + return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace) } podID := []byte(pod.ID()) @@ -1201,13 +1192,13 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) { podDB := podBkt.Bucket(podID) if podDB == nil { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID()) } // Get pod containers bucket podCtrs := podDB.Bucket(containersBkt) if podCtrs == nil { - return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID()) + return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID()) } // Iterate through all containers in the pod @@ -1232,15 +1223,15 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) { // PodContainers returns all the containers present in the given pod func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) { if !s.valid { - return nil, ErrDBClosed + return nil, define.ErrDBClosed } if !pod.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } if s.namespace != "" && s.namespace != pod.config.Namespace { - return nil, errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace) + return nil, errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace) } podID := []byte(pod.ID()) @@ -1268,13 +1259,13 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) { podDB := podBkt.Bucket(podID) if podDB == nil { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "pod %s not found in database", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "pod %s not found in database", pod.ID()) } // Get pod containers bucket podCtrs := podDB.Bucket(containersBkt) if podCtrs == nil { - return errors.Wrapf(ErrInternal, "pod %s missing containers bucket in DB", pod.ID()) + return errors.Wrapf(define.ErrInternal, "pod %s missing containers bucket in DB", pod.ID()) } // Iterate through all containers in the pod @@ -1303,11 +1294,11 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) { // the sub bucket holding the container dependencies that this volume has func (s *BoltState) AddVolume(volume *Volume) error { if !s.valid { - return ErrDBClosed + return define.ErrDBClosed } if !volume.valid { - return ErrVolumeRemoved + return define.ErrVolumeRemoved } volName := []byte(volume.Name()) @@ -1337,7 +1328,7 @@ func (s *BoltState) AddVolume(volume *Volume) error { // Check if we already have a volume with the given name volExists := allVolsBkt.Get(volName) if volExists != nil { - return errors.Wrapf(ErrVolumeExists, "name %s is in use", volume.Name()) + return errors.Wrapf(define.ErrVolumeExists, "name %s is in use", volume.Name()) } // We are good to add the volume @@ -1369,7 +1360,7 @@ func (s *BoltState) AddVolume(volume *Volume) error { // RemoveVolume removes the given volume from the state func (s *BoltState) RemoveVolume(volume *Volume) error { if !s.valid { - return ErrDBClosed + return define.ErrDBClosed } volName := []byte(volume.Name()) @@ -1400,7 +1391,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { volDB := volBkt.Bucket(volName) if volDB == nil { volume.valid = false - return errors.Wrapf(ErrNoSuchVolume, "volume %s does not 
exist in DB", volume.Name()) + return errors.Wrapf(define.ErrNoSuchVolume, "volume %s does not exist in DB", volume.Name()) } // Check if volume is not being used by any container @@ -1430,7 +1421,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { return errors.Wrapf(err, "error getting list of dependencies from dependencies bucket for volumes %q", volume.Name()) } if len(deps) > 0 { - return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ",")) + return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by container(s) %s", volume.Name(), strings.Join(deps, ",")) } } @@ -1451,7 +1442,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { // AllVolumes returns all volumes present in the state func (s *BoltState) AllVolumes() ([]*Volume, error) { if !s.valid { - return nil, ErrDBClosed + return nil, define.ErrDBClosed } volumes := []*Volume{} @@ -1477,14 +1468,14 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) { // This check can be removed if performance becomes an // issue, but much less helpful errors will be produced if volExists == nil { - return errors.Wrapf(ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id)) + return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id)) } volume := new(Volume) volume.config = new(VolumeConfig) if err := s.getVolumeFromDB(id, volume, volBucket); err != nil { - if errors.Cause(err) != ErrNSMismatch { + if errors.Cause(err) != define.ErrNSMismatch { logrus.Errorf("Error retrieving volume %s from the database: %v", string(id), err) } } else { @@ -1505,11 +1496,11 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) { // Volume retrieves a volume from full name func (s *BoltState) Volume(name string) (*Volume, error) { if name == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } if !s.valid { - return nil, ErrDBClosed + return nil, define.ErrDBClosed } volName := []byte(name) @@ -1541,11 +1532,11 @@ func (s *BoltState) Volume(name string) (*Volume, error) { // HasVolume returns true if the given volume exists in the state, otherwise it returns false func (s *BoltState) HasVolume(name string) (bool, error) { if name == "" { - return false, ErrEmptyID + return false, define.ErrEmptyID } if !s.valid { - return false, ErrDBClosed + return false, define.ErrDBClosed } volName := []byte(name) @@ -1583,11 +1574,11 @@ func (s *BoltState) HasVolume(name string) (bool, error) { // volume. 
@@ -1583,11 +1574,11 @@ func (s *BoltState) HasVolume(name string) (bool, error) {
// volume. If the slice is empty, no containers use the given volume
func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    if !volume.valid {
-       return nil, ErrVolumeRemoved
+       return nil, define.ErrVolumeRemoved
    }
    depCtrs := []string{}
@@ -1612,12 +1603,12 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
        volDB := volBucket.Bucket([]byte(volume.Name()))
        if volDB == nil {
            volume.valid = false
-           return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
+           return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in DB", volume.Name())
        }
        dependsBkt := volDB.Bucket(volDependenciesBkt)
        if dependsBkt == nil {
-           return errors.Wrapf(ErrInternal, "volume %s has no dependencies bucket", volume.Name())
+           return errors.Wrapf(define.ErrInternal, "volume %s has no dependencies bucket", volume.Name())
        }
        // Iterate through and add dependencies
@@ -1649,15 +1640,15 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
// AddPod adds the given pod to the state.
func (s *BoltState) AddPod(pod *Pod) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    podID := []byte(pod.ID())
@@ -1713,11 +1704,11 @@ func (s *BoltState) AddPod(pod *Pod) error {
        // Check if we already have something with the given ID and name
        idExist := idsBkt.Get(podID)
        if idExist != nil {
-           return errors.Wrapf(ErrPodExists, "ID %s is in use", pod.ID())
+           return errors.Wrapf(define.ErrPodExists, "ID %s is in use", pod.ID())
        }
        nameExist := namesBkt.Get(podName)
        if nameExist != nil {
-           return errors.Wrapf(ErrPodExists, "name %s is in use", pod.Name())
+           return errors.Wrapf(define.ErrPodExists, "name %s is in use", pod.Name())
        }
        // We are good to add the pod
@@ -1773,15 +1764,15 @@ func (s *BoltState) AddPod(pod *Pod) error {
// Only empty pods can be removed
func (s *BoltState) RemovePod(pod *Pod) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    podID := []byte(pod.ID())
@@ -1823,7 +1814,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
        podDB := podBkt.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
        }
        // Check if pod is empty
@@ -1835,7 +1826,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
        if podCtrsBkt != nil {
            cursor := podCtrsBkt.Cursor()
            if id, _ := cursor.First(); id != nil {
-               return errors.Wrapf(ErrCtrExists, "pod %s is not empty", pod.ID())
+               return errors.Wrapf(define.ErrCtrExists, "pod %s is not empty", pod.ID())
            }
        }
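RemovePod's not-empty check is worth pausing on: it never iterates the containers bucket. A bbolt cursor positioned with First returns a nil key exactly when the bucket holds no entries, so emptiness is a constant-time probe. Sketch:

    package sketch

    import (
        bolt "github.com/etcd-io/bbolt"
    )

    // bucketIsEmpty reports whether the named sub-bucket has zero keys,
    // using the same cursor.First() probe as the hunk above.
    func bucketIsEmpty(parent *bolt.Bucket, name []byte) bool {
        bkt := parent.Bucket(name)
        if bkt == nil {
            return true // a missing bucket holds nothing
        }
        id, _ := bkt.Cursor().First()
        return id == nil
    }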
@@ -1869,15 +1860,15 @@ func (s *BoltState) RemovePod(pod *Pod) error {
// RemovePodContainers removes all containers in a pod
func (s *BoltState) RemovePodContainers(pod *Pod) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    podID := []byte(pod.ID())
@@ -1918,12 +1909,12 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
        podDB := podBkt.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in DB", pod.ID())
        }
        podCtrsBkt := podDB.Bucket(containersBkt)
        if podCtrsBkt == nil {
-           return errors.Wrapf(ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+           return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
        }
        // Traverse all containers in the pod with a cursor
@@ -1934,7 +1925,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
            if ctr == nil {
                // This should never happen
                // State is inconsistent
-               return errors.Wrapf(ErrNoSuchCtr, "pod %s referenced nonexistant container %s", pod.ID(), string(id))
+               return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistant container %s", pod.ID(), string(id))
            }
            ctrDeps := ctr.Bucket(dependenciesBkt)
            // This should never be nil, but if it is, we're
@@ -1943,7 +1934,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
            err = ctrDeps.ForEach(func(depID, name []byte) error {
                exists := podCtrsBkt.Get(depID)
                if exists == nil {
-                   return errors.Wrapf(ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
+                   return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", string(id), string(depID), pod.ID())
                }
                return nil
            })
@@ -1955,7 +1946,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
            // Dependencies are set, we're clear to remove
            if err := ctrBkt.DeleteBucket(id); err != nil {
-               return errors.Wrapf(ErrInternal, "error deleting container %s from DB", string(id))
+               return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", string(id))
            }
            if err := idsBkt.Delete(id); err != nil {
@@ -1997,19 +1988,19 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
// The container will be added to the state and the pod
func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if !ctr.valid {
-       return ErrCtrRemoved
+       return define.ErrCtrRemoved
    }
    if ctr.config.Pod != pod.ID() {
-       return errors.Wrapf(ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+       return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not part of pod %s", ctr.ID(), pod.ID())
    }
    return s.addContainer(ctr, pod)
@@ -2019,28 +2010,28 @@ func (s *BoltState) AddContainerToPod(pod *Pod, ctr *Container) error {
// The container will also be removed from the state
func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" {
        if s.namespace != pod.config.Namespace {
-           return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+           return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
        }
        if s.namespace != ctr.config.Namespace {
-           return errors.Wrapf(ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
+           return errors.Wrapf(define.ErrNSMismatch, "container %s in in namespace %q but we are in namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
        }
    }
    if ctr.config.Pod == "" {
-       return errors.Wrapf(ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
+       return errors.Wrapf(define.ErrNoSuchPod, "container %s is not part of a pod, use RemoveContainer instead", ctr.ID())
    }
    if ctr.config.Pod != pod.ID() {
-       return errors.Wrapf(ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
+       return errors.Wrapf(define.ErrInvalidArg, "container %s is not part of pod %s", ctr.ID(), pod.ID())
    }
    db, err := s.getDBCon()
@@ -2058,15 +2049,15 @@ func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
// UpdatePod updates a pod's state from the database
func (s *BoltState) UpdatePod(pod *Pod) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    newState := new(podState)
@@ -2088,13 +2079,13 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
        podDB := podBkt.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
        }
        // Get the pod state JSON
        podStateBytes := podDB.Get(stateKey)
        if podStateBytes == nil {
-           return errors.Wrapf(ErrInternal, "pod %s is missing state key in DB", pod.ID())
+           return errors.Wrapf(define.ErrInternal, "pod %s is missing state key in DB", pod.ID())
        }
        if err := json.Unmarshal(podStateBytes, newState); err != nil {
@@ -2115,15 +2106,15 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
// SavePod saves a pod's state to the database
func (s *BoltState) SavePod(pod *Pod) error {
    if !s.valid {
-       return ErrDBClosed
+       return define.ErrDBClosed
    }
    if !pod.valid {
-       return ErrPodRemoved
+       return define.ErrPodRemoved
    }
    if s.namespace != "" && s.namespace != pod.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
+       return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q but we are in namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
    }
    stateJSON, err := json.Marshal(pod.state)
@@ -2148,7 +2139,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
        podDB := podBkt.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in database", pod.ID())
        }
        // Set the pod state JSON
@@ -2168,7 +2159,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
// AllPods returns all pods present in the state
func (s *BoltState) AllPods() ([]*Pod, error) {
    if !s.valid {
-       return nil, ErrDBClosed
+       return nil, define.ErrDBClosed
    }
    pods := []*Pod{}
@@ -2195,7 +2186,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
            // This check can be removed if performance becomes an
            // issue, but much less helpful errors will be produced
            if podExists == nil {
-               return errors.Wrapf(ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
+               return errors.Wrapf(define.ErrInternal, "inconsistency in state - pod %s is in all pods bucket but pod not found", string(id))
            }
            pod := new(Pod)
@@ -2203,7 +2194,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
            pod.state = new(podState)
            if err := s.getPodFromDB(id, pod, podBucket); err != nil {
-               if errors.Cause(err) != ErrNSMismatch {
+               if errors.Cause(err) != define.ErrNSMismatch {
                    logrus.Errorf("Error retrieving pod %s from the database: %v", string(id), err)
                }
            } else {

diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index b7930e158..ee2784cdd 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -6,6 +6,7 @@ import (
    "runtime"
    "strings"
+   "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/pkg/rootless"
    "github.com/containers/storage"
    bolt "github.com/etcd-io/bbolt"
@@ -222,7 +223,7 @@ func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bool, error) {
        return true, nil
    }
-   return true, errors.Wrapf(ErrDBBadConfig, "database %s %q does not match our %s %q",
+   return true, errors.Wrapf(define.ErrDBBadConfig, "database %s %q does not match our %s %q",
        toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue)
}
@@ -260,7 +261,7 @@ func (s *BoltState) closeDBCon(db *bolt.DB) error {
func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(idRegistryBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "id registry bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "id registry bucket not found in DB")
    }
    return bkt, nil
}
@@ -268,7 +269,7 @@ func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(nameRegistryBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "name registry bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "name registry bucket not found in DB")
    }
    return bkt, nil
}
@@ -276,7 +277,7 @@ func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(nsRegistryBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "namespace registry bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "namespace registry bucket not found in DB")
    }
    return bkt, nil
}
@@ -284,7 +285,7 @@ func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(ctrBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "containers bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "containers bucket not found in DB")
    }
    return bkt, nil
}
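Every top-level bucket in boltdb_state_internal.go is reached through a tiny accessor that converts a nil result from tx.Bucket into a wrapped define.ErrDBBadConfig with a descriptive message, so callers never nil-check buckets themselves. The repeated helpers reduce to one pattern (the sentinel here is a local stand-in):

    package sketch

    import (
        bolt "github.com/etcd-io/bbolt"
        "github.com/pkg/errors"
    )

    // errBadConfig stands in for define.ErrDBBadConfig.
    var errBadConfig = errors.New("database configuration mismatch")

    // getBucket mirrors getIDBucket, getCtrBucket and friends: fetch a
    // top-level bucket and turn "missing" into a wrapped sentinel.
    func getBucket(tx *bolt.Tx, name []byte, what string) (*bolt.Bucket, error) {
        bkt := tx.Bucket(name)
        if bkt == nil {
            return nil, errors.Wrapf(errBadConfig, "%s bucket not found in DB", what)
        }
        return bkt, nil
    }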
@@ -292,7 +293,7 @@ func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(allCtrsBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "all containers bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "all containers bucket not found in DB")
    }
    return bkt, nil
}
@@ -300,7 +301,7 @@ func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(podBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "pods bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "pods bucket not found in DB")
    }
    return bkt, nil
}
@@ -308,7 +309,7 @@ func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(allPodsBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "all pods bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "all pods bucket not found in DB")
    }
    return bkt, nil
}
@@ -316,7 +317,7 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(volBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "volumes bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "volumes bucket not found in DB")
    }
    return bkt, nil
}
@@ -324,7 +325,7 @@ func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(allVolsBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "all volumes bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "all volumes bucket not found in DB")
    }
    return bkt, nil
}
@@ -332,28 +333,27 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(runtimeConfigBkt)
    if bkt == nil {
-       return nil, errors.Wrapf(ErrDBBadConfig, "runtime configuration bucket not found in DB")
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "runtime configuration bucket not found in DB")
    }
    return bkt, nil
}

func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
-   valid := true
    ctrBkt := ctrsBkt.Bucket(id)
    if ctrBkt == nil {
-       return errors.Wrapf(ErrNoSuchCtr, "container %s not found in DB", string(id))
+       return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
    }
    if s.namespaceBytes != nil {
        ctrNamespaceBytes := ctrBkt.Get(namespaceKey)
        if !bytes.Equal(s.namespaceBytes, ctrNamespaceBytes) {
-           return errors.Wrapf(ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
+           return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
        }
    }
    configBytes := ctrBkt.Get(configKey)
    if configBytes == nil {
-       return errors.Wrapf(ErrInternal, "container %s missing config key in DB", string(id))
+       return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id))
    }
    if err := json.Unmarshal(configBytes, ctr.config); err != nil {
@@ -379,13 +379,13 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
        ociRuntime, ok := s.runtime.ociRuntimes[runtimeName]
        if !ok {
-           return errors.Wrapf(ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime)
+           return errors.Wrapf(define.ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime)
        }
        ctr.ociRuntime = ociRuntime
    }
    ctr.runtime = s.runtime
-   ctr.valid = valid
+   ctr.valid = true
    return nil
}
@@ -393,19 +393,19 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
    podDB := podBkt.Bucket(id)
    if podDB == nil {
-       return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found", string(id))
+       return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found", string(id))
    }
    if s.namespaceBytes != nil {
        podNamespaceBytes := podDB.Get(namespaceKey)
        if !bytes.Equal(s.namespaceBytes, podNamespaceBytes) {
-           return errors.Wrapf(ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
+           return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
        }
    }
    podConfigBytes := podDB.Get(configKey)
    if podConfigBytes == nil {
-       return errors.Wrapf(ErrInternal, "pod %s is missing configuration key in DB", string(id))
+       return errors.Wrapf(define.ErrInternal, "pod %s is missing configuration key in DB", string(id))
    }
    if err := json.Unmarshal(podConfigBytes, pod.config); err != nil {
@@ -428,12 +428,12 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
    volDB := volBkt.Bucket(name)
    if volDB == nil {
-       return errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found", string(name))
+       return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found", string(name))
    }
    volConfigBytes := volDB.Get(configKey)
    if volConfigBytes == nil {
-       return errors.Wrapf(ErrInternal, "volume %s is missing configuration key in DB", string(name))
+       return errors.Wrapf(define.ErrInternal, "volume %s is missing configuration key in DB", string(name))
    }
    if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
@@ -450,7 +450,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
// If pod is not nil, the container is added to the pod as well
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
    if s.namespace != "" && s.namespace != ctr.config.Namespace {
-       return errors.Wrapf(ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
+       return errors.Wrapf(define.ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
            ctr.ID(), s.namespace, ctr.config.Namespace)
    }
@@ -526,16 +526,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
        podDB = podBucket.Bucket(podID)
        if podDB == nil {
            pod.valid = false
-           return errors.Wrapf(ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
+           return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
        }
        podCtrs = podDB.Bucket(containersBkt)
        if podCtrs == nil {
-           return errors.Wrapf(ErrInternal, "pod %s does not have a containers bucket", pod.ID())
+           return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
        }
        podNS := podDB.Get(namespaceKey)
        if !bytes.Equal(podNS, ctrNamespace) {
-           return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
+           return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
                ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
        }
    }
@@ -543,11 +543,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
        // Check if we already have a container with the given ID and name
        idExist := idsBucket.Get(ctrID)
        if idExist != nil {
-           return errors.Wrapf(ErrCtrExists, "ID %s is in use", ctr.ID())
+           return errors.Wrapf(define.ErrCtrExists, "ID %s is in use", ctr.ID())
        }
        nameExist := namesBucket.Get(ctrName)
        if nameExist != nil {
-           return errors.Wrapf(ErrCtrExists, "name %s is in use", ctr.Name())
+           return errors.Wrapf(define.ErrCtrExists, "name %s is in use", ctr.Name())
        }
        // No overlapping containers
@@ -603,34 +603,34 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
            depCtrBkt := ctrBucket.Bucket(depCtrID)
            if depCtrBkt == nil {
-               return errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
+               return errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
            }
            depCtrPod := depCtrBkt.Get(podIDKey)
            if pod != nil {
                // If we're part of a pod, make sure the dependency is part of the same pod
                if depCtrPod == nil {
-                   return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
+                   return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
                }
                if string(depCtrPod) != pod.ID() {
-                   return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
+                   return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
                }
            } else {
                // If we're not part of a pod, we cannot depend on containers in a pod
                if depCtrPod != nil {
-                   return errors.Wrapf(ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
+                   return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
                }
            }
            depNamespace := depCtrBkt.Get(namespaceKey)
            if !bytes.Equal(ctrNamespace, depNamespace) {
-               return errors.Wrapf(ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
+               return errors.Wrapf(define.ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
            }
            depCtrDependsBkt := depCtrBkt.Bucket(dependenciesBkt)
            if depCtrDependsBkt == nil {
-               return errors.Wrapf(ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
+               return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
            }
            if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil {
                return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr)
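Once a dependency has passed the existence, pod-membership and namespace checks above, addContainer writes the new container's ID and name into the dependency's own dependencies bucket — the reverse edge that ContainerInUse and removeContainer later read with a single lookup. Sketch of the write (illustrative names):

    package sketch

    import (
        bolt "github.com/etcd-io/bbolt"
        "github.com/pkg/errors"
    )

    // registerDependent records ctrID -> ctrName under ctrs/<depID>/deps,
    // the reverse edge of "ctrID depends on depID".
    func registerDependent(ctrs *bolt.Bucket, depID, ctrID, ctrName []byte) error {
        dep := ctrs.Bucket(depID)
        if dep == nil {
            return errors.Errorf("dependency %s does not exist in the DB", depID)
        }
        deps := dep.Bucket([]byte("deps"))
        if deps == nil {
            return errors.Errorf("container %s does not have a dependencies bucket", depID)
        }
        return deps.Put(ctrID, ctrName)
    }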
return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr) @@ -638,7 +638,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { } // Add ctr to pod - if pod != nil { + if pod != nil && podCtrs != nil { if err := podCtrs.Put(ctrID, ctrName); err != nil { return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID()) } @@ -648,7 +648,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { for _, vol := range ctr.config.NamedVolumes { volDB := volBkt.Bucket([]byte(vol.Name)) if volDB == nil { - return errors.Wrapf(ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID()) + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID()) } ctrDepsBkt := volDB.Bucket(volDependenciesBkt) @@ -714,7 +714,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error podDB = podBucket.Bucket(podID) if podDB == nil { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID()) } } @@ -722,21 +722,21 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error ctrExists := ctrBucket.Bucket(ctrID) if ctrExists == nil { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID()) } // Compare namespace // We can't remove containers not in our namespace if s.namespace != "" { if s.namespace != ctr.config.Namespace { - return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) + return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) } if pod != nil && s.namespace != pod.config.Namespace { - return errors.Wrapf(ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace) + return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace) } } - if podDB != nil { + if podDB != nil && pod != nil { // Check if the container is in the pod, remove it if it is podCtrs := podDB.Bucket(containersBkt) if podCtrs == nil { @@ -745,7 +745,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } else { ctrInPod := podCtrs.Get(ctrID) if ctrInPod == nil { - return errors.Wrapf(ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID()) } if err := podCtrs.Delete(ctrID); err != nil { return errors.Wrapf(err, "error removing container %s from pod %s", ctr.ID(), pod.ID()) @@ -756,7 +756,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error // Does the container have dependencies? 
@@ -756,7 +756,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error {
    // Does the container have dependencies?
    ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
    if ctrDepsBkt == nil {
-       return errors.Wrapf(ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
+       return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
    }
    deps := []string{}
    err = ctrDepsBkt.ForEach(func(id, value []byte) error {
@@ -768,11 +768,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error {
        return err
    }
    if len(deps) != 0 {
-       return errors.Wrapf(ErrCtrExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
+       return errors.Wrapf(define.ErrCtrExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
    }
    if err := ctrBucket.DeleteBucket(ctrID); err != nil {
-       return errors.Wrapf(ErrInternal, "error deleting container %s from DB", ctr.ID())
+       return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
    }
    if err := idsBucket.Delete(ctrID); err != nil {

diff --git a/libpod/common_test.go b/libpod/common_test.go
index df730098e..93ca7bc71 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -7,6 +7,7 @@ import (
    "testing"
    "time"
+   "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/libpod/lock"
    "github.com/cri-o/ocicni/pkg/ocicni"
    "github.com/opencontainers/runtime-tools/generate"
@@ -49,7 +50,7 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error) {
            },
        },
        state: &ContainerState{
-           State: ContainerStateRunning,
+           State: define.ContainerStateRunning,
            ConfigPath: "/does/not/exist/specs/" + id,
            RunDir: "/does/not/exist/tmp/",
            Mounted: true,
@@ -88,13 +89,13 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error) {
    ctr.config.Labels["test"] = "testing"
-   // Allocate a lock for the container
-   lock, err := manager.AllocateLock()
+   // Allocate a containerLock for the container
+   containerLock, err := manager.AllocateLock()
    if err != nil {
        return nil, err
    }
-   ctr.lock = lock
-   ctr.config.LockID = lock.ID()
+   ctr.lock = containerLock
+   ctr.config.LockID = containerLock.ID()
    return ctr, nil
}
@@ -113,13 +114,13 @@ func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
        valid: true,
    }
-   // Allocate a lock for the pod
-   lock, err := manager.AllocateLock()
+   // Allocate a podLock for the pod
+   podLock, err := manager.AllocateLock()
    if err != nil {
        return nil, err
    }
-   pod.lock = lock
-   pod.config.LockID = lock.ID()
+   pod.lock = podLock
+   pod.config.LockID = podLock.ID()
    return pod, nil
}
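The common_test.go renames are not cosmetic: the local variable lock shadowed the imported github.com/containers/libpod/libpod/lock package for the rest of each function, so any later reference to the package's identifiers would fail to compile. A minimal illustration of the hazard (signatures inferred from the test code above):

    package sketch

    import (
        "github.com/containers/libpod/libpod/lock"
    )

    func allocate(manager lock.Manager) (lock.Locker, error) {
        // Writing `lock, err := manager.AllocateLock()` here would shadow
        // the lock package, making names like lock.Locker unreachable
        // below this line - hence containerLock and podLock.
        containerLock, err := manager.AllocateLock()
        if err != nil {
            return nil, err
        }
        return containerLock, nil
    }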
diff --git a/libpod/container.go b/libpod/container.go
index 3a0f60fd9..a9b512de9 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -11,39 +11,16 @@ import (
    "github.com/containernetworking/cni/pkg/types"
    cnitypes "github.com/containernetworking/cni/pkg/types/current"
    "github.com/containers/image/manifest"
+   "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/libpod/lock"
    "github.com/containers/libpod/pkg/namespaces"
+   "github.com/containers/libpod/pkg/rootless"
    "github.com/containers/storage"
    "github.com/cri-o/ocicni/pkg/ocicni"
    spec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
)
-// ContainerStatus represents the current state of a container
-type ContainerStatus int
-
-const (
-   // ContainerStateUnknown indicates that the container is in an error
-   // state where information about it cannot be retrieved
-   ContainerStateUnknown ContainerStatus = iota
-   // ContainerStateConfigured indicates that the container has had its
-   // storage configured but it has not been created in the OCI runtime
-   ContainerStateConfigured ContainerStatus = iota
-   // ContainerStateCreated indicates the container has been created in
-   // the OCI runtime but not started
-   ContainerStateCreated ContainerStatus = iota
-   // ContainerStateRunning indicates the container is currently executing
-   ContainerStateRunning ContainerStatus = iota
-   // ContainerStateStopped indicates that the container was running but has
-   // exited
-   ContainerStateStopped ContainerStatus = iota
-   // ContainerStatePaused indicates that the container has been paused
-   ContainerStatePaused ContainerStatus = iota
-   // ContainerStateExited indicates the the container has stopped and been
-   // cleaned up
-   ContainerStateExited ContainerStatus = iota
-)
-
// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod
const CgroupfsDefaultCgroupParent = "/libpod_parent"
@@ -51,6 +28,10 @@ const CgroupfsDefaultCgroupParent = "/libpod_parent"
// manager in libpod
const SystemdDefaultCgroupParent = "machine.slice"
+// SystemdDefaultRootlessCgroupParent is the cgroup parent for the systemd cgroup
+// manager in libpod when running as rootless
+const SystemdDefaultRootlessCgroupParent = "user.slice"
+
// JournaldLogging is the string conmon expects to specify journald logging
const JournaldLogging = "journald"
@@ -166,7 +147,7 @@ type Container struct {
// It is stored on disk in a tmpfs and recreated on reboot
type ContainerState struct {
    // The current state of the running container
-   State ContainerStatus `json:"state"`
+   State define.ContainerStatus `json:"state"`
    // The path to the JSON OCI runtime spec for this container
    ConfigPath string `json:"configPath,omitempty"`
    // RunDir is a per-boot directory for container content
@@ -190,6 +171,8 @@ type ContainerState struct {
    OOMKilled bool `json:"oomKilled,omitempty"`
    // PID is the PID of a running container
    PID int `json:"pid,omitempty"`
+   // ConmonPID is the PID of the container's conmon
+   ConmonPID int `json:"conmonPid,omitempty"`
    // ExecSessions contains active exec sessions for container
    // Exec session ID is mapped to PID of exec process
    ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
@@ -425,51 +408,6 @@ type ContainerNamedVolume struct {
    Options []string `json:"options,omitempty"`
}
-// ContainerStatus returns a string representation for users
-// of a container state
-func (t ContainerStatus) String() string {
-   switch t {
-   case ContainerStateUnknown:
-       return "unknown"
-   case ContainerStateConfigured:
-       return "configured"
-   case ContainerStateCreated:
-       return "created"
-   case ContainerStateRunning:
-       return "running"
-   case ContainerStateStopped:
-       return "stopped"
-   case ContainerStatePaused:
-       return "paused"
-   case ContainerStateExited:
-       return "exited"
-   }
-   return "bad state"
-}
-
-// StringToContainerStatus converts a string representation of a containers
-// status into an actual container status type
-func StringToContainerStatus(status string) (ContainerStatus, error) {
-   switch status {
-   case ContainerStateUnknown.String():
-       return ContainerStateUnknown, nil
-   case ContainerStateConfigured.String():
-       return ContainerStateConfigured, nil
-   case ContainerStateCreated.String():
-       return ContainerStateCreated, nil
-   case ContainerStateRunning.String():
-       return ContainerStateRunning, nil
-   case ContainerStateStopped.String():
-       return ContainerStateStopped, nil
-   case ContainerStatePaused.String():
-       return ContainerStatePaused, nil
-   case ContainerStateExited.String():
-       return ContainerStateExited, nil
-   default:
-       return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
-   }
-}
-
// Config accessors
// Unlocked
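The ContainerStatus type, its String method and StringToContainerStatus are deleted here because they move into libpod/define, which this file now imports. Assuming the relocated API keeps the signatures shown in the removed block, the round trip looks like this:

    package sketch

    import (
        "fmt"

        "github.com/containers/libpod/libpod/define"
    )

    func describe(raw string) error {
        // Parse a user-supplied state name, then print its canonical form.
        status, err := define.StringToContainerStatus(raw)
        if err != nil {
            return err
        }
        fmt.Printf("%s -> %s\n", raw, status.String())
        return nil
    }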
- return ContainerStatePaused, nil - case ContainerStateExited.String(): - return ContainerStateExited, nil - default: - return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) - } -} - // Config accessors // Unlocked @@ -820,13 +758,13 @@ func (c *Container) WorkingDir() string { // Require locking // State returns the current state of the container -func (c *Container) State() (ContainerStatus, error) { +func (c *Container) State() (define.ContainerStatus, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return ContainerStateUnknown, err + return define.ContainerStateUnknown, err } } return c.state.State, nil @@ -916,7 +854,7 @@ func (c *Container) OOMKilled() (bool, error) { return c.state.OOMKilled, nil } -// PID returns the PID of the container +// PID returns the PID of the container. // If the container is not running, a pid of 0 will be returned. No error will // occur. func (c *Container) PID() (int, error) { @@ -932,6 +870,22 @@ func (c *Container) PID() (int, error) { return c.state.PID, nil } +// ConmonPID Returns the PID of the container's conmon process. +// If the container is not running, a PID of 0 will be returned. No error will +// occur. +func (c *Container) ConmonPID() (int, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return -1, err + } + } + + return c.state.ConmonPID, nil +} + // ExecSessions retrieves active exec sessions running in the container func (c *Container) ExecSessions() ([]string, error) { if !c.batched { @@ -965,7 +919,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) { session, ok := c.state.ExecSessions[id] if !ok { - return nil, errors.Wrapf(ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID()) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID()) } returnSession := new(ExecSession) @@ -990,7 +944,7 @@ func (c *Container) IPs() ([]net.IPNet, error) { } if !c.config.CreateNetNS { - return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID()) + return nil, errors.Wrapf(define.ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID()) } ips := make([]net.IPNet, 0) @@ -1018,7 +972,7 @@ func (c *Container) Routes() ([]types.Route, error) { } if !c.config.CreateNetNS { - return nil, errors.Wrapf(ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID()) + return nil, errors.Wrapf(define.ErrInvalidArg, "container %s network namespace is not managed by libpod", c.ID()) } routes := make([]types.Route, 0) @@ -1094,12 +1048,12 @@ func (c *Container) NamespacePath(ns LinuxNS) (string, error) { } } - if c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused { - return "", errors.Wrapf(ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID()) + if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused { + return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID()) } if ns == InvalidNS { - return "", errors.Wrapf(ErrInvalidArg, "invalid namespace requested from container %s", c.ID()) + return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID()) } return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, ns.String()), nil @@ -1111,9 
+1065,13 @@ func (c *Container) CGroupPath() (string, error) { case CgroupfsCgroupsManager: return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil case SystemdCgroupsManager: + if rootless.IsRootless() { + uid := rootless.GetRootlessUID() + return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil + } return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil default: - return "", errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.CgroupManager) + return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.CgroupManager) } } diff --git a/libpod/container.log.go b/libpod/container.log.go new file mode 100644 index 000000000..7d0cd5bfb --- /dev/null +++ b/libpod/container.log.go @@ -0,0 +1,73 @@ +package libpod + +import ( + "os" + + "github.com/containers/libpod/libpod/logs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Log is a runtime function that can read one or more container logs. +func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChannel chan *logs.LogLine) error { + for _, ctr := range containers { + if err := ctr.ReadLog(options, logChannel); err != nil { + return err + } + } + return nil } + +// ReadLog reads a container's log based on the input options and returns log lines over a channel +func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error { + // TODO Skip sending logs until journald logs can be read + // TODO make this not a magic string + if c.LogDriver() == JournaldLogging { + return c.readFromJournal(options, logChannel) + } + return c.readFromLogFile(options, logChannel) +} + +func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *logs.LogLine) error { + t, tailLog, err := logs.GetLogFile(c.LogPath(), options) + if err != nil { + // If the log file does not exist, this is not fatal.
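
// The WaitGroup contract above is easiest to see from the consumer side.
// A caller-side sketch (hypothetical code, not part of this file; it assumes
// a *Runtime named runtime and a *Container named ctr, and that the caller
// closes the channel once the WaitGroup drains, mirroring the producer
// goroutine below):
//
//	logChannel := make(chan *logs.LogLine)
//	options := &logs.LogOptions{WaitGroup: &sync.WaitGroup{}}
//	go func() {
//		if err := runtime.Log([]*libpod.Container{ctr}, options, logChannel); err != nil {
//			logrus.Error(err)
//		}
//		options.WaitGroup.Wait()
//		close(logChannel)
//	}()
//	for line := range logChannel {
//		fmt.Println(line.Msg)
//	}
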
+ if os.IsNotExist(errors.Cause(err)) { + return nil + } + return errors.Wrapf(err, "unable to read log file %s for container %s", c.LogPath(), c.ID()) + } + options.WaitGroup.Add(1) + if len(tailLog) > 0 { + for _, nll := range tailLog { + nll.CID = c.ID() + if nll.Since(options.Since) { + logChannel <- nll + } + } + } + + go func() { + var partial string + for line := range t.Lines { + nll, err := logs.NewLogLine(line.Text) + if err != nil { + logrus.Error(err) + continue + } + if nll.Partial() { + partial = partial + nll.Msg + continue + } else if !nll.Partial() && len(partial) > 0 { + nll.Msg = partial + nll.Msg + partial = "" + } + nll.CID = c.ID() + if nll.Since(options.Since) { + logChannel <- nll + } + } + options.WaitGroup.Done() + }() + return nil +} diff --git a/libpod/container_api.go b/libpod/container_api.go index 370e3e5d9..3dd84b02c 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -7,9 +7,9 @@ import ( "io/ioutil" "os" "strconv" - "sync" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/lookup" "github.com/containers/storage/pkg/stringid" @@ -36,10 +36,10 @@ func (c *Container) Init(ctx context.Context) (err error) { } } - if !(c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateStopped || - c.state.State == ContainerStateExited) { - return errors.Wrapf(ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID()) + if !(c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateStopped || + c.state.State == define.ContainerStateExited) { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID()) } // don't recursively start @@ -54,7 +54,7 @@ func (c *Container) Init(ctx context.Context) (err error) { return err } - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { // Reinitialize the container return c.reinit(ctx, false) } @@ -119,20 +119,24 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams, attachChan := make(chan error) // We need to ensure that we don't return until start() fired in attach. - // Use a WaitGroup to sync this. - wg := new(sync.WaitGroup) - wg.Add(1) + // Use a channel to sync + startedChan := make(chan bool) // Attach to the container before starting it go func() { - if err := c.attach(streams, keys, resize, true, wg); err != nil { + if err := c.attach(streams, keys, resize, true, startedChan); err != nil { attachChan <- err } close(attachChan) }() - wg.Wait() - c.newContainerEvent(events.Attach) + select { + case err := <-attachChan: + return nil, err + case <-startedChan: + c.newContainerEvent(events.Attach) + } + return attachChan, nil } @@ -177,15 +181,15 @@ func (c *Container) StopWithTimeout(timeout uint) error { } } - if c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateUnknown || - c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers.
%s is in state %s", c.ID(), c.state.State.String()) } - if c.state.State == ContainerStateStopped || - c.state.State == ContainerStateExited { - return ErrCtrStopped + if c.state.State == define.ContainerStateStopped || + c.state.State == define.ContainerStateExited { + return define.ErrCtrStopped } defer c.newContainerEvent(events.Stop) return c.stop(timeout) @@ -202,8 +206,8 @@ func (c *Container) Kill(signal uint) error { } } - if c.state.State != ContainerStateRunning { - return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String()) + if c.state.State != define.ContainerStateRunning { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String()) } defer c.newContainerEvent(events.Kill) @@ -240,8 +244,8 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir conState := c.state.State // TODO can probably relax this once we track exec sessions - if conState != ContainerStateRunning { - return errors.Wrapf(ErrCtrStateInvalid, "cannot exec into container that is not running") + if conState != define.ContainerStateRunning { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running") } if privileged || c.config.Privileged { capList = caps.GetAllCapabilities() @@ -398,10 +402,10 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re c.lock.Unlock() } - if c.state.State != ContainerStateCreated && - c.state.State != ContainerStateRunning && - c.state.State != ContainerStateExited { - return errors.Wrapf(ErrCtrStateInvalid, "can only attach to created or running containers") + if c.state.State != define.ContainerStateCreated && + c.state.State != define.ContainerStateRunning && + c.state.State != define.ContainerStateExited { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers") } defer c.newContainerEvent(events.Attach) return c.attach(streams, keys, resize, false, nil) @@ -439,13 +443,13 @@ func (c *Container) Unmount(force bool) error { return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID()) } if mounted == 1 { - if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID()) + if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID()) } if len(c.state.ExecSessions) != 0 { - return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID()) + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID()) } - return errors.Wrapf(ErrInternal, "can't unmount %s last mount, it is still in use", c.ID()) + return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID()) } } defer c.newContainerEvent(events.Unmount) @@ -463,11 +467,11 @@ func (c *Container) Pause() error { } } - if c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "%q is already paused", c.ID()) + if c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID()) } - if c.state.State != 
ContainerStateRunning { - return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State) + if c.state.State != define.ContainerStateRunning { + return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State) } defer c.newContainerEvent(events.Pause) return c.pause() @@ -484,8 +488,8 @@ func (c *Container) Unpause() error { } } - if c.state.State != ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID()) + if c.state.State != define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID()) } defer c.newContainerEvent(events.Unpause) return c.unpause() @@ -509,7 +513,7 @@ func (c *Container) Export(path string) error { // AddArtifact creates and writes to an artifact file for the container func (c *Container) AddArtifact(name string, data []byte) error { if !c.valid { - return ErrCtrRemoved + return define.ErrCtrRemoved } return ioutil.WriteFile(c.getArtifactPath(name), data, 0740) @@ -518,7 +522,7 @@ func (c *Container) AddArtifact(name string, data []byte) error { // GetArtifact reads the specified artifact file from the container func (c *Container) GetArtifact(name string) ([]byte, error) { if !c.valid { - return nil, ErrCtrRemoved + return nil, define.ErrCtrRemoved } return ioutil.ReadFile(c.getArtifactPath(name)) @@ -527,7 +531,7 @@ func (c *Container) GetArtifact(name string) ([]byte, error) { // RemoveArtifact deletes the specified artifacts file func (c *Container) RemoveArtifact(name string) error { if !c.valid { - return ErrCtrRemoved + return define.ErrCtrRemoved } return os.Remove(c.getArtifactPath(name)) @@ -542,7 +546,7 @@ func (c *Container) Wait() (int32, error) { // code. The argument is the interval at which checks the container's status. func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) { if !c.valid { - return -1, ErrCtrRemoved + return -1, define.ErrCtrRemoved } err := wait.PollImmediateInfinite(waitTimeout, func() (bool, error) { @@ -577,8 +581,8 @@ func (c *Container) Cleanup(ctx context.Context) error { } // Check if state is good - if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID()) + if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID()) } // Handle restart policy. 
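
The hunks above all follow one pattern: a public API entry point syncs the container, then gates on define.ContainerStatus before acting. A caller-side sketch of the same gating, assuming only APIs visible in this diff (Container.State, StopWithTimeout, the define error values); the package and helper names are hypothetical:

package example

import (
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/libpod/define"
	"github.com/pkg/errors"
)

// ensureStopped stops ctr only from states where stopping is meaningful.
func ensureStopped(ctr *libpod.Container, timeout uint) error {
	// State() locks and syncs with the OCI runtime before reporting.
	status, err := ctr.State()
	if err != nil {
		return err
	}
	switch status {
	case define.ContainerStateStopped, define.ContainerStateExited:
		// Already down; nothing to do.
		return nil
	case define.ContainerStateRunning:
		// Sends the container's stop signal, then SIGKILL after timeout.
		return ctr.StopWithTimeout(timeout)
	default:
		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is %s", ctr.ID(), status.String())
	}
}
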
@@ -596,7 +600,7 @@ func (c *Container) Cleanup(ctx context.Context) error { // Check if we have active exec sessions if len(c.state.ExecSessions) != 0 { - return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID()) + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID()) } defer c.newContainerEvent(events.Cleanup) return c.cleanup(ctx) @@ -655,9 +659,9 @@ func (c *Container) Sync() error { // If runtime knows about the container, update its status in runtime // And then save back to disk - if (c.state.State != ContainerStateUnknown) && - (c.state.State != ContainerStateConfigured) && - (c.state.State != ContainerStateExited) { + if (c.state.State != define.ContainerStateUnknown) && + (c.state.State != define.ContainerStateConfigured) && + (c.state.State != define.ContainerStateExited) { oldState := c.state.State if err := c.ociRuntime.updateContainerStatus(c, true); err != nil { return err @@ -686,27 +690,27 @@ func (c *Container) Refresh(ctx context.Context) error { } wasCreated := false - if c.state.State == ContainerStateCreated { + if c.state.State == define.ContainerStateCreated { wasCreated = true } wasRunning := false - if c.state.State == ContainerStateRunning { + if c.state.State == define.ContainerStateRunning { wasRunning = true } wasPaused := false - if c.state.State == ContainerStatePaused { + if c.state.State == define.ContainerStatePaused { wasPaused = true } // First, unpause the container if it's paused - if c.state.State == ContainerStatePaused { + if c.state.State == define.ContainerStatePaused { if err := c.unpause(); err != nil { return err } } // Next, if the container is running, stop it - if c.state.State == ContainerStateRunning { + if c.state.State == define.ContainerStateRunning { if err := c.stop(c.config.StopTimeout); err != nil { return err } @@ -723,7 +727,7 @@ func (c *Container) Refresh(ctx context.Context) error { // If the container is in ContainerStateStopped, we need to delete it // from the runtime and clear conmon state - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { if err := c.delete(ctx); err != nil { return err } diff --git a/libpod/container_attach_linux.go b/libpod/container_attach_linux.go index 7e9b7697b..17b09fccc 100644 --- a/libpod/container_attach_linux.go +++ b/libpod/container_attach_linux.go @@ -8,8 +8,8 @@ import ( "net" "os" "path/filepath" - "sync" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/kubeutils" "github.com/containers/libpod/utils" "github.com/docker/docker/pkg/term" @@ -19,10 +19,6 @@ import ( "k8s.io/client-go/tools/remotecommand" ) -//#include <sys/un.h> -// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;} -import "C" - /* Sync with stdpipe_t in conmon.c */ const ( AttachPipeStdin = 1 @@ -32,32 +28,35 @@ const ( // Attach to the given container // Does not check if state is appropriate -func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error { +func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error { if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput { - return errors.Wrapf(ErrInvalidArg, "must provide at least one stream to attach to") - } - - // Check the validity of the 
provided keys first - var err error - detachKeys := []byte{} - if len(keys) > 0 { - detachKeys, err = term.ToBytes(keys) - if err != nil { - return errors.Wrapf(err, "invalid detach keys") - } + return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") } logrus.Debugf("Attaching to container %s", c.ID()) - return c.attachContainerSocket(resize, detachKeys, streams, startContainer, wg) + return c.attachContainerSocket(resize, keys, streams, startContainer, started) } // attachContainerSocket connects to the container's attach socket and deals with the IO. -// wg is only required if startContainer is true +// started is only required if startContainer is true // TODO add a channel to allow interrupting -func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, detachKeys []byte, streams *AttachStreams, startContainer bool, wg *sync.WaitGroup) error { - if startContainer && wg == nil { - return errors.Wrapf(ErrInternal, "wait group not passed when startContainer set") +func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSize, keys string, streams *AttachStreams, startContainer bool, started chan bool) error { + if startContainer && started == nil { + return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set") + } + + // Use default detach keys when keys aren't passed or specified in libpod.conf + if len(keys) == 0 { + keys = DefaultDetachKeys + } + + // Check the validity of the provided keys + detachKeys := []byte{} + var err error + detachKeys, err = term.ToBytes(keys) + if err != nil { + return errors.Wrapf(err, "invalid detach keys") } kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) { @@ -77,7 +76,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi socketPath := c.AttachSocketPath() - maxUnixLength := int(C.unix_path_length()) + maxUnixLength := unixPathLength() if maxUnixLength < len(socketPath) { socketPath = socketPath[0:maxUnixLength] } @@ -96,7 +95,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi if err := c.start(); err != nil { return err } - wg.Done() + started <- true } receiveStdoutError := make(chan error) @@ -118,7 +117,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi case err := <-receiveStdoutError: return err case err := <-stdinDone: - if err == ErrDetach { + if err == define.ErrDetach { return err } if streams.AttachOutput || streams.AttachError { @@ -146,7 +145,9 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO default: logrus.Infof("Received unexpected attach type %+d", buf[0]) } - + if dst == nil { + return errors.New("output destination cannot be nil") + } if doWrite { nw, ew := dst.Write(buf[1:nr]) if ew != nil { diff --git a/libpod/container_attach_linux_cgo.go b/libpod/container_attach_linux_cgo.go new file mode 100644 index 000000000..d81243360 --- /dev/null +++ b/libpod/container_attach_linux_cgo.go @@ -0,0 +1,11 @@ +//+build linux,cgo + +package libpod + +//#include <sys/un.h> +// extern int unix_path_length(){struct sockaddr_un addr; return sizeof(addr.sun_path) - 1;} +import "C" + +func unixPathLength() int { + return int(C.unix_path_length()) +} diff --git a/libpod/container_attach_linux_nocgo.go b/libpod/container_attach_linux_nocgo.go new file mode 100644 index 000000000..a514a555d --- /dev/null +++ b/libpod/container_attach_linux_nocgo.go @@ -0,0 +1,7 @@ +//+build linux,!cgo + 
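
// The nocgo fallback below hard-codes what the cgo variant above computes:
// on Linux, sockaddr_un's sun_path field is 108 bytes, one of which is
// reserved for the terminating NUL, leaving 107 usable. A cgo-free sketch
// that derives the same value (an alternative for illustration, assuming
// golang.org/x/sys/unix; the helper name is made up):
//
//	import "golang.org/x/sys/unix"
//
//	func unixPathLengthSys() int {
//		// RawSockaddrUnix.Path is Go's view of the sun_path array.
//		return len(unix.RawSockaddrUnix{}.Path) - 1
//	}
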
+package libpod + +func unixPathLength() int { + return 107 +} diff --git a/libpod/container_attach_unsupported.go b/libpod/container_attach_unsupported.go index 9e8badeaf..c27ce0799 100644 --- a/libpod/container_attach_unsupported.go +++ b/libpod/container_attach_unsupported.go @@ -3,11 +3,10 @@ package libpod import ( - "sync" - + "github.com/containers/libpod/libpod/define" "k8s.io/client-go/tools/remotecommand" ) -func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, wg *sync.WaitGroup) error { - return ErrNotImplemented +func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error { + return define.ErrNotImplemented } diff --git a/libpod/container_commit.go b/libpod/container_commit.go index 5e6ddcbd1..17586bfad 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -9,6 +9,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" is "github.com/containers/image/storage" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/pkg/errors" @@ -28,9 +29,6 @@ type ContainerCommitOptions struct { Changes []string } -// ChangeCmds is the list of valid Changes commands to passed to the Commit call -var ChangeCmds = []string{"CMD", "ENTRYPOINT", "ENV", "EXPOSE", "LABEL", "ONBUILD", "STOPSIGNAL", "USER", "VOLUME", "WORKDIR"} - // Commit commits the changes between a container and its image, creating a new // image func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*image.Image, error) { @@ -51,7 +49,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } } - if c.state.State == ContainerStateRunning && options.Pause { + if c.state.State == define.ContainerStateRunning && options.Pause { if err := c.ociRuntime.pauseContainer(c); err != nil { return nil, errors.Wrapf(err, "error pausing container %q", c.ID()) } diff --git a/libpod/container_graph.go b/libpod/container_graph.go index da93be77d..50dbdfbe4 100644 --- a/libpod/container_graph.go +++ b/libpod/container_graph.go @@ -4,6 +4,7 @@ import ( "context" "strings" + "github.com/containers/libpod/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,7 +44,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { // Get the dep's node depNode, ok := graph.nodes[dep] if !ok { - return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep) } // Add the dependent node to the node's dependencies @@ -68,7 +69,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { if err != nil { return nil, err } else if cycle { - return nil, errors.Wrapf(ErrInternal, "cycle found in container dependency graph") + return nil, errors.Wrapf(define.ErrInternal, "cycle found in container dependency graph") } return graph, nil @@ -133,7 +134,7 @@ func detectCycles(graph *containerGraph) (bool, error) { if info.lowLink == info.index { l := len(stack) if l == 0 { - return false, errors.Wrapf(ErrInternal, "empty stack in detectCycles") + return false, errors.Wrapf(define.ErrInternal, "empty stack in detectCycles") } // Pop off the stack @@ -143,7 +144,7 @@ func 
detectCycles(graph *containerGraph) (bool, error) { // Popped item is no longer on the stack, mark as such topInfo, ok := nodes[topOfStack.id] if !ok { - return false, errors.Wrapf(ErrInternal, "error finding node info for %s", topOfStack.id) + return false, errors.Wrapf(define.ErrInternal, "error finding node info for %s", topOfStack.id) } topInfo.onStack = false @@ -186,7 +187,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError if setError { // Mark us as visited, and set an error ctrsVisited[node.id] = true - ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id) + ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id) // Hit anyone who depends on us, and set errors on them too for _, successor := range node.dependedOn { @@ -226,7 +227,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError } else if len(depsStopped) > 0 { // Our dependencies are not running depsList := strings.Join(depsStopped, ",") - ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList) + ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList) ctrErrored = true } @@ -243,13 +244,13 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError // Start the container (only if it is not running) if !ctrErrored { - if !restart && node.container.state.State != ContainerStateRunning { + if !restart && node.container.state.State != define.ContainerStateRunning { if err := node.container.initAndStart(ctx); err != nil { ctrErrored = true ctrErrors[node.id] = err } } - if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown { + if restart && node.container.state.State != define.ContainerStatePaused && node.container.state.State != define.ContainerStateUnknown { if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil { ctrErrored = true ctrErrors[node.id] = err diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 3ac774060..2de78254c 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -5,6 +5,7 @@ import ( "time" "github.com/containers/image/manifest" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/driver" "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -144,6 +145,7 @@ type InspectContainerState struct { OOMKilled bool `json:"OOMKilled"` Dead bool `json:"Dead"` Pid int `json:"Pid"` + ConmonPid int `json:"ConmonPid,omitempty"` ExitCode int32 `json:"ExitCode"` Error string `json:"Error"` // TODO StartedAt time.Time `json:"StartedAt"` @@ -204,12 +206,12 @@ func (c *Container) Inspect(size bool) (*InspectContainerData, error) { func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) { config := c.config runtimeInfo := c.state - spec, err := c.specFromState() + stateSpec, err := c.specFromState() if err != nil { return nil, err } - // Process is allowed to be nil in the spec + // Process is allowed to be nil in the OCI spec args := []string{} if config.Spec.Process != nil { args = config.Spec.Process.Args @@ -242,7 +244,7 @@ func (c *Container) getContainerInspectData(size
bool, driverData *driver.Data) } } - mounts, err := c.getInspectMounts(spec) + mounts, err := c.getInspectMounts(stateSpec) if err != nil { return nil, err } @@ -253,13 +255,14 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) Path: path, Args: args, State: &InspectContainerState{ - OciVersion: spec.Version, + OciVersion: stateSpec.Version, Status: runtimeInfo.State.String(), - Running: runtimeInfo.State == ContainerStateRunning, - Paused: runtimeInfo.State == ContainerStatePaused, + Running: runtimeInfo.State == define.ContainerStateRunning, + Paused: runtimeInfo.State == define.ContainerStatePaused, OOMKilled: runtimeInfo.OOMKilled, Dead: runtimeInfo.State.String() == "bad state", Pid: runtimeInfo.PID, + ConmonPid: runtimeInfo.ConmonPID, ExitCode: runtimeInfo.ExitCode, Error: "", // can't get yet StartedAt: runtimeInfo.StartedTime, @@ -282,9 +285,9 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) Driver: driverData.Name, MountLabel: config.MountLabel, ProcessLabel: config.ProcessLabel, - EffectiveCaps: spec.Process.Capabilities.Effective, - BoundingCaps: spec.Process.Capabilities.Bounding, - AppArmorProfile: spec.Process.ApparmorProfile, + EffectiveCaps: stateSpec.Process.Capabilities.Effective, + BoundingCaps: stateSpec.Process.Capabilities.Bounding, + AppArmorProfile: stateSpec.Process.ApparmorProfile, ExecIDs: execIDs, GraphDriver: driverData, Mounts: mounts, @@ -335,7 +338,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) // Get information on the container's network namespace (if present) data = c.getContainerNetworkInfo(data) - inspectConfig, err := c.generateInspectContainerConfig(spec) + inspectConfig, err := c.generateInspectContainerConfig(stateSpec) if err != nil { return nil, err } @@ -367,58 +370,41 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec) ([]InspectMount, error) return inspectMounts, nil } - // We need to parse all named volumes and mounts into maps, so we don't - // end up with repeated lookups for each user volume. - // Map destination to struct, as destination is what is stored in - // UserVolumes. - namedVolumes := make(map[string]*ContainerNamedVolume) - mounts := make(map[string]spec.Mount) - for _, namedVol := range c.config.NamedVolumes { - namedVolumes[namedVol.Dest] = namedVol - } - for _, mount := range ctrSpec.Mounts { - mounts[mount.Destination] = mount - } + namedVolumes, mounts := c.sortUserVolumes(ctrSpec) + for _, volume := range namedVolumes { + mountStruct := InspectMount{} + mountStruct.Type = "volume" + mountStruct.Destination = volume.Dest + mountStruct.Name = volume.Name + + // For src and driver, we need to look up the named + // volume. + volFromDB, err := c.runtime.state.Volume(volume.Name) + if err != nil { + return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) + } + mountStruct.Driver = volFromDB.Driver() + mountStruct.Source = volFromDB.MountPoint() - for _, vol := range c.config.UserVolumes { - // We need to look up the volumes. - // First: is it a named volume? - if volume, ok := namedVolumes[vol]; ok { - mountStruct := InspectMount{} - mountStruct.Type = "volume" - mountStruct.Destination = volume.Dest - mountStruct.Name = volume.Name - - // For src and driver, we need to look up the named - // volume. 
- volFromDB, err := c.runtime.state.Volume(volume.Name) - if err != nil { - return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) - } - mountStruct.Driver = volFromDB.Driver() - mountStruct.Source = volFromDB.MountPoint() - - parseMountOptionsForInspect(volume.Options, &mountStruct) - - inspectMounts = append(inspectMounts, mountStruct) - } else if mount, ok := mounts[vol]; ok { - // It's a mount. - // Is it a tmpfs? If so, discard. - if mount.Type == "tmpfs" { - continue - } - - mountStruct := InspectMount{} - mountStruct.Type = "bind" - mountStruct.Source = mount.Source - mountStruct.Destination = mount.Destination - - parseMountOptionsForInspect(mount.Options, &mountStruct) - - inspectMounts = append(inspectMounts, mountStruct) + parseMountOptionsForInspect(volume.Options, &mountStruct) + + inspectMounts = append(inspectMounts, mountStruct) + } + for _, mount := range mounts { + // It's a mount. + // Is it a tmpfs? If so, discard. + if mount.Type == "tmpfs" { + continue } - // We couldn't find a mount. Log a warning. - logrus.Warnf("Could not find mount at destination %q when building inspect output for container %s", vol, c.ID()) + + mountStruct := InspectMount{} + mountStruct.Type = "bind" + mountStruct.Source = mount.Source + mountStruct.Destination = mount.Destination + + parseMountOptionsForInspect(mount.Options, &mountStruct) + + inspectMounts = append(inspectMounts, mountStruct) } return inspectMounts, nil diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 5d824908c..c409da96a 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/ctime" "github.com/containers/libpod/pkg/hooks" @@ -23,7 +24,7 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -155,7 +156,7 @@ func (c *Container) waitForExitFileAndSync() error { // Reset our state c.state.ExitCode = -1 c.state.FinishedTime = time.Now() - c.state.State = ContainerStateStopped + c.state.State = define.ContainerStateStopped if err2 := c.save(); err2 != nil { logrus.Errorf("Error saving container %s state: %v", c.ID(), err2) @@ -240,10 +241,10 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er // Is the container running again? 
// If so, we don't have to do anything - if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused { + if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { return false, nil - } else if c.state.State == ContainerStateUnknown { - return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!") + } else if c.state.State == define.ContainerStateUnknown { + return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!") } c.newContainerEvent(events.Restart) @@ -266,13 +267,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er return false, err } - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { // Reinitialize the container if we need to if err := c.reinit(ctx, true); err != nil { return false, err } - } else if c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateExited { + } else if c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateExited { // Initialize the container if err := c.init(ctx, true); err != nil { return false, err @@ -294,9 +295,9 @@ func (c *Container) syncContainer() error { } // If runtime knows about the container, update its status in runtime // And then save back to disk - if (c.state.State != ContainerStateUnknown) && - (c.state.State != ContainerStateConfigured) && - (c.state.State != ContainerStateExited) { + if (c.state.State != define.ContainerStateUnknown) && + (c.state.State != define.ContainerStateConfigured) && + (c.state.State != define.ContainerStateExited) { oldState := c.state.State // TODO: optionally replace this with a stat for the exit file if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { @@ -306,8 +307,8 @@ func (c *Container) syncContainer() error { if c.state.State != oldState { // Check for a restart policy match if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo && - (oldState == ContainerStateRunning || oldState == ContainerStatePaused) && - (c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) && + (oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) && + (c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) && !c.state.StoppedByUser { c.state.RestartPolicyMatch = true } @@ -319,7 +320,7 @@ func (c *Container) syncContainer() error { } if !c.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID()) } return nil @@ -332,16 +333,16 @@ func (c *Container) setupStorage(ctx context.Context) error { defer span.Finish() if !c.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", c.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID()) } - if c.state.State != ContainerStateConfigured { - return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID()) + if c.state.State != define.ContainerStateConfigured { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID()) } // Need both an image ID and image name, plus a bool telling us whether to use the image configuration if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || 
c.config.RootfsImageName == "") { - return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image") + return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image") } options := storage.ContainerOptions{ @@ -427,8 +428,8 @@ func (c *Container) setupStorage(ctx context.Context) error { // Tear down a container's storage prior to removal func (c *Container) teardownStorage() error { - if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID()) + if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID()) } artifacts := filepath.Join(c.config.StaticDir, artifactsDir) @@ -461,10 +462,11 @@ func (c *Container) teardownStorage() error { // It does not save the results - assumes the database will do that for us func resetState(state *ContainerState) error { state.PID = 0 + state.ConmonPID = 0 state.Mountpoint = "" state.Mounted = false - if state.State != ContainerStateExited { - state.State = ContainerStateConfigured + if state.State != define.ContainerStateExited { + state.State = define.ContainerStateConfigured } state.ExecSessions = make(map[string]*ExecSession) state.NetworkStatus = nil @@ -488,7 +490,7 @@ func (c *Container) refresh() error { } if !c.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID()) } // We need to get the container's temporary directory from c/storage @@ -563,7 +565,7 @@ func (c *Container) removeConmonFiles() error { if !os.IsNotExist(err) { return errors.Wrapf(err, "error running stat on container %s exit file", c.ID()) } - } else if err == nil { + } else { // Rename should replace the old exit file (if it exists) if err := os.Rename(exitFile, oldExitFile); err != nil { return errors.Wrapf(err, "error renaming container %s exit file", c.ID()) @@ -576,11 +578,11 @@ func (c *Container) removeConmonFiles() error { func (c *Container) export(path string) error { mountPoint := c.state.Mountpoint if !c.state.Mounted { - mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel) + containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel) if err != nil { return errors.Wrapf(err, "error mounting container %q", c.ID()) } - mountPoint = mount + mountPoint = containerMount defer func() { if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil { logrus.Errorf("error unmounting container %q: %v", c.ID(), err) @@ -618,7 +620,7 @@ func (c *Container) isStopped() (bool, error) { if err != nil { return true, err } - return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil + return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil } // save container state to the database @@ -634,11 +636,11 @@ func (c *Container) save() error { // Otherwise, this function will return with error if there are dependencies of this container that aren't running. 
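
// A caller-side sketch of the recursive path (hypothetical snippet; it
// assumes the recursive flag that the public Start API threads through to
// prepareToStart below):
//
//	// Start ctr, first bringing up any dependencies that are not yet
//	// running; dependency failures surface as define.ErrCtrStateInvalid.
//	if err := ctr.Start(ctx, true); err != nil {
//		logrus.Error(err)
//	}
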
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) { // Container must be created or stopped to be started - if !(c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateCreated || - c.state.State == ContainerStateStopped || - c.state.State == ContainerStateExited) { - return errors.Wrapf(ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID()) + if !(c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateCreated || + c.state.State == define.ContainerStateStopped || + c.state.State == define.ContainerStateExited) { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID()) } if !recursive { @@ -663,13 +665,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err return err } - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { // Reinitialize the container if we need to if err := c.reinit(ctx, false); err != nil { return err } - } else if c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateExited { + } else if c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateExited { // Or initialize it if necessary if err := c.init(ctx, false); err != nil { return err @@ -686,7 +688,7 @@ func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error { } if len(notRunning) > 0 { depString := strings.Join(notRunning, ",") - return errors.Wrapf(ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString) + return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString) } return nil @@ -724,7 +726,7 @@ func (c *Container) startDependencies(ctx context.Context) error { if len(graph.nodes) == 0 { return nil } - return errors.Wrapf(ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID()) } ctrErrors := make(map[string]error) @@ -740,7 +742,7 @@ func (c *Container) startDependencies(ctx context.Context) error { for _, e := range ctrErrors { logrus.Errorf("%q", e) } - return errors.Wrapf(ErrInternal, "error starting some containers") + return errors.Wrapf(define.ErrInternal, "error starting some containers") } return nil } @@ -772,7 +774,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error { } // if the dependency is already running, we can assume its dependencies are also running // so no need to add them to those we need to start - if status != ContainerStateRunning { + if status != define.ContainerStateRunning { visited[depID] = dep if err := dep.getAllDependencies(visited); err != nil { return err @@ -804,7 +806,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) { if err != nil { return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID()) } - if state != ContainerStateRunning { + if state != define.ContainerStateRunning { notRunning = append(notRunning, dep) } depCtrs[dep] = depCtr @@ -826,14 +828,14 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container for _, dep := range deps { depCtr, ok := depCtrs[dep] if !ok { - return nil, errors.Wrapf(ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to 
checkDependenciesRunning", c.ID(), dep) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep) } if err := c.syncContainer(); err != nil { return nil, err } - if depCtr.state.State != ContainerStateRunning { + if depCtr.state.State != define.ContainerStateRunning { notRunning = append(notRunning, dep) } } @@ -864,18 +866,18 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { span.SetTag("struct", "container") defer span.Finish() - // Generate the OCI spec - spec, err := c.generateSpec(ctx) + // Generate the OCI newSpec + newSpec, err := c.generateSpec(ctx) if err != nil { return err } - // Save the OCI spec to disk - if err := c.saveSpec(spec); err != nil { + // Save the OCI newSpec to disk + if err := c.saveSpec(newSpec); err != nil { return err } - // With the spec complete, do an OCI create + // With the newSpec complete, do an OCI create if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil { return err } @@ -884,7 +886,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { c.state.ExitCode = 0 c.state.Exited = false - c.state.State = ContainerStateCreated + c.state.State = define.ContainerStateCreated c.state.StoppedByUser = false c.state.RestartPolicyMatch = false @@ -915,7 +917,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error { // If the container is not ContainerStateStopped or // ContainerStateCreated, do nothing. - if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated { + if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated { return nil } @@ -931,10 +933,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error { // If we were Stopped, we are now Exited, as we've removed ourself // from the runtime. // If we were Created, we are now Configured. 
- if c.state.State == ContainerStateStopped { - c.state.State = ContainerStateExited - } else if c.state.State == ContainerStateCreated { - c.state.State = ContainerStateConfigured + if c.state.State == define.ContainerStateStopped { + c.state.State = define.ContainerStateExited + } else if c.state.State == define.ContainerStateCreated { + c.state.State = define.ContainerStateConfigured } if c.valid { @@ -973,17 +975,17 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error { // Does not lock or check validity func (c *Container) initAndStart(ctx context.Context) (err error) { // If we are ContainerStateUnknown, throw an error - if c.state.State == ContainerStateUnknown { - return errors.Wrapf(ErrCtrStateInvalid, "container %s is in an unknown state", c.ID()) + if c.state.State == define.ContainerStateUnknown { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID()) } // If we are running, do nothing - if c.state.State == ContainerStateRunning { + if c.state.State == define.ContainerStateRunning { return nil } // If we are paused, throw an error - if c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "cannot start paused container %s", c.ID()) + if c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID()) } defer func() { @@ -1000,14 +1002,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) { // If we are ContainerStateStopped we need to remove from runtime // And reset to ContainerStateConfigured - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { logrus.Debugf("Recreating container %s in OCI runtime", c.ID()) if err := c.reinit(ctx, false); err != nil { return err } - } else if c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateExited { + } else if c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateExited { if err := c.init(ctx, false); err != nil { return err } @@ -1028,7 +1030,7 @@ func (c *Container) start() error { } logrus.Debugf("Started container %s", c.ID()) - c.state.State = ContainerStateRunning + c.state.State = define.ContainerStateRunning if c.config.HealthCheckConfig != nil { if err := c.updateHealthStatus(HealthCheckStarting); err != nil { @@ -1052,6 +1054,8 @@ func (c *Container) stop(timeout uint) error { return err } + c.state.PID = 0 + c.state.ConmonPID = 0 c.state.StoppedByUser = true if err := c.save(); err != nil { return errors.Wrapf(err, "error saving container %s state after stopping", c.ID()) @@ -1069,7 +1073,7 @@ func (c *Container) pause() error { logrus.Debugf("Paused container %s", c.ID()) - c.state.State = ContainerStatePaused + c.state.State = define.ContainerStatePaused return c.save() } @@ -1082,20 +1086,20 @@ func (c *Container) unpause() error { logrus.Debugf("Unpaused container %s", c.ID()) - c.state.State = ContainerStateRunning + c.state.State = define.ContainerStateRunning return c.save() } // Internal, non-locking function to restart a container func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) { - if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state") + if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused { + return 
errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state") } c.newContainerEvent(events.Restart) - if c.state.State == ContainerStateRunning { + if c.state.State == define.ContainerStateRunning { if err := c.stop(timeout); err != nil { return err } @@ -1111,13 +1115,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e return err } - if c.state.State == ContainerStateStopped { + if c.state.State == define.ContainerStateStopped { // Reinitialize the container if we need to if err := c.reinit(ctx, false); err != nil { return err } - } else if c.state.State == ContainerStateConfigured || - c.state.State == ContainerStateExited { + } else if c.state.State == define.ContainerStateConfigured || + c.state.State == define.ContainerStateExited { // Initialize the container if err := c.init(ctx, false); err != nil { return err @@ -1173,8 +1177,8 @@ func (c *Container) cleanupStorage() error { return nil } - for _, mount := range c.config.Mounts { - if err := c.unmountSHM(mount); err != nil { + for _, containerMount := range c.config.Mounts { + if err := c.unmountSHM(containerMount); err != nil { return err } } @@ -1405,14 +1409,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten } return nil, err } - hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0) + ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0) if err != nil { return nil, err } - if len(hooks) > 0 || config.Hooks != nil { - logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir) + if len(ociHooks) > 0 || config.Hooks != nil { + logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir) } - for i, hook := range hooks { + for i, hook := range ociHooks { allHooks[i] = hook } } @@ -1470,13 +1474,6 @@ func (c *Container) unmount(force bool) error { return nil } -// getExcludedCGroups returns a string slice of cgroups we want to exclude -// because runc or other components are unaware of them. -func getExcludedCGroups() (excludes []string) { - excludes = []string{"rdma"} - return -} - // this should be from chrootarchive. func (c *Container) copyWithTarFromImage(src, dest string) error { mountpoint, err := c.mount() @@ -1498,17 +1495,17 @@ func (c *Container) copyWithTarFromImage(src, dest string) error { // If it is, we'll remove the container anyways. // Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error { - if c.state.State == ContainerStateUnknown { - return errors.Wrapf(ErrCtrStateInvalid, "container %s is in invalid state", c.ID()) + if c.state.State == define.ContainerStateUnknown { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID()) } - if c.state.State == ContainerStateRunning || - c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String()) + if c.state.State == define.ContainerStateRunning || + c.state.State == define.ContainerStatePaused { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String()) } if len(c.state.ExecSessions) != 0 { - return errors.Wrapf(ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID()) + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID()) } return nil @@ -1550,3 +1547,34 @@ func (c *Container) prepareCheckpointExport() (err error) { return nil } + +// sortUserVolumes sorts the volumes specified for a container +// between named and normal volumes +func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) { + namedUserVolumes := []*ContainerNamedVolume{} + userMounts := []spec.Mount{} + + // We need to parse all named volumes and mounts into maps, so we don't + // end up with repeated lookups for each user volume. + // Map destination to struct, as destination is what is stored in + // UserVolumes. + namedVolumes := make(map[string]*ContainerNamedVolume) + mounts := make(map[string]spec.Mount) + for _, namedVol := range c.config.NamedVolumes { + namedVolumes[namedVol.Dest] = namedVol + } + for _, mount := range ctrSpec.Mounts { + mounts[mount.Destination] = mount + } + + for _, vol := range c.config.UserVolumes { + if volume, ok := namedVolumes[vol]; ok { + namedUserVolumes = append(namedUserVolumes, volume) + } else if mount, ok := mounts[vol]; ok { + userMounts = append(userMounts, mount) + } else { + logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID()) + } + } + return namedUserVolumes, userMounts +} diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 50a2e2d44..686a595de 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -20,8 +20,10 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/secrets" + "github.com/containers/libpod/libpod/define" crioAnnotations "github.com/containers/libpod/pkg/annotations" "github.com/containers/libpod/pkg/apparmor" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/criu" "github.com/containers/libpod/pkg/lookup" "github.com/containers/libpod/pkg/resolvconf" @@ -183,9 +185,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // If network namespace was requested, add it now if c.config.CreateNetNS { if c.config.PostConfigureNetNS { - g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, "") + if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, ""); err != nil { + return nil, err + } } else { - g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, 
c.state.NetNS.Path()) + if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil { + return nil, err + } } } @@ -349,7 +355,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { g.AddProcessEnv("container", "libpod") } - if rootless.IsRootless() { + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + if rootless.IsRootless() && !unified { g.SetLinuxCgroupsPath("") } else if c.runtime.config.CgroupManager == SystemdCgroupsManager { // When runc is set to use Systemd as a cgroup manager, it @@ -409,7 +419,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if rootPropagation != "" { logrus.Debugf("set root propagation to %q", rootPropagation) - g.SetLinuxRootPropagation(rootPropagation) + if err := g.SetLinuxRootPropagation(rootPropagation); err != nil { + return nil, err + } } // Warning: precreate hooks may alter g.Config in place. @@ -555,7 +567,9 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) { if err != nil { return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog) } - logFile.Close() + if err := logFile.Close(); err != nil { + logrus.Errorf("unable to close log file: %q", err) + } if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil { return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog) } @@ -567,8 +581,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return err } - if c.state.State != ContainerStateRunning { - return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State) + if c.state.State != define.ContainerStateRunning { + return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State) } if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { @@ -599,7 +613,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO logrus.Debugf("Checkpointed container %s", c.ID()) if !options.KeepRunning { - c.state.State = ContainerStateStopped + c.state.State = define.ContainerStateStopped // Cleanup Storage and Network if err := c.cleanup(ctx); err != nil { @@ -614,9 +628,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO "config.dump", "spec.dump", } - for _, delete := range cleanup { - file := filepath.Join(c.bundlePath(), delete) - os.Remove(file) + for _, del := range cleanup { + file := filepath.Join(c.bundlePath(), del) + if err := os.Remove(file); err != nil { + logrus.Debugf("unable to remove file %s", file) + } } } @@ -658,8 +674,8 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti return err } - if (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) { - return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) + if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) } if options.TargetFile != "" { @@ -696,7 +712,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err != nil { return err } - json.Unmarshal(networkJSON, &networkStatus) + if err := json.Unmarshal(networkJSON, &networkStatus); err != nil { + return err + } // Take the first IP address var IP net.IP if len(networkStatus) > 0 { @@ -738,7 
+756,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // We want to have the same network namespace as before. if c.config.CreateNetNS { - g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()) + if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil { + return err + } } if err := c.makeBindMounts(); err != nil { @@ -763,7 +783,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti } // Cleanup for a working restore. - c.removeConmonFiles() + if err := c.removeConmonFiles(); err != nil { + return err + } // Save the OCI spec to disk if err := c.saveSpec(g.Spec()); err != nil { @@ -775,7 +797,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti logrus.Debugf("Restored container %s", c.ID()) - c.state.State = ContainerStateRunning + c.state.State = define.ContainerStateRunning if !options.Keep { // Delete all checkpoint related files. At this point, in theory, all files @@ -787,8 +809,8 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err) } cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status"} - for _, delete := range cleanup { - file := filepath.Join(c.bundlePath(), delete) + for _, del := range cleanup { + file := filepath.Join(c.bundlePath(), del) err = os.Remove(file) if err != nil { logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err) @@ -818,14 +840,14 @@ func (c *Container) makeBindMounts() error { // will recreate. Only do this if we aren't sharing them with // another container. if c.config.NetNsCtr == "" { - if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok { - if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok { + if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) { return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID()) } delete(c.state.BindMounts, "/etc/resolv.conf") } - if path, ok := c.state.BindMounts["/etc/hosts"]; ok { - if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok { + if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) { return errors.Wrapf(err, "error removing container %s hosts", c.ID()) } delete(c.state.BindMounts, "/etc/hosts") @@ -962,10 +984,10 @@ func (c *Container) makeBindMounts() error { // generateResolvConf generates a container's resolv.conf func (c *Container) generateResolvConf() (string, error) { resolvConf := "/etc/resolv.conf" - for _, ns := range c.config.Spec.Linux.Namespaces { - if ns.Type == spec.NetworkNamespace { - if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") { - definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf") + for _, namespace := range c.config.Spec.Linux.Namespaces { + if namespace.Type == spec.NetworkNamespace { + if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") { + definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf") _, err := os.Stat(definedPath) if err == nil { resolvConf = definedPath @@ -1090,10 +1112,10 @@ func (c *Container) generatePasswd() (string, error) { if c.config.User == "" { return "", nil } - spec := strings.SplitN(c.config.User, ":", 2) - userspec :=
spec[0] - if len(spec) > 1 { - groupspec = spec[1] + splitSpec := strings.SplitN(c.config.User, ":", 2) + userspec := splitSpec[0] + if len(splitSpec) > 1 { + groupspec = splitSpec[1] } // If a non numeric User, then don't generate passwd uid, err := strconv.ParseUint(userspec, 10, 32) @@ -1131,7 +1153,7 @@ func (c *Container) generatePasswd() (string, error) { if err != nil { return "", errors.Wrapf(err, "failed to create temporary passwd file") } - if os.Chmod(passwdFile, 0644); err != nil { + if err := os.Chmod(passwdFile, 0644); err != nil { return "", err } return passwdFile, nil diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go index f707b350c..6fa19a778 100644 --- a/libpod/container_internal_unsupported.go +++ b/libpod/container_internal_unsupported.go @@ -5,35 +5,36 @@ package libpod import ( "context" + "github.com/containers/libpod/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" ) func (c *Container) mountSHM(shmOptions string) error { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) unmountSHM(mount string) error { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) prepare() (err error) { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) cleanupNetwork() error { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { - return nil, ErrNotImplemented + return nil, define.ErrNotImplemented } func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) error { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) copyOwnerAndPerms(source, dest string) error { diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go index e549673a6..8a87a8796 100644 --- a/libpod/container_log_linux.go +++ b/libpod/container_log_linux.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/containers/libpod/libpod/logs" journal "github.com/coreos/go-systemd/sdjournal" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -27,7 +28,7 @@ const ( bufLen = 16384 ) -func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error { +func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error { var config journal.JournalReaderConfig config.NumFromTail = options.Tail config.Formatter = journalFormatter @@ -79,7 +80,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin // because we are reusing bytes, we need to make // sure the old data doesn't get into the new line bytestr := string(bytes[:ec]) - logLine, err2 := newLogLine(bytestr) + logLine, err2 := logs.NewLogLine(bytestr) if err2 != nil { logrus.Error(err2) continue @@ -98,7 +99,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin func journalFormatter(entry *journal.JournalEntry) (string, error) { usec := entry.RealtimeTimestamp - tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logTimeFormat) + tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat) output := fmt.Sprintf("%s ", tsString) priority, ok := entry.Fields["PRIORITY"] if !ok { @@ -114,9 +115,9 @@ func journalFormatter(entry 
*journal.JournalEntry) (string, error) { // if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P" if _, ok := entry.Fields["CONTAINER_PARTIAL_MESSAGE"]; ok { - output += fmt.Sprintf("%s ", partialLogType) + output += fmt.Sprintf("%s ", logs.PartialLogType) } else { - output += fmt.Sprintf("%s ", fullLogType) + output += fmt.Sprintf("%s ", logs.FullLogType) } // Finally, append the message @@ -129,12 +130,12 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) { } type FollowBuffer struct { - logChannel chan *LogLine + logChannel chan *logs.LogLine } func (f FollowBuffer) Write(p []byte) (int, error) { bytestr := string(p) - logLine, err := newLogLine(bytestr) + logLine, err := logs.NewLogLine(bytestr) if err != nil { return -1, err } diff --git a/libpod/container_log_unsupported.go b/libpod/container_log_unsupported.go index 0ec5740e2..2c4492b10 100644 --- a/libpod/container_log_unsupported.go +++ b/libpod/container_log_unsupported.go @@ -3,9 +3,11 @@ package libpod import ( + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/logs" "github.com/pkg/errors" ) -func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error { - return errors.Wrapf(ErrOSNotSupported, "Journald logging only enabled with systemd on linux") +func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error { + return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux") } diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index 2e0e83c05..ce471838d 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/rootless" "github.com/containers/psgo" "github.com/pkg/errors" @@ -18,7 +19,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) { if err != nil { return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID()) } - if conStat != ContainerStateRunning { + if conStat != define.ContainerStateRunning { return nil, errors.Errorf("top can only be used on running containers") } @@ -60,9 +61,3 @@ func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, } return res, nil } - -// GetContainerPidInformationDescriptors returns a string slice of all supported -// format descriptors of GetContainerPidInformation. -func GetContainerPidInformationDescriptors() ([]string, error) { - return psgo.ListDescriptors(), nil -} diff --git a/libpod/container_top_unsupported.go b/libpod/container_top_unsupported.go index 1e6fb836d..382c98b54 100644 --- a/libpod/container_top_unsupported.go +++ b/libpod/container_top_unsupported.go @@ -2,6 +2,8 @@ package libpod +import "github.com/containers/libpod/libpod/define" + // GetContainerPidInformation returns process-related data of all processes in // the container. The output data can be controlled via the `descriptors` // argument which expects format descriptors and supports all AIXformat @@ -11,11 +13,5 @@ package libpod // // For more details, please refer to github.com/containers/psgo. func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) { - return nil, ErrNotImplemented -} - -// GetContainerPidInformationDescriptors returns a string slice of all supported -// format descriptors of GetContainerPidInformation. 
-func GetContainerPidInformationDescriptors() ([]string, error) { - return nil, ErrNotImplemented + return nil, define.ErrNotImplemented } diff --git a/libpod/define/config.go b/libpod/define/config.go new file mode 100644 index 000000000..d8d6ccf55 --- /dev/null +++ b/libpod/define/config.go @@ -0,0 +1,20 @@ +package define + +var ( + // DefaultInitPath is the default path to the container-init binary + DefaultInitPath = "/usr/libexec/podman/catatonit" + // DefaultInfraImage to use for infra container + DefaultInfraImage = "k8s.gcr.io/pause:3.1" + // DefaultInfraCommand to be run in an infra container + DefaultInfraCommand = "/pause" +) + +// CtrRemoveTimeout is the default number of seconds to wait after stopping a container +// before sending the kill signal +const CtrRemoveTimeout = 10 + +// InfoData holds the info type, i.e. store, host, etc., and the data for each type +type InfoData struct { + Type string + Data map[string]interface{} +} diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go new file mode 100644 index 000000000..ab2527b3e --- /dev/null +++ b/libpod/define/containerstate.go @@ -0,0 +1,73 @@ +package define + +import "github.com/pkg/errors" + +// ContainerStatus represents the current state of a container +type ContainerStatus int + +const ( + // ContainerStateUnknown indicates that the container is in an error + // state where information about it cannot be retrieved + ContainerStateUnknown ContainerStatus = iota + // ContainerStateConfigured indicates that the container has had its + // storage configured but it has not been created in the OCI runtime + ContainerStateConfigured ContainerStatus = iota + // ContainerStateCreated indicates the container has been created in + // the OCI runtime but not started + ContainerStateCreated ContainerStatus = iota + // ContainerStateRunning indicates the container is currently executing + ContainerStateRunning ContainerStatus = iota + // ContainerStateStopped indicates that the container was running but has + // exited + ContainerStateStopped ContainerStatus = iota + // ContainerStatePaused indicates that the container has been paused + ContainerStatePaused ContainerStatus = iota + // ContainerStateExited indicates that the container has stopped and been + // cleaned up + ContainerStateExited ContainerStatus = iota +) + +// String returns a string representation for users +// of a container state +func (t ContainerStatus) String() string { + switch t { + case ContainerStateUnknown: + return "unknown" + case ContainerStateConfigured: + return "configured" + case ContainerStateCreated: + return "created" + case ContainerStateRunning: + return "running" + case ContainerStateStopped: + return "stopped" + case ContainerStatePaused: + return "paused" + case ContainerStateExited: + return "exited" + } + return "bad state" +} + +// StringToContainerStatus converts a string representation of a container's +// status into an actual container status type +func StringToContainerStatus(status string) (ContainerStatus, error) { + switch status { + case ContainerStateUnknown.String(): + return ContainerStateUnknown, nil + case ContainerStateConfigured.String(): + return ContainerStateConfigured, nil + case ContainerStateCreated.String(): + return ContainerStateCreated, nil + case ContainerStateRunning.String(): + return ContainerStateRunning, nil + case ContainerStateStopped.String(): + return ContainerStateStopped, nil + case ContainerStatePaused.String(): + return ContainerStatePaused, nil + case
ContainerStateExited.String(): + return ContainerStateExited, nil + default: + return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) + } +} diff --git a/libpod/errors.go b/libpod/define/errors.go index cca0935ec..14643ced1 100644 --- a/libpod/errors.go +++ b/libpod/define/errors.go @@ -1,4 +1,4 @@ -package libpod +package define import ( "errors" diff --git a/libpod/version.go b/libpod/define/version.go index d2b99a275..0f9f49050 100644 --- a/libpod/version.go +++ b/libpod/define/version.go @@ -1,4 +1,4 @@ -package libpod +package define import ( "runtime" diff --git a/libpod/events.go b/libpod/events.go index 13bb5bdde..be21e510a 100644 --- a/libpod/events.go +++ b/libpod/events.go @@ -1,7 +1,10 @@ package libpod import ( + "fmt" + "github.com/containers/libpod/libpod/events" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -79,3 +82,55 @@ func (r *Runtime) Events(options events.ReadOptions) error { } return eventer.Read(options) } + +// GetEvents reads the event log and returns events based on input filters +func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) { + var ( + logEvents []*events.Event + readErr error + ) + eventChannel := make(chan *events.Event) + options := events.ReadOptions{ + EventChannel: eventChannel, + Filters: filters, + FromStart: true, + Stream: false, + } + eventer, err := r.newEventer() + if err != nil { + return nil, err + } + go func() { + readErr = eventer.Read(options) + }() + for e := range eventChannel { + logEvents = append(logEvents, e) + } + // eventer.Read closes the event channel when it finishes, so readErr + // may only be inspected after the channel has been drained; checking + // it immediately after starting the goroutine would race with Read. + if readErr != nil { + return nil, readErr + } + return logEvents, nil +} + +// GetLastContainerEvent takes a container name or ID and an event status and returns +// the last occurrence of the container event +func (r *Runtime) GetLastContainerEvent(nameOrID string, containerEvent events.Status) (*events.Event, error) { + // check to make sure the event.Status is valid + if _, err := events.StringToStatus(containerEvent.String()); err != nil { + return nil, err + } + filters := []string{ + fmt.Sprintf("container=%s", nameOrID), + fmt.Sprintf("event=%s", containerEvent), + "type=container", + } + containerEvents, err := r.GetEvents(filters) + if err != nil { + return nil, err + } + if len(containerEvents) < 1 { + return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String()) + } + // return the last element in the slice + return containerEvents[len(containerEvents)-1], nil +} diff --git a/libpod/events/config.go b/libpod/events/config.go index 810988205..b9f01f3a5 100644 --- a/libpod/events/config.go +++ b/libpod/events/config.go @@ -2,6 +2,8 @@ package events import ( "time" + + "github.com/pkg/errors" ) // EventerType ...
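The new define.ContainerStatus type and its helpers above are a plain lookup API. A minimal sketch (not part of this diff) of how a caller might round-trip the container state strings, assuming only the define package as introduced here:

package main

import (
	"fmt"
	"log"

	"github.com/containers/libpod/libpod/define"
)

func main() {
	// Every valid state round-trips through String and StringToContainerStatus.
	st := define.ContainerStateRunning
	back, err := define.StringToContainerStatus(st.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back == st) // true

	// Unknown names map to ContainerStateUnknown plus an error that
	// wraps define.ErrInvalidArg.
	if _, err := define.StringToContainerStatus("sleeping"); err != nil {
		fmt.Println(err)
	}
}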
@@ -158,3 +160,12 @@ const ( // EventFilter for filtering events type EventFilter func(*Event) bool + +var ( + // ErrEventTypeBlank indicates the event log found something done by podman + // but it isn't likely an event + ErrEventTypeBlank = errors.New("event type blank") + + // ErrEventNotFound indicates that the event was not found in the event log + ErrEventNotFound = errors.New("unable to find event") +) diff --git a/libpod/events/events.go b/libpod/events/events.go index 1ec79bcd7..2bebff162 100644 --- a/libpod/events/events.go +++ b/libpod/events/events.go @@ -95,6 +95,8 @@ func StringToType(name string) (Type, error) { return System, nil case Volume.String(): return Volume, nil + case "": + return "", ErrEventTypeBlank } return "", errors.Errorf("unknown event type %q", name) } diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 78a630e9a..d5bce4334 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -101,7 +101,9 @@ func (e EventJournalD) Read(options ReadOptions) error { // We can't decode this event. // Don't fail hard - that would make events unusable. // Instead, log and continue. - logrus.Errorf("Unable to decode event: %v", err) + if errors.Cause(err) != ErrEventTypeBlank { + logrus.Errorf("Unable to decode event: %v", err) + } continue } include := true diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 3e36a2c95..f4ea6c694 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/containers/libpod/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -169,7 +170,7 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) { if err != nil { return HealthCheckInternalError, err } - if cstate != ContainerStateRunning { + if cstate != define.ContainerStateRunning { return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID()) } if !c.HasHealthCheck() { diff --git a/libpod/healthcheck_unsupported.go b/libpod/healthcheck_unsupported.go index d01d1ccd4..1eccc77ba 100644 --- a/libpod/healthcheck_unsupported.go +++ b/libpod/healthcheck_unsupported.go @@ -2,18 +2,20 @@ package libpod +import "github.com/containers/libpod/libpod/define" + // createTimer creates systemd timers for healthchecks of a container func (c *Container) createTimer() error { - return ErrNotImplemented + return define.ErrNotImplemented } // startTimer starts a systemd timer for the healthchecks func (c *Container) startTimer() error { - return ErrNotImplemented + return define.ErrNotImplemented } // removeTimer removes the systemd timer and unit files // for the container func (c *Container) removeTimer() error { - return ErrNotImplemented + return define.ErrNotImplemented } diff --git a/libpod/image/image.go b/libpod/image/image.go index 89a68a1bd..76e46f74f 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -660,11 +660,7 @@ func (i *Image) Size(ctx context.Context) (*uint64, error) { // DriverData gets the driver data from the store on a layer func (i *Image) DriverData() (*driver.Data, error) { - topLayer, err := i.Layer() - if err != nil { - return nil, err - } - return driver.GetDriverData(i.imageruntime.store, topLayer.ID) + return driver.GetDriverData(i.imageruntime.store, i.TopLayer()) } // Layer returns the image's top layer @@ -693,13 +689,17 @@ func (i *Image) History(ctx context.Context) ([]*History, error) { return nil, err } -
// Use our layers list to find images that use any of them (or no + // layer, since every base layer is derived from an empty layer) as its // topmost layer. interestingLayers := make(map[string]bool) - layer, err := i.imageruntime.store.Layer(i.TopLayer()) - if err != nil { - return nil, err + var layer *storage.Layer + if i.TopLayer() != "" { + if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil { + return nil, err + } } + interestingLayers[""] = true for layer != nil { interestingLayers[layer.ID] = true if layer.Parent == "" { @@ -795,27 +795,6 @@ func (i *Image) History(ctx context.Context) ([]*History, error) { return allHistory, nil } -// historyLayerIDs goes through the images in store and checks if the top layer of an image -// is the same as the parent of topLayerID -func (i *Image) historyLayerIDs(topLayerID string, images []*Image, IDs *[]string) error { - for _, image := range images { - // Get the layer info of topLayerID - layer, err := i.imageruntime.store.Layer(topLayerID) - if err != nil { - return errors.Wrapf(err, "error getting layer info %q", topLayerID) - } - // Check if the parent of layer is equal to the image's top layer - // If so add the image ID to the list of IDs and find the parent of - // the top layer of the image ID added to the list - // Since we are checking for parent, each top layer can only have one parent - if layer.Parent == image.TopLayer() { - *IDs = append(*IDs, image.ID()) - return i.historyLayerIDs(image.TopLayer(), images, IDs) - } - } - return nil -} - // Dangling returns a bool if the image is "dangling" func (i *Image) Dangling() bool { return len(i.Names()) == 0 @@ -1143,13 +1122,15 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool { // GetParent returns the image ID of the parent. Return nil if a parent is not found. func (i *Image) GetParent(ctx context.Context) (*Image, error) { + var childLayer *storage.Layer images, err := i.imageruntime.GetImages() if err != nil { return nil, err } - childLayer, err := i.imageruntime.store.Layer(i.TopLayer()) - if err != nil { - return nil, err + if i.TopLayer() != "" { + if childLayer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil { + return nil, err + } } // fetch the configuration for the child image child, err := i.ociv1Image(ctx) @@ -1161,11 +1142,23 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) { continue } candidateLayer := img.TopLayer() - // as a child, our top layer is either the candidate parent's - // layer, or one that's derived from it, so skip over any - // candidate image where we know that isn't the case - if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID { - continue + // as a child, our top layer, if we have one, is either the + // candidate parent's layer, or one that's derived from it, so + // skip over any candidate image where we know that isn't the + // case + if childLayer != nil { + // The child has at least one layer, so a parent would + // have a top layer that's either the same as the child's + // top layer or the top layer's recorded parent layer, + // which could be an empty value. + if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID { + continue + } + } else { + // The child has no layers, but the candidate does. 
+ if candidateLayer != "" { + continue + } } // fetch the configuration for the candidate image candidate, err := img.ociv1Image(ctx) @@ -1204,14 +1197,22 @@ func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) { if img.ID() == i.ID() { continue } - candidateLayer, err := img.Layer() - if err != nil { - return nil, err - } - // if this image's top layer is not our top layer, and is not - // based on our top layer, we can skip it - if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer { - continue + if img.TopLayer() == "" { + if parentLayer != "" { + // this image has no layers, but we do, so + // it can't be derived from this one + continue + } + } else { + candidateLayer, err := img.Layer() + if err != nil { + return nil, err + } + // if this image's top layer is not our top layer, and is not + // based on our top layer, we can skip it + if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer { + continue + } } // fetch the configuration for the candidate image candidate, err := img.ociv1Image(ctx) @@ -1443,6 +1444,7 @@ func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, er if err != nil { return nil, err } + layerInfoMap[""] = &LayerInfo{} for _, img := range imgs { e, ok := layerInfoMap[img.TopLayer] if !ok { diff --git a/libpod/image/pull.go b/libpod/image/pull.go index 644a9ae86..e5765febc 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -19,8 +19,8 @@ import ( "github.com/containers/image/types" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/registries" - multierror "github.com/hashicorp/go-multierror" - opentracing "github.com/opentracing/opentracing-go" + "github.com/hashicorp/go-multierror" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 2669206df..7c4abd25d 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -3,6 +3,7 @@ package libpod import ( "strings" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/registrar" "github.com/containers/storage/pkg/truncindex" "github.com/pkg/errors" @@ -99,12 +100,12 @@ func (s *InMemoryState) SetNamespace(ns string) error { // Container retrieves a container from its full ID func (s *InMemoryState) Container(id string) (*Container, error) { if id == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } ctr, ok := s.containers[id] if !ok { - return nil, errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found", id) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found", id) } if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil { @@ -122,7 +123,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { ) if idOrName == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } if s.namespace != "" { @@ -130,7 +131,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { if !ok { // We have no containers in the namespace // Return false - return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) } nameIndex = nsIndex.nameIndex idIndex = nsIndex.idIndex @@ -146,7 +147,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { fullID, err = idIndex.Get(idOrName) if err != nil { if err 
== truncindex.ErrNotExist { - return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) } return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName) } @@ -158,7 +159,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { ctr, ok := s.containers[fullID] if !ok { // It's a pod, not a container - return nil, errors.Wrapf(ErrNoSuchCtr, "name or ID %s is a pod, not a container", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "name or ID %s is a pod, not a container", idOrName) } return ctr, nil @@ -167,7 +168,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { // HasContainer checks if a container with the given ID is present in the state func (s *InMemoryState) HasContainer(id string) (bool, error) { if id == "" { - return false, ErrEmptyID + return false, define.ErrEmptyID } ctr, ok := s.containers[id] @@ -182,15 +183,15 @@ func (s *InMemoryState) HasContainer(id string) (bool, error) { // Containers in a pod cannot be added to the state func (s *InMemoryState) AddContainer(ctr *Container) error { if !ctr.valid { - return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) } if _, ok := s.containers[ctr.ID()]; ok { - return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in state", ctr.ID()) + return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in state", ctr.ID()) } if ctr.config.Pod != "" { - return errors.Wrapf(ErrInvalidArg, "cannot add a container that is in a pod with AddContainer, use AddContainerToPod") + return errors.Wrapf(define.ErrInvalidArg, "cannot add a container that is in a pod with AddContainer, use AddContainerToPod") } if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil { @@ -204,12 +205,12 @@ func (s *InMemoryState) AddContainer(ctr *Container) error { for _, depID := range depCtrs { depCtr, ok := s.containers[depID] if !ok { - return errors.Wrapf(ErrNoSuchCtr, "cannot depend on nonexistent container %s", depID) + return errors.Wrapf(define.ErrNoSuchCtr, "cannot depend on nonexistent container %s", depID) } else if depCtr.config.Pod != "" { - return errors.Wrapf(ErrInvalidArg, "cannot depend on container in a pod if not part of same pod") + return errors.Wrapf(define.ErrInvalidArg, "cannot depend on container in a pod if not part of same pod") } if depCtr.config.Namespace != ctr.config.Namespace { - return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depID, depCtr.config.Namespace) + return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depID, depCtr.config.Namespace) } } @@ -270,12 +271,12 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error { deps, ok := s.ctrDepends[ctr.ID()] if ok && len(deps) != 0 { depsStr := strings.Join(deps, ", ") - return errors.Wrapf(ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) + return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "no 
container exists in state with ID %s", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID()) } if err := s.idIndex.Delete(ctr.ID()); err != nil { @@ -289,7 +290,7 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error { if ctr.config.Namespace != "" { nsIndex, ok := s.namespaceIndexes[ctr.config.Namespace] if !ok { - return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace) + return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace) } if err := nsIndex.idIndex.Delete(ctr.ID()); err != nil { return errors.Wrapf(err, "error removing container %s from namespace ID index", ctr.ID()) @@ -317,13 +318,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error { func (s *InMemoryState) UpdateContainer(ctr *Container) error { // If the container is invalid, return error if !ctr.valid { - return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) } // If the container does not exist, return error if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) } return s.checkNSMatch(ctr.ID(), ctr.Namespace()) @@ -336,13 +337,13 @@ func (s *InMemoryState) UpdateContainer(ctr *Container) error { func (s *InMemoryState) SaveContainer(ctr *Container) error { // If the container is invalid, return error if !ctr.valid { - return errors.Wrapf(ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container with ID %s is not valid", ctr.ID()) } // If the container does not exist, return error if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) } return s.checkNSMatch(ctr.ID(), ctr.Namespace()) @@ -351,13 +352,13 @@ func (s *InMemoryState) SaveContainer(ctr *Container) error { // ContainerInUse checks if the given container is being used by other containers func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) { if !ctr.valid { - return nil, ErrCtrRemoved + return nil, define.ErrCtrRemoved } // If the container does not exist, return error if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false - return nil, errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) } if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil { @@ -389,14 +390,14 @@ func (s *InMemoryState) AllContainers() ([]*Container, error) { // Please read the full comment on it in state.go before using it. 
func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error { if !ctr.valid { - return ErrCtrRemoved + return define.ErrCtrRemoved } // If the container does not exist, return error stateCtr, ok := s.containers[ctr.ID()] if !ok { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID()) } stateCtr.config = newCfg @@ -409,14 +410,14 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container // Please read the full comment on it in state.go before using it. func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { if !pod.valid { - return ErrPodRemoved + return define.ErrPodRemoved } // If the pod does not exist, return error statePod, ok := s.pods[pod.ID()] if !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found in state", pod.ID()) } statePod.config = newCfg @@ -427,12 +428,12 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { // Volume retrieves a volume from its full name func (s *InMemoryState) Volume(name string) (*Volume, error) { if name == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } vol, ok := s.volumes[name] if !ok { - return nil, errors.Wrapf(ErrNoSuchCtr, "no volume with name %s found", name) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no volume with name %s found", name) } return vol, nil @@ -441,7 +442,7 @@ func (s *InMemoryState) Volume(name string) (*Volume, error) { // HasVolume checks if a volume with the given name is present in the state func (s *InMemoryState) HasVolume(name string) (bool, error) { if name == "" { - return false, ErrEmptyID + return false, define.ErrEmptyID } _, ok := s.volumes[name] @@ -455,11 +456,11 @@ func (s *InMemoryState) HasVolume(name string) (bool, error) { // AddVolume adds a volume to the state func (s *InMemoryState) AddVolume(volume *Volume) error { if !volume.valid { - return errors.Wrapf(ErrVolumeRemoved, "volume with name %s is not valid", volume.Name()) + return errors.Wrapf(define.ErrVolumeRemoved, "volume with name %s is not valid", volume.Name()) } if _, ok := s.volumes[volume.Name()]; ok { - return errors.Wrapf(ErrVolumeExists, "volume with name %s already exists in state", volume.Name()) + return errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists in state", volume.Name()) } s.volumes[volume.Name()] = volume @@ -473,12 +474,12 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error { deps, ok := s.volumeDepends[volume.Name()] if ok && len(deps) != 0 { depsStr := strings.Join(deps, ", ") - return errors.Wrapf(ErrVolumeExists, "the following containers depend on volume %s: %s", volume.Name(), depsStr) + return errors.Wrapf(define.ErrVolumeExists, "the following containers depend on volume %s: %s", volume.Name(), depsStr) } if _, ok := s.volumes[volume.Name()]; !ok { volume.valid = false - return errors.Wrapf(ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name()) + return errors.Wrapf(define.ErrVolumeRemoved, "no volume exists in state with name %s", volume.Name()) } delete(s.volumes, volume.Name()) @@ -489,13 +490,13 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error { // VolumeInUse checks if the given volume is being used by at least one container func (s *InMemoryState) 
VolumeInUse(volume *Volume) ([]string, error) { if !volume.valid { - return nil, ErrVolumeRemoved + return nil, define.ErrVolumeRemoved } // If the volume does not exist, return error if _, ok := s.volumes[volume.Name()]; !ok { volume.valid = false - return nil, errors.Wrapf(ErrNoSuchVolume, "volume with name %s not found in state", volume.Name()) + return nil, errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found in state", volume.Name()) } arr, ok := s.volumeDepends[volume.Name()] @@ -519,12 +520,12 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) { // Pod retrieves a pod from the state from its full ID func (s *InMemoryState) Pod(id string) (*Pod, error) { if id == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } pod, ok := s.pods[id] if !ok { - return nil, errors.Wrapf(ErrNoSuchPod, "no pod with id %s found", id) + return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with id %s found", id) } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -543,7 +544,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { ) if idOrName == "" { - return nil, ErrEmptyID + return nil, define.ErrEmptyID } if s.namespace != "" { @@ -551,7 +552,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { if !ok { // We have no containers in the namespace // Return false - return nil, errors.Wrapf(ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) } nameIndex = nsIndex.nameIndex idIndex = nsIndex.idIndex @@ -567,7 +568,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { fullID, err = idIndex.Get(idOrName) if err != nil { if err == truncindex.ErrNotExist { - return nil, errors.Wrapf(ErrNoSuchPod, "no pod found with name or ID %s", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod found with name or ID %s", idOrName) } return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName) } @@ -579,7 +580,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { pod, ok := s.pods[fullID] if !ok { // It's a container not a pod - return nil, errors.Wrapf(ErrNoSuchPod, "id or name %s is a container not a pod", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchPod, "id or name %s is a container not a pod", idOrName) } return pod, nil @@ -588,7 +589,7 @@ func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { // HasPod checks if a pod with the given ID is present in the state func (s *InMemoryState) HasPod(id string) (bool, error) { if id == "" { - return false, ErrEmptyID + return false, define.ErrEmptyID } pod, ok := s.pods[id] @@ -602,11 +603,11 @@ func (s *InMemoryState) HasPod(id string) (bool, error) { // PodHasContainer checks if the given pod has a container with the given ID func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) { if !pod.valid { - return false, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID()) + return false, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID()) } if ctrID == "" { - return false, ErrEmptyID + return false, define.ErrEmptyID } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -616,7 +617,7 @@ func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) { podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return false, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) + 
return false, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) } _, ok = podCtrs[ctrID] @@ -626,7 +627,7 @@ func (s *InMemoryState) PodHasContainer(pod *Pod, ctrID string) (bool, error) { // PodContainersByID returns the IDs of all containers in the given pod func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) { if !pod.valid { - return nil, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID()) + return nil, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID()) } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -636,7 +637,7 @@ func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) { podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return nil, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) + return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) } length := len(podCtrs) @@ -655,7 +656,7 @@ func (s *InMemoryState) PodContainersByID(pod *Pod) ([]string, error) { // PodContainers retrieves the containers from a pod func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) { if !pod.valid { - return nil, errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID()) + return nil, errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID()) } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -665,7 +666,7 @@ func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) { podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return nil, errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) + return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in state", pod.ID()) } length := len(podCtrs) @@ -684,7 +685,7 @@ func (s *InMemoryState) PodContainers(pod *Pod) ([]*Container, error) { // AddPod adds a given pod to the state func (s *InMemoryState) AddPod(pod *Pod) error { if !pod.valid { - return errors.Wrapf(ErrPodRemoved, "pod %s is not valid and cannot be added", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid and cannot be added", pod.ID()) } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -692,11 +693,11 @@ func (s *InMemoryState) AddPod(pod *Pod) error { } if _, ok := s.pods[pod.ID()]; ok { - return errors.Wrapf(ErrPodExists, "pod with ID %s already exists in state", pod.ID()) + return errors.Wrapf(define.ErrPodExists, "pod with ID %s already exists in state", pod.ID()) } if _, ok := s.podContainers[pod.ID()]; ok { - return errors.Wrapf(ErrPodExists, "pod with ID %s already exists in state", pod.ID()) + return errors.Wrapf(define.ErrPodExists, "pod with ID %s already exists in state", pod.ID()) } if err := s.nameIndex.Reserve(pod.Name(), pod.ID()); err != nil { @@ -746,15 +747,15 @@ func (s *InMemoryState) RemovePod(pod *Pod) error { if _, ok := s.pods[pod.ID()]; !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) } podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) } if len(podCtrs) != 0 { - return errors.Wrapf(ErrCtrExists, "pod %s is not empty and cannot be removed", pod.ID()) + return errors.Wrapf(define.ErrCtrExists, "pod %s is not 
empty and cannot be removed", pod.ID()) } if err := s.idIndex.Delete(pod.ID()); err != nil { @@ -767,7 +768,7 @@ func (s *InMemoryState) RemovePod(pod *Pod) error { if pod.config.Namespace != "" { nsIndex, ok := s.namespaceIndexes[pod.config.Namespace] if !ok { - return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", pod.config.Namespace) + return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", pod.config.Namespace) } if err := nsIndex.idIndex.Delete(pod.ID()); err != nil { return errors.Wrapf(err, "error removing container %s from namespace ID index", pod.ID()) @@ -784,7 +785,7 @@ func (s *InMemoryState) RemovePod(pod *Pod) error { // Will only remove containers if no dependencies outside of the pod are present func (s *InMemoryState) RemovePodContainers(pod *Pod) error { if !pod.valid { - return errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID()) } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -795,7 +796,7 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error { podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) } // Go through container dependencies. Check to see if any are outside the pod. @@ -804,7 +805,7 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error { if ok { for _, dep := range ctrDeps { if _, ok := podCtrs[dep]; !ok { - return errors.Wrapf(ErrCtrExists, "container %s has dependency %s outside of pod %s", ctr, dep, pod.ID()) + return errors.Wrapf(define.ErrCtrExists, "container %s has dependency %s outside of pod %s", ctr, dep, pod.ID()) } } } @@ -830,18 +831,18 @@ func (s *InMemoryState) RemovePodContainers(pod *Pod) error { // state func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error { if !pod.valid { - return errors.Wrapf(ErrPodRemoved, "pod %s is not valid", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid", pod.ID()) } if !ctr.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", ctr.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", ctr.ID()) } if ctr.config.Pod != pod.ID() { - return errors.Wrapf(ErrInvalidArg, "container %s is not in pod %s", ctr.ID(), pod.ID()) + return errors.Wrapf(define.ErrInvalidArg, "container %s is not in pod %s", ctr.ID(), pod.ID()) } if ctr.config.Namespace != pod.config.Namespace { - return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s", + return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s", ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace) } @@ -853,12 +854,12 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error { podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return errors.Wrapf(ErrPodRemoved, "pod %s not found in state", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s not found in state", pod.ID()) } // Is the container already in the pod? 
if _, ok = podCtrs[ctr.ID()]; ok { - return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in pod %s", ctr.ID(), pod.ID()) + return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in pod %s", ctr.ID(), pod.ID()) } // There are potential race conditions with this @@ -867,20 +868,20 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error { depCtrs := ctr.Dependencies() for _, depCtr := range depCtrs { if _, ok = s.containers[depCtr]; !ok { - return errors.Wrapf(ErrNoSuchCtr, "cannot depend on nonexistent container %s", depCtr) + return errors.Wrapf(define.ErrNoSuchCtr, "cannot depend on nonexistent container %s", depCtr) } depCtrStruct, ok := podCtrs[depCtr] if !ok { - return errors.Wrapf(ErrInvalidArg, "cannot depend on container %s as it is not in pod %s", depCtr, pod.ID()) + return errors.Wrapf(define.ErrInvalidArg, "cannot depend on container %s as it is not in pod %s", depCtr, pod.ID()) } if depCtrStruct.config.Namespace != ctr.config.Namespace { - return errors.Wrapf(ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depCtr, depCtrStruct.config.Namespace) + return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and cannot depend on container %s in namespace %s", ctr.ID(), ctr.config.Namespace, depCtr, depCtrStruct.config.Namespace) } } // Add container to state if _, ok = s.containers[ctr.ID()]; ok { - return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in state", ctr.ID()) + return errors.Wrapf(define.ErrCtrExists, "container with ID %s already exists in state", ctr.ID()) } if err := s.nameIndex.Reserve(ctr.Name(), ctr.ID()); err != nil { @@ -928,10 +929,10 @@ func (s *InMemoryState) AddContainerToPod(pod *Pod, ctr *Container) error { // The container is also removed from the state func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { if !pod.valid { - return errors.Wrapf(ErrPodRemoved, "pod %s is not valid and containers cannot be removed", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s is not valid and containers cannot be removed", pod.ID()) } if !ctr.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid and cannot be removed from the pod", ctr.ID()) + return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid and cannot be removed from the pod", ctr.ID()) } if err := s.checkNSMatch(ctr.ID(), ctr.Namespace()); err != nil { @@ -942,30 +943,30 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { deps, ok := s.ctrDepends[ctr.ID()] if ok && len(deps) != 0 { depsStr := strings.Join(deps, ", ") - return errors.Wrapf(ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) + return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } // Retrieve pod containers podCtrs, ok := s.podContainers[pod.ID()] if !ok { pod.valid = false - return errors.Wrapf(ErrPodRemoved, "pod %s has been removed", pod.ID()) + return errors.Wrapf(define.ErrPodRemoved, "pod %s has been removed", pod.ID()) } // Does the container exist? if _, ok := s.containers[ctr.ID()]; !ok { ctr.valid = false - return errors.Wrapf(ErrNoSuchCtr, "container %s does not exist in state", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in state", ctr.ID()) } // Is the container in the pod? 
if _, ok := podCtrs[ctr.ID()]; !ok { - return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in pod %s", ctr.ID(), pod.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in pod %s", ctr.ID(), pod.ID()) } // Remove container from state if _, ok := s.containers[ctr.ID()]; !ok { - return errors.Wrapf(ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID()) + return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID()) } if err := s.idIndex.Delete(ctr.ID()); err != nil { @@ -980,7 +981,7 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { if ctr.config.Namespace != "" { nsIndex, ok := s.namespaceIndexes[ctr.config.Namespace] if !ok { - return errors.Wrapf(ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace) + return errors.Wrapf(define.ErrInternal, "error retrieving index for namespace %q", ctr.config.Namespace) } if err := nsIndex.idIndex.Delete(ctr.ID()); err != nil { return errors.Wrapf(err, "error removing container %s from namespace ID index", ctr.ID()) @@ -1001,7 +1002,7 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { // This is a no-op as there is no backing store func (s *InMemoryState) UpdatePod(pod *Pod) error { if !pod.valid { - return ErrPodRemoved + return define.ErrPodRemoved } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -1010,7 +1011,7 @@ if _, ok := s.pods[pod.ID()]; !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) } return nil @@ -1020,7 +1021,7 @@ func (s *InMemoryState) UpdatePod(pod *Pod) error { // This is a no-op as there is no backing store func (s *InMemoryState) SavePod(pod *Pod) error { if !pod.valid { - return ErrPodRemoved + return define.ErrPodRemoved } if err := s.checkNSMatch(pod.ID(), pod.Namespace()); err != nil { @@ -1029,7 +1030,7 @@ if _, ok := s.pods[pod.ID()]; !ok { pod.valid = false - return errors.Wrapf(ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) + return errors.Wrapf(define.ErrNoSuchPod, "no pod exists in state with ID %s", pod.ID()) } return nil @@ -1133,7 +1134,7 @@ func (s *InMemoryState) removeCtrFromVolDependsMap(depCtrID, volName string) { // namespaces.
func (s *InMemoryState) checkNSMatch(id, ns string) error { if s.namespace != "" && s.namespace != ns { - return errors.Wrapf(ErrNSMismatch, "cannot access %s as it is in namespace %q and we are in namespace %q", + return errors.Wrapf(define.ErrNSMismatch, "cannot access %s as it is in namespace %q and we are in namespace %q", id, ns, s.namespace) } return nil diff --git a/libpod/info.go b/libpod/info.go index c96293e3d..4a89fa648 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -19,12 +19,6 @@ import ( "github.com/pkg/errors" ) -// InfoData holds the info type, i.e store, host etc and the data for each type -type InfoData struct { - Type string - Data map[string]interface{} -} - // top-level "host" info func (r *Runtime) hostInfo() (map[string]interface{}, error) { // let's say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime diff --git a/libpod/kube.go b/libpod/kube.go index c5fd9d75c..409937010 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -3,10 +3,12 @@ package libpod import ( "fmt" "math/rand" + "os" "strconv" "strings" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/lookup" "github.com/containers/libpod/pkg/util" "github.com/cri-o/ocicni/pkg/ocicni" @@ -15,7 +17,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -131,27 +132,43 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor var ( podContainers []v1.Container ) + deDupPodVolumes := make(map[string]*v1.Volume) first := true for _, ctr := range containers { if !ctr.IsInfra() { - result, err := containerToV1Container(ctr) + ctr, volumes, err := containerToV1Container(ctr) if err != nil { return nil, err } + + // Since port bindings for the pod are handled by the + // infra container, wipe them here. + ctr.Ports = nil + // We add the original port declarations from the libpod infra container // to the first kubernetes container description because otherwise we lose // the original container/port bindings. if first && len(ports) > 0 { - result.Ports = ports + ctr.Ports = ports first = false } - podContainers = append(podContainers, result) + podContainers = append(podContainers, ctr) + // Deduplicate volumes, so if containers in the pod share a volume, it's only + // listed in the volumes section once + for _, vol := range volumes { + vol := vol // copy: taking &vol of the loop variable directly would make every entry alias the last volume + deDupPodVolumes[vol.Name] = &vol + } } } - return addContainersToPodObject(podContainers, p.Name()), nil + podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes)) + for _, vol := range deDupPodVolumes { + podVolumes = append(podVolumes, *vol) + } + + return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil } -func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod { +func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod { tm := v12.TypeMeta{ Kind: "Pod", APIVersion: "v1", @@ -171,6 +188,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod } ps := v1.PodSpec{ Containers: containers, + Volumes: volumes, } p := v1.Pod{ TypeMeta: tm, @@ -184,56 +202,58 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod // for a single container. We "insert" that container description in a pod.
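A note on the volume deduplication above: keying the map by volume name means two containers that mount the same host path contribute a single pod-level volume. A runnable sketch of the pattern with hypothetical values (the vol := vol copy matters because the map stores pointers):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	hostPath := v1.HostPathVolumeSource{Path: "/mnt/data"}
	// Two containers mount the same host path, producing two volume
	// entries with the same derived name.
	vols := []v1.Volume{
		{Name: "mnt-data", VolumeSource: v1.VolumeSource{HostPath: &hostPath}},
		{Name: "mnt-data", VolumeSource: v1.VolumeSource{HostPath: &hostPath}},
	}
	deDup := make(map[string]*v1.Volume)
	for _, vol := range vols {
		vol := vol // copy before taking the address
		deDup[vol.Name] = &vol
	}
	fmt.Println(len(deDup)) // 1: the shared volume is listed once
}

The single-container variant of this assembly follows.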
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) { var containers []v1.Container - result, err := containerToV1Container(ctr) + kubeCtr, kubeVols, err := containerToV1Container(ctr) if err != nil { return nil, err } - containers = append(containers, result) - return addContainersToPodObject(containers, ctr.Name()), nil + containers = append(containers, kubeCtr) + return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil } // containerToV1Container converts information we know about a libpod container // to a V1.Container specification. -func containerToV1Container(c *Container) (v1.Container, error) { +func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) { kubeContainer := v1.Container{} + kubeVolumes := []v1.Volume{} kubeSec, err := generateKubeSecurityContext(c) if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } if len(c.config.Spec.Linux.Devices) > 0 { // TODO Enable when we can support devices and their names devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices) if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } kubeContainer.VolumeDevices = devices - return kubeContainer, errors.Wrapf(ErrNotImplemented, "linux devices") + return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices") } if len(c.config.UserVolumes) > 0 { // TODO Until we can resolve what the volume name should be, this is disabled // Volume names need to be coordinated "globally" in the kube files. - volumes, err := libpodMountsToKubeVolumeMounts(c) + volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c) if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } - kubeContainer.VolumeMounts = volumes + kubeContainer.VolumeMounts = volumeMounts + kubeVolumes = append(kubeVolumes, volumes...) } envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env) if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } portmappings, err := c.PortMappings() if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } ports, err := ocicniPortMappingToContainerPort(portmappings) if err != nil { - return kubeContainer, err + return kubeContainer, kubeVolumes, err } containerCommands := c.Command() @@ -257,7 +277,7 @@ func containerToV1Container(c *Container) (v1.Container, error) { kubeContainer.StdinOnce = false kubeContainer.TTY = c.config.Spec.Process.Terminal - return kubeContainer, nil + return kubeContainer, kubeVolumes, nil } // ocicniPortMappingToContainerPort takes an ocicni portmapping and converts @@ -303,52 +323,82 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { return envVars, nil } -// Is this worth it? -func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint - // It does not appear we can properly calculate CPU resources from the information - // we know in libpod. Libpod knows CPUs by time, shares, etc.
+// libpodMountsToKubeVolumeMounts converts the container's mounts to a struct kube understands +func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) { + var vms []v1.VolumeMount + var vos []v1.Volume - // We also only know about a memory limit; no memory minimum - maxResources := make(map[v1.ResourceName]resource.Quantity) - minResources := make(map[v1.ResourceName]resource.Quantity) - config := c.Config() - maxMem := config.Spec.Linux.Resources.Memory.Limit + // TODO when named volumes are supported in play kube, also parse named volumes here + _, mounts := c.sortUserVolumes(c.config.Spec) + for _, m := range mounts { + vm, vo, err := generateKubeVolumeMount(m) + if err != nil { + return vms, vos, err + } + vms = append(vms, vm) + vos = append(vos, vo) + } + return vms, vos, nil } - _ = maxMem +// generateKubeVolumeMount takes a user specified mount and returns +// a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume +// (to be added to the pod) +func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) { + vm := v1.VolumeMount{} + vo := v1.Volume{} - return maxResources, minResources + name, err := convertVolumePathToName(m.Source) + if err != nil { + return vm, vo, err + } + vm.Name = name + vm.MountPath = m.Destination + if util.StringInSlice("ro", m.Options) { + vm.ReadOnly = true + } + + vo.Name = name + vo.HostPath = &v1.HostPathVolumeSource{} + vo.HostPath.Path = m.Source + isDir, err := isHostPathDirectory(m.Source) + // neither a directory nor a file lives here, default to creating a directory + // TODO should this be an error instead? + var hostPathType v1.HostPathType + if err != nil { + hostPathType = v1.HostPathDirectoryOrCreate + } else if isDir { + hostPathType = v1.HostPathDirectory + } else { + hostPathType = v1.HostPathFile + } + vo.HostPath.Type = &hostPathType + + return vm, vo, nil } -func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) { - vm := v1.VolumeMount{} - for _, m := range mounts { - if m.Source == hostSourcePath { - // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier - //vm.Name = - vm.MountPath = m.Source - vm.SubPath = m.Destination - if util.StringInSlice("ro", m.Options) { - vm.ReadOnly = true - } - return vm, nil - } +func isHostPathDirectory(hostPathSource string) (bool, error) { + info, err := os.Stat(hostPathSource) + if err != nil { + return false, err } - return vm, errors.New("unable to find mount source") + return info.Mode().IsDir(), nil } -// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands -func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) { - // At this point, I dont think we can distinguish between the default - // volume mounts and user added ones. For now, we pass them all.
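An editorial aside on the new generateKubeVolumeMount above: a stat failure on the source is treated as "nothing exists yet" and falls back to HostPathDirectoryOrCreate rather than erroring (the TODO questions that choice). A condensed, standalone version of the decision under the same assumptions (imports os and k8s.io/api/core/v1; the helper name is illustrative):

// hostPathType prefers the concrete type when the source exists,
// otherwise lets the kubelet create a directory.
func hostPathType(source string) v1.HostPathType {
	info, err := os.Stat(source)
	switch {
	case err != nil:
		return v1.HostPathDirectoryOrCreate
	case info.Mode().IsDir():
		return v1.HostPathDirectory
	default:
		return v1.HostPathFile
	}
}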
- var vms []v1.VolumeMount - for _, hostSourcePath := range c.config.UserVolumes { - vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts) - if err != nil { - continue +func convertVolumePathToName(hostSourcePath string) (string, error) { + if len(hostSourcePath) == 0 { + return "", errors.Errorf("hostSourcePath must be specified to generate volume name") + } + if len(hostSourcePath) == 1 { + if hostSourcePath != "/" { + return "", errors.Errorf("hostSourcePath malformed: %s", hostSourcePath) } - vms = append(vms, vm) + // add special case name + return "root", nil } - return vms, nil + // First, trim leading and trailing slashes, then replace the remaining slashes with dashes. + // Thus, /mnt/data/ will become mnt-data + return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil } func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities { @@ -360,16 +410,14 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v // those indicate a dropped cap for _, capability := range defaultCaps { if !util.StringInSlice(capability, containerCaps) { - cap := v1.Capability(capability) - drop = append(drop, cap) + drop = append(drop, v1.Capability(capability)) } } // Find caps in the container but not in the defaults; those indicate // an added cap for _, capability := range containerCaps { if !util.StringInSlice(capability, defaultCaps) { - cap := v1.Capability(capability) - add = append(add, cap) + add = append(add, v1.Capability(capability)) } } diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go new file mode 100644 index 000000000..e50d67321 --- /dev/null +++ b/libpod/lock/file/file_lock.go @@ -0,0 +1,175 @@ +package file + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + + "github.com/containers/storage" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// FileLocks is a struct enabling POSIX advisory locking backed by a +// directory of lock files. +type FileLocks struct { // nolint + lockPath string + valid bool +} + +// CreateFileLock sets up a directory containing the various lock files. +func CreateFileLock(path string) (*FileLocks, error) { + _, err := os.Stat(path) + if err == nil { + return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path) + } + if err := os.MkdirAll(path, 0711); err != nil { + return nil, errors.Wrapf(err, "cannot create %s", path) + } + + locks := new(FileLocks) + locks.lockPath = path + locks.valid = true + + return locks, nil +} + +// OpenFileLock opens an existing directory with the lock files. +func OpenFileLock(path string) (*FileLocks, error) { + _, err := os.Stat(path) + if err != nil { + return nil, errors.Wrapf(err, "accessing directory %s", path) + } + + locks := new(FileLocks) + locks.lockPath = path + locks.valid = true + + return locks, nil +} + +// Close removes the directory of lock files. +// The lock set will be rendered unusable after closing. +// WARNING: If you Close() while there are still locks locked, these locks may +// fail to release, causing a program freeze. +// Close() is only intended to be used while testing the locks.
+func (locks *FileLocks) Close() error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + err := os.RemoveAll(locks.lockPath) + if err != nil { + return errors.Wrapf(err, "deleting directory %s", locks.lockPath) + } + return nil +} + +func (locks *FileLocks) getLockPath(lck uint32) string { + return filepath.Join(locks.lockPath, strconv.FormatInt(int64(lck), 10)) +} + +// AllocateLock allocates a lock and returns the index of the lock that was allocated. +func (locks *FileLocks) AllocateLock() (uint32, error) { + if !locks.valid { + return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + id := uint32(0) + for ; ; id++ { + path := locks.getLockPath(id) + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + if os.IsExist(err) { + continue + } + return 0, errors.Wrapf(err, "creating lock file") + } + f.Close() + break + } + return id, nil } + +// AllocateGivenLock allocates the given lock from the lock directory +// for use by a container or pod. +// If the lock is already in use or the index is invalid an error will be +// returned. +func (locks *FileLocks) AllocateGivenLock(lck uint32) error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return errors.Wrapf(err, "error creating lock %d", lck) + } + f.Close() + + return nil +} + +// DeallocateLock frees a lock file so it can be +// reallocated to another container or pod. +// The given lock must be already allocated, or an error will be returned. +func (locks *FileLocks) DeallocateLock(lck uint32) error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + if err := os.Remove(locks.getLockPath(lck)); err != nil { + return errors.Wrapf(err, "deallocating lock %d", lck) + } + return nil +} + +// DeallocateAllLocks frees all locks so they can be reallocated to +// other containers and pods. +func (locks *FileLocks) DeallocateAllLocks() error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + files, err := ioutil.ReadDir(locks.lockPath) + if err != nil { + return errors.Wrapf(err, "error reading directory %s", locks.lockPath) + } + var lastErr error + for _, f := range files { + p := filepath.Join(locks.lockPath, f.Name()) + err := os.Remove(p) + if err != nil { + lastErr = err + logrus.Errorf("deallocating lock %s: %v", p, err) + } + } + return lastErr +} + +// LockFileLock locks the given lock. +func (locks *FileLocks) LockFileLock(lck uint32) error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + + l, err := storage.GetLockfile(locks.getLockPath(lck)) + if err != nil { + return errors.Wrapf(err, "error acquiring lock") + } + + l.Lock() + return nil +} + +// UnlockFileLock unlocks the given lock.
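Taken together, the intended lifecycle is: allocate a slot (which atomically creates a numbered file via O_CREATE|O_EXCL), take the POSIX lock on that file, release it, and finally remove the file to free the slot. A hedged usage sketch using only the functions defined in this file (error handling elided; the directory path is illustrative, and the package would be imported as github.com/containers/libpod/libpod/lock/file):

locks, _ := file.CreateFileLock("/run/libpod/file-locks")
id, _ := locks.AllocateLock()  // creates the first free numbered file
_ = locks.LockFileLock(id)     // POSIX-locks that file via storage.GetLockfile
_ = locks.UnlockFileLock(id)   // releases the POSIX lock
_ = locks.DeallocateLock(id)   // removes the file, freeing the slot

UnlockFileLock, whose implementation follows, retrieves the same lockfile object and releases it; the test file after it exercises this same cycle.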
+func (locks *FileLocks) UnlockFileLock(lck uint32) error { + if !locks.valid { + return errors.Wrapf(syscall.EINVAL, "locks have already been closed") + } + l, err := storage.GetLockfile(locks.getLockPath(lck)) + if err != nil { + return errors.Wrapf(err, "error acquiring lock") + } + + l.Unlock() + return nil +} diff --git a/libpod/lock/file/file_lock_test.go b/libpod/lock/file/file_lock_test.go new file mode 100644 index 000000000..6320d6b70 --- /dev/null +++ b/libpod/lock/file/file_lock_test.go @@ -0,0 +1,74 @@ +package file + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Test that creating and destroying locks work +func TestCreateAndDeallocate(t *testing.T) { + d, err := ioutil.TempDir("", "filelock") + assert.NoError(t, err) + defer os.RemoveAll(d) + + l, err := OpenFileLock(filepath.Join(d, "locks")) + assert.Error(t, err) + + l, err = CreateFileLock(filepath.Join(d, "locks")) + assert.NoError(t, err) + + lock, err := l.AllocateLock() + assert.NoError(t, err) + + err = l.AllocateGivenLock(lock) + assert.Error(t, err) + + err = l.DeallocateLock(lock) + assert.NoError(t, err) + + err = l.AllocateGivenLock(lock) + assert.NoError(t, err) + + err = l.DeallocateAllLocks() + assert.NoError(t, err) + + err = l.AllocateGivenLock(lock) + assert.NoError(t, err) + + err = l.DeallocateAllLocks() + assert.NoError(t, err) +} + +// Test that locking and unlocking locks works +func TestLockAndUnlock(t *testing.T) { + d, err := ioutil.TempDir("", "filelock") + assert.NoError(t, err) + defer os.RemoveAll(d) + + l, err := CreateFileLock(filepath.Join(d, "locks")) + assert.NoError(t, err) + + lock, err := l.AllocateLock() + assert.NoError(t, err) + + err = l.LockFileLock(lock) + assert.NoError(t, err) + + lslocks, err := exec.LookPath("lslocks") + if err == nil { + lockPath := l.getLockPath(lock) + out, err := exec.Command(lslocks, "--json", "-p", fmt.Sprintf("%d", os.Getpid())).CombinedOutput() + assert.NoError(t, err) + + assert.Contains(t, string(out), lockPath) + } + + err = l.UnlockFileLock(lock) + assert.NoError(t, err) +} diff --git a/libpod/lock/file_lock_manager.go b/libpod/lock/file_lock_manager.go new file mode 100644 index 000000000..8a4d939d3 --- /dev/null +++ b/libpod/lock/file_lock_manager.go @@ -0,0 +1,110 @@ +package lock + +import ( + "github.com/containers/libpod/libpod/lock/file" +) + +// FileLockManager manages file-based locks. +type FileLockManager struct { + locks *file.FileLocks +} + +// NewFileLockManager makes a new FileLockManager at the specified directory. +func NewFileLockManager(lockPath string) (Manager, error) { + locks, err := file.CreateFileLock(lockPath) + if err != nil { + return nil, err + } + + manager := new(FileLockManager) + manager.locks = locks + + return manager, nil +} + +// OpenFileLockManager opens an existing FileLockManager at the specified directory. +func OpenFileLockManager(path string) (Manager, error) { + locks, err := file.OpenFileLock(path) + if err != nil { + return nil, err + } + + manager := new(FileLockManager) + manager.locks = locks + + return manager, nil +} + +// AllocateLock allocates a new lock from the manager. +func (m *FileLockManager) AllocateLock() (Locker, error) { + semIndex, err := m.locks.AllocateLock() + if err != nil { + return nil, err + } + + lock := new(FileLock) + lock.lockID = semIndex + lock.manager = m + + return lock, nil +} + +// AllocateAndRetrieveLock allocates the lock with the given ID and returns it.
+// If the lock is already allocated, an error is returned. +func (m *FileLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) { + lock := new(FileLock) + lock.lockID = id + lock.manager = m + + if err := m.locks.AllocateGivenLock(id); err != nil { + return nil, err + } + + return lock, nil +} + +// RetrieveLock retrieves a lock from the manager given its ID. +func (m *FileLockManager) RetrieveLock(id uint32) (Locker, error) { + lock := new(FileLock) + lock.lockID = id + lock.manager = m + + return lock, nil +} + +// FreeAllLocks frees all locks in the manager. +// This function is DANGEROUS. Please read the full comment in locks.go before +// trying to use it. +func (m *FileLockManager) FreeAllLocks() error { + return m.locks.DeallocateAllLocks() +} + +// FileLock is an individual file-based lock. +type FileLock struct { + lockID uint32 + manager *FileLockManager +} + +// ID returns the ID of the lock. +func (l *FileLock) ID() uint32 { + return l.lockID +} + +// Lock acquires the lock. +func (l *FileLock) Lock() { + if err := l.manager.locks.LockFileLock(l.lockID); err != nil { + panic(err.Error()) + } +} + +// Unlock releases the lock. +func (l *FileLock) Unlock() { + if err := l.manager.locks.UnlockFileLock(l.lockID); err != nil { + panic(err.Error()) + } +} + +// Free releases the lock, allowing it to be reused. +func (l *FileLock) Free() error { + return l.manager.locks.DeallocateLock(l.lockID) +} diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index 76dd5729e..322e92a8f 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -1,3 +1,5 @@ +// +build linux,cgo + package shm // #cgo LDFLAGS: -lrt -lpthread @@ -20,7 +22,7 @@ var ( // BitmapSize is the size of the bitmap used when managing SHM locks. // an SHM lock manager's max locks will be rounded up to a multiple of // this number. - BitmapSize uint32 = uint32(C.bitmap_size_c) + BitmapSize = uint32(C.bitmap_size_c) ) // SHMLocks is a struct enabling POSIX semaphore locking in a shared memory diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go new file mode 100644 index 000000000..ea1488c90 --- /dev/null +++ b/libpod/lock/shm/shm_lock_nocgo.go @@ -0,0 +1,102 @@ +// +build linux,!cgo + +package shm + +import ( + "github.com/sirupsen/logrus" +) + +// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory +// segment. +type SHMLocks struct { +} + +// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX +// semaphores, and returns a struct that can be used to operate on those locks. +// numLocks must not be 0, and may be rounded up to a multiple of the bitmap +// size used by the underlying implementation. +func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) { + logrus.Error("locks are not supported without cgo") + return &SHMLocks{}, nil +} + +// OpenSHMLock opens an existing shared-memory segment holding a given number of +// POSIX semaphores. numLocks must match the number of locks the shared memory +// segment was created with. +func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) { + logrus.Error("locks are not supported without cgo") + return &SHMLocks{}, nil +} + +// GetMaxLocks returns the maximum number of locks in the SHM +func (locks *SHMLocks) GetMaxLocks() uint32 { + logrus.Error("locks are not supported without cgo") + return 0 +} + +// Close closes an existing shared-memory segment. +// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may +// fail to release, causing a program freeze. +// Close() is only intended to be used while testing the locks. +func (locks *SHMLocks) Close() error { + logrus.Error("locks are not supported without cgo") + return nil +} + +// AllocateSemaphore allocates a semaphore from a shared-memory segment for use +// by a container or pod. +// Returns the index of the semaphore that was allocated. +// Allocations past the maximum number of locks given when the SHM segment was +// created will result in an error, and no semaphore will be allocated. +func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { + logrus.Error("locks are not supported without cgo") + return 0, nil +} + +// AllocateGivenSemaphore allocates the given semaphore from the shared-memory +// segment for use by a container or pod. +// If the semaphore is already in use or the index is invalid an error will be +// returned. +func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error { + logrus.Error("locks are not supported without cgo") + return nil +} + +// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be +// reallocated to another container or pod. +// The given semaphore must be already allocated, or an error will be returned. +func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error { + logrus.Error("locks are not supported without cgo") + return nil +} + +// DeallocateAllSemaphores frees all semaphores so they can be reallocated to +// other containers and pods. +func (locks *SHMLocks) DeallocateAllSemaphores() error { + logrus.Error("locks are not supported without cgo") + return nil +} + +// LockSemaphore locks the given semaphore. +// If the semaphore is already locked, LockSemaphore will block until the lock +// can be acquired. +// There is no requirement that the given semaphore be allocated. +// This ensures that attempts to lock a container after it has been deleted, +// but before the caller has queried the database to determine this, will +// succeed. +func (locks *SHMLocks) LockSemaphore(sem uint32) error { + logrus.Error("locks are not supported without cgo") + return nil +} + +// UnlockSemaphore unlocks the given semaphore. +// Unlocking a semaphore that is already unlocked will return EBUSY. +// There is no requirement that the given semaphore be allocated. +// This ensures that attempts to lock a container after it has been deleted, +// but before the caller has queried the database to determine this, will +// succeed. +func (locks *SHMLocks) UnlockSemaphore(sem uint32) error { + logrus.Error("locks are not supported without cgo") + return nil +} diff --git a/libpod/container_log.go b/libpod/logs/log.go index 374e5a1fc..488291cfe 100644 --- a/libpod/container_log.go +++ b/libpod/logs/log.go @@ -1,31 +1,29 @@ -package libpod +package logs import ( "fmt" "io/ioutil" - "os" "strings" "sync" "time" "github.com/hpcloud/tail" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( - // logTimeFormat is the time format used in the log. + // LogTimeFormat is the time format used in the log.
// It is a modified version of RFC3339Nano that guarantees trailing // zeroes are not trimmed, taken from // https://github.com/golang/go/issues/19635 - logTimeFormat = "2006-01-02T15:04:05.000000000Z07:00" + LogTimeFormat = "2006-01-02T15:04:05.000000000Z07:00" - // partialLogType signifies a log line that exceeded the buffer + // PartialLogType signifies a log line that exceeded the buffer // length and needed to spill into a new line - partialLogType = "P" + PartialLogType = "P" - // fullLogType signifies a log line is full - fullLogType = "F" + // FullLogType signifies a log line is full + FullLogType = "F" ) // LogOptions is the options you can use for logs @@ -48,72 +46,8 @@ type LogLine struct { CID string } -// Log is a runtime function that can read one or more container logs. -func (r *Runtime) Log(containers []*Container, options *LogOptions, logChannel chan *LogLine) error { - for _, ctr := range containers { - if err := ctr.ReadLog(options, logChannel); err != nil { - return err - } - } - return nil -} - -// ReadLog reads a containers log based on the input options and returns loglines over a channel -func (c *Container) ReadLog(options *LogOptions, logChannel chan *LogLine) error { - // TODO Skip sending logs until journald logs can be read - // TODO make this not a magic string - if c.LogDriver() == JournaldLogging { - return c.readFromJournal(options, logChannel) - } - return c.readFromLogFile(options, logChannel) -} - -func (c *Container) readFromLogFile(options *LogOptions, logChannel chan *LogLine) error { - t, tailLog, err := getLogFile(c.LogPath(), options) - if err != nil { - // If the log file does not exist, this is not fatal. - if os.IsNotExist(errors.Cause(err)) { - return nil - } - return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath()) - } - options.WaitGroup.Add(1) - if len(tailLog) > 0 { - for _, nll := range tailLog { - nll.CID = c.ID() - if nll.Since(options.Since) { - logChannel <- nll - } - } - } - - go func() { - var partial string - for line := range t.Lines { - nll, err := newLogLine(line.Text) - if err != nil { - logrus.Error(err) - continue - } - if nll.Partial() { - partial = partial + nll.Msg - continue - } else if !nll.Partial() && len(partial) > 1 { - nll.Msg = partial - partial = "" - } - nll.CID = c.ID() - if nll.Since(options.Since) { - logChannel <- nll - } - } - options.WaitGroup.Done() - }() - return nil -} - -// getLogFile returns an hp tail for a container given options -func getLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) { +// GetLogFile returns an hp tail for a container given options +func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) { var ( whence int err error @@ -154,7 +88,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) { if len(splitContent[i]) == 0 { continue } - nll, err := newLogLine(splitContent[i]) + nll, err := NewLogLine(splitContent[i]) if err != nil { return nil, err } @@ -191,7 +125,7 @@ func (l *LogLine) String(options *LogOptions) string { out = fmt.Sprintf("%s ", cid) } if options.Timestamps { - out = out + fmt.Sprintf("%s ", l.Time.Format(logTimeFormat)) + out = out + fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat)) } return out + l.Msg } @@ -201,13 +135,13 @@ func (l *LogLine) Since(since time.Time) bool { return l.Time.After(since) } -// newLogLine creates a logLine struct from a container log string -func newLogLine(line string) (*LogLine, error) { +// NewLogLine creates a logLine struct from a container log 
string +func NewLogLine(line string) (*LogLine, error) { splitLine := strings.Split(line, " ") if len(splitLine) < 4 { return nil, errors.Errorf("'%s' is not a valid container log line", line) } - logTime, err := time.Parse(logTimeFormat, splitLine[0]) + logTime, err := time.Parse(LogTimeFormat, splitLine[0]) if err != nil { return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0]) } @@ -222,7 +156,7 @@ func newLogLine(line string) (*LogLine, error) { // Partial returns a bool if the log line is a partial log type func (l *LogLine) Partial() bool { - if l.ParseLogType == partialLogType { + if l.ParseLogType == PartialLogType { return true } return false diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index 93ec157c5..d978bceed 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -28,21 +28,23 @@ import ( // Get an OCICNI network config func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork { + defaultNetwork := r.netPlugin.GetDefaultNetworkName() network := ocicni.PodNetwork{ - Name: name, - Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces - ID: id, - NetNS: nsPath, - PortMappings: ports, - Networks: networks, + Name: name, + Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces + ID: id, + NetNS: nsPath, + Networks: networks, + RuntimeConfig: map[string]ocicni.RuntimeConfig{ + defaultNetwork: {PortMappings: ports}, + }, } if staticIP != nil { - defaultNetwork := r.netPlugin.GetDefaultNetworkName() - network.Networks = []string{defaultNetwork} - network.NetworkConfig = make(map[string]ocicni.NetworkConfig) - network.NetworkConfig[defaultNetwork] = ocicni.NetworkConfig{IP: staticIP.String()} + network.RuntimeConfig = map[string]ocicni.RuntimeConfig{ + defaultNetwork: {IP: staticIP.String(), PortMappings: ports}, + } } return network @@ -292,14 +294,14 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket) } buf := make([]byte, 2048) - len, err := conn.Read(buf) + readLength, err := conn.Read(buf) if err != nil { return errors.Wrapf(err, "cannot read from control socket %s", apiSocket) } // if there is no 'error' key in the received JSON data, then the operation was // successful. var y map[string]interface{} - if err := json.Unmarshal(buf[0:len], &y); err != nil { + if err := json.Unmarshal(buf[0:readLength], &y); err != nil { return errors.Wrapf(err, "error parsing error status from slirp4netns") } if e, found := y["error"]; found { @@ -330,7 +332,9 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) { if err != nil { return errors.Wrapf(err, "cannot open %s", nsPath) } - mountPointFd.Close() + if err := mountPointFd.Close(); err != nil { + return err + } if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil { return errors.Wrapf(err, "cannot mount %s", nsPath) @@ -350,12 +354,12 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) { // Join an existing network namespace func joinNetNS(path string) (ns.NetNS, error) { - ns, err := ns.GetNS(path) + netNS, err := ns.GetNS(path) if err != nil { return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path) } - return ns, nil + return netNS, nil } // Close a network namespace. 
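The slirp4netns exchange above follows a simple control-socket convention: read a single JSON reply and treat the presence of an "error" key as failure. A standalone sketch of that convention, mirroring the single-read behavior of the code above (an io.Reader stands in for the unix socket connection; the function name is illustrative; imports: io, encoding/json, github.com/pkg/errors):

// checkSlirpReply reads one JSON reply from the control socket and
// returns an error if the reply carries an "error" key.
func checkSlirpReply(conn io.Reader) error {
	buf := make([]byte, 2048)
	n, err := conn.Read(buf)
	if err != nil {
		return errors.Wrapf(err, "cannot read from control socket")
	}
	var reply map[string]interface{}
	if err := json.Unmarshal(buf[:n], &reply); err != nil {
		return errors.Wrapf(err, "error parsing status from slirp4netns")
	}
	if e, found := reply["error"]; found {
		return errors.Errorf("slirp4netns reported: %v", e)
	}
	return nil
}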
diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go index 1e46ca40b..d9b3730aa 100644 --- a/libpod/networking_unsupported.go +++ b/libpod/networking_unsupported.go @@ -2,20 +2,22 @@ package libpod +import "github.com/containers/libpod/libpod/define" + func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { - return ErrNotImplemented + return define.ErrNotImplemented } func (r *Runtime) setupNetNS(ctr *Container) (err error) { - return ErrNotImplemented + return define.ErrNotImplemented } func (r *Runtime) teardownNetNS(ctr *Container) error { - return ErrNotImplemented + return define.ErrNotImplemented } func (r *Runtime) createNetNS(ctr *Container) (err error) { - return ErrNotImplemented + return define.ErrNotImplemented } func (c *Container) getContainerNetworkInfo(data *InspectContainerData) *InspectContainerData { diff --git a/libpod/oci.go b/libpod/oci.go index 36c1dea84..6aad79cdf 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/util" "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -79,7 +80,7 @@ type ociError struct { // The first path that points to a valid executable will be used. func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON bool) (*OCIRuntime, error) { if name == "" { - return nil, errors.Wrapf(ErrInvalidArg, "the OCI runtime must be provided a non-empty name") + return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name") } runtime := new(OCIRuntime) @@ -114,14 +115,14 @@ func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *R break } if !foundPath { - return nil, errors.Wrapf(ErrInvalidArg, "no valid executable found for OCI runtime %s", name) + return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name) } runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager { - return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) + return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) } // Create the exit files and attach sockets directories @@ -216,7 +217,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro // If not using the OCI runtime, we don't need to do most of this. if !useRuntime { // If the container's not running, nothing to do. - if ctr.state.State != ContainerStateRunning && ctr.state.State != ContainerStatePaused { + if ctr.state.State != define.ContainerStateRunning && ctr.state.State != define.ContainerStatePaused { return nil } @@ -232,7 +233,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro } // Alright, it exists. Transition to Stopped state. - ctr.state.State = ContainerStateStopped + ctr.state.State = define.ContainerStateStopped + ctr.state.PID = 0 + ctr.state.ConmonPID = 0 // Read the exit file to get our stopped time and exit code. 
return ctr.handleExitFile(exitFile, info) @@ -260,17 +263,21 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro return errors.Wrapf(err, "error getting container %s state", ctr.ID()) } if strings.Contains(string(out), "does not exist") { - ctr.removeConmonFiles() + if err := ctr.removeConmonFiles(); err != nil { + logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) + } ctr.state.ExitCode = -1 ctr.state.FinishedTime = time.Now() - ctr.state.State = ContainerStateExited + ctr.state.State = define.ContainerStateExited return nil } return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out) } defer cmd.Wait() - errPipe.Close() + if err := errPipe.Close(); err != nil { + return err + } out, err := ioutil.ReadAll(outPipe) if err != nil { return errors.Wrapf(err, "error reading stdout: %s", ctr.ID()) @@ -282,21 +289,21 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro switch state.Status { case "created": - ctr.state.State = ContainerStateCreated + ctr.state.State = define.ContainerStateCreated case "paused": - ctr.state.State = ContainerStatePaused + ctr.state.State = define.ContainerStatePaused case "running": - ctr.state.State = ContainerStateRunning + ctr.state.State = define.ContainerStateRunning case "stopped": - ctr.state.State = ContainerStateStopped + ctr.state.State = define.ContainerStateStopped default: - return errors.Wrapf(ErrInternal, "unrecognized status returned by runtime for container %s: %s", + return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s", ctr.ID(), state.Status) } // Only grab exit status if we were not already stopped // If we were, it should already be in the database - if ctr.state.State == ContainerStateStopped && oldState != ContainerStateStopped { + if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped { var fi os.FileInfo chWait := make(chan error) defer close(chWait) @@ -390,11 +397,11 @@ func (r *OCIRuntime) unpauseContainer(ctr *Container) error { // TODO: add --pid-file and use that to generate exec session tracking func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int) (*exec.Cmd, error) { if len(cmd) == 0 { - return nil, errors.Wrapf(ErrInvalidArg, "must provide a command to execute") + return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") } if sessionID == "" { - return nil, errors.Wrapf(ErrEmptyID, "must provide a session ID for exec") + return nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") } runtimeDir, err := util.GetRootlessRuntimeDir() @@ -430,8 +437,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty args = append(args, "--no-new-privs") } - for _, cap := range capAdd { - args = append(args, "--cap", cap) + for _, capabilityAdd := range capAdd { + args = append(args, "--cap", capabilityAdd) } for _, envVar := range env { @@ -472,7 +479,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty for fd := 3; fd < 3+preserveFDs; fd++ { // These fds were passed down to the runtime. 
Close them // so that we do not interfere - os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close() + if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { + logrus.Debugf("unable to close file fd-%d", fd) + } } } @@ -481,7 +490,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty // checkpointContainer checkpoints the given container func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error { - label.SetSocketLabel(ctr.ProcessLabel()) + if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { + return err + } // imagePath is used by CRIU to store the actual checkpoint files imagePath := ctr.CheckpointPath() // workPath will be used to store dump.log and stats-dump diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go index b7efa742a..802f4311b 100644 --- a/libpod/oci_linux.go +++ b/libpod/oci_linux.go @@ -15,7 +15,8 @@ import ( "syscall" "time" - "github.com/containerd/cgroups" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/libpod/utils" @@ -48,13 +49,13 @@ func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd } } else { cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon") - control, err := cgroups.New(cgroups.V1, cgroups.StaticPath(cgroupPath), &spec.LinuxResources{}) + control, err := cgroups.New(cgroupPath, &spec.LinuxResources{}) if err != nil { logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } else { // we need to remove this defer and delete the cgroup once conmon exits // maybe need a conmon monitor? - if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil { + if err := control.AddPid(cmd.Process.Pid); err != nil { logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } } @@ -341,7 +342,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res ) plabel, err = selinux.CurrentLabel() if err != nil { - childPipe.Close() + if err := childPipe.Close(); err != nil { + logrus.Errorf("failed to close child pipe: %q", err) + } return errors.Wrapf(err, "Failed to get current SELinux label") } @@ -434,19 +437,22 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res if err == nil { var ociErr ociError if err := json.Unmarshal(data, &ociErr); err == nil { - return errors.Wrapf(ErrOCIRuntime, "%s", strings.Trim(ociErr.Msg, "\n")) + return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(ociErr.Msg, "\n")) } } } // If we failed to parse the JSON errors, then print the output as it is if ss.si.Message != "" { - return errors.Wrapf(ErrOCIRuntime, "%s", ss.si.Message) + return errors.Wrapf(define.ErrOCIRuntime, "%s", ss.si.Message) } - return errors.Wrapf(ErrInternal, "container create failed") + return errors.Wrapf(define.ErrInternal, "container create failed") } ctr.state.PID = ss.si.Pid + if cmd.Process != nil { + ctr.state.ConmonPID = cmd.Process.Pid + } case <-time.After(ContainerCreateTimeout): - return errors.Wrapf(ErrInternal, "container creation timeout") + return errors.Wrapf(define.ErrInternal, "container creation timeout") } return nil } diff --git a/libpod/oci_unsupported.go b/libpod/oci_unsupported.go index 12183faf3..cfdf70bc6 100644 --- a/libpod/oci_unsupported.go +++ b/libpod/oci_unsupported.go @@ -5,18 +5,20 @@ package libpod import ( "os" "os/exec" + +
"github.com/containers/libpod/libpod/define" ) func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error { - return ErrOSNotSupported + return define.ErrOSNotSupported } func newPipe() (parent *os.File, child *os.File, err error) { - return nil, nil, ErrNotImplemented + return nil, nil, define.ErrNotImplemented } func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { - return ErrNotImplemented + return define.ErrNotImplemented } func (r *OCIRuntime) pathPackage() string { @@ -28,13 +30,13 @@ func (r *OCIRuntime) conmonPackage() string { } func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { - return ErrOSNotSupported + return define.ErrOSNotSupported } func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { - return ErrOSNotSupported + return define.ErrOSNotSupported } func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { - return ErrOSNotSupported + return define.ErrOSNotSupported } diff --git a/libpod/options.go b/libpod/options.go index cdac09654..4f8bb42df 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -8,6 +8,7 @@ import ( "syscall" "github.com/containers/image/manifest" + config2 "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/namespaces" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" @@ -19,7 +20,7 @@ import ( var ( nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") - regexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") + regexError = errors.Wrapf(config2.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") ) // Runtime Creation Options @@ -30,7 +31,7 @@ var ( func WithStorageConfig(config storage.StoreOptions) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } setField := false @@ -104,7 +105,7 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { func WithDefaultTransport(defaultTransport string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.ImageDefaultTransport = defaultTransport @@ -120,7 +121,7 @@ func WithDefaultTransport(defaultTransport string) RuntimeOption { func WithSignaturePolicy(path string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.SignaturePolicyPath = path @@ -136,11 +137,11 @@ func WithSignaturePolicy(path string) RuntimeOption { func WithStateType(storeType RuntimeStateStore) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } if storeType == InvalidStateStore { - return errors.Wrapf(ErrInvalidArg, "must provide a valid state store type") + return errors.Wrapf(config2.ErrInvalidArg, "must provide a valid state store type") } rt.config.StateType = storeType @@ -153,11 +154,11 @@ func WithStateType(storeType RuntimeStateStore) RuntimeOption { func WithOCIRuntime(runtime string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } if runtime == "" { - return errors.Wrapf(ErrInvalidArg, "must provide a valid path") + return errors.Wrapf(config2.ErrInvalidArg, 
"must provide a valid path") } rt.config.OCIRuntime = runtime @@ -172,11 +173,11 @@ func WithOCIRuntime(runtime string) RuntimeOption { func WithConmonPath(path string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } if path == "" { - return errors.Wrapf(ErrInvalidArg, "must provide a valid path") + return errors.Wrapf(config2.ErrInvalidArg, "must provide a valid path") } rt.config.ConmonPath = []string{path} @@ -189,7 +190,7 @@ func WithConmonPath(path string) RuntimeOption { func WithConmonEnv(environment []string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.ConmonEnvVars = make([]string, len(environment)) @@ -204,7 +205,7 @@ func WithConmonEnv(environment []string) RuntimeOption { func WithNetworkCmdPath(path string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.NetworkCmdPath = path @@ -219,11 +220,11 @@ func WithNetworkCmdPath(path string) RuntimeOption { func WithCgroupManager(manager string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } if manager != CgroupfsCgroupsManager && manager != SystemdCgroupsManager { - return errors.Wrapf(ErrInvalidArg, "CGroup manager must be one of %s and %s", + return errors.Wrapf(config2.ErrInvalidArg, "CGroup manager must be one of %s and %s", CgroupfsCgroupsManager, SystemdCgroupsManager) } @@ -238,7 +239,7 @@ func WithCgroupManager(manager string) RuntimeOption { func WithStaticDir(dir string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.StaticDir = dir @@ -252,12 +253,12 @@ func WithStaticDir(dir string) RuntimeOption { func WithHooksDir(hooksDirs ...string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } for _, hooksDir := range hooksDirs { if hooksDir == "" { - return errors.Wrap(ErrInvalidArg, "empty-string hook directories are not supported") + return errors.Wrap(config2.ErrInvalidArg, "empty-string hook directories are not supported") } } @@ -273,11 +274,11 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption { func WithDefaultMountsFile(mountsFile string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } if mountsFile == "" { - return ErrInvalidArg + return config2.ErrInvalidArg } rt.config.DefaultMountsFile = mountsFile return nil @@ -290,7 +291,7 @@ func WithDefaultMountsFile(mountsFile string) RuntimeOption { func WithTmpDir(dir string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.TmpDir = dir rt.configuredFrom.libpodTmpDirSet = true @@ -299,12 +300,21 @@ func WithTmpDir(dir string) RuntimeOption { } } +// WithNoStore sets a bool on the runtime that we do not need +// any containers storage. +func WithNoStore() RuntimeOption { + return func(rt *Runtime) error { + rt.noStore = true + return nil + } +} + // WithMaxLogSize sets the maximum size of container logs. // Positive sizes are limits in bytes, -1 is unlimited. 
func WithMaxLogSize(limit int64) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.MaxLogSize = limit @@ -315,10 +325,10 @@ func WithMaxLogSize(limit int64) RuntimeOption { // WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when // starting containers. -func WithNoPivotRoot(noPivot bool) RuntimeOption { +func WithNoPivotRoot() RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.NoPivotRoot = true @@ -331,7 +341,7 @@ func WithNoPivotRoot(noPivot bool) RuntimeOption { func WithCNIConfigDir(dir string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.CNIConfigDir = dir @@ -344,7 +354,7 @@ func WithCNIConfigDir(dir string) RuntimeOption { func WithCNIPluginDir(dir string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.CNIPluginDir = []string{dir} @@ -364,7 +374,7 @@ func WithCNIPluginDir(dir string) RuntimeOption { func WithNamespace(ns string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.Namespace = ns @@ -380,7 +390,7 @@ func WithNamespace(ns string) RuntimeOption { func WithVolumePath(volPath string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.VolumePath = volPath @@ -398,7 +408,7 @@ func WithVolumePath(volPath string) RuntimeOption { func WithDefaultInfraImage(img string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.InfraImage = img @@ -412,7 +422,7 @@ func WithDefaultInfraImage(img string) RuntimeOption { func WithDefaultInfraCommand(cmd string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.config.InfraCommand = cmd @@ -428,7 +438,7 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption { func WithRenumber() RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.doRenumber = true @@ -443,7 +453,7 @@ func WithRenumber() RuntimeOption { func WithMigrate() RuntimeOption { return func(rt *Runtime) error { if rt.valid { - return ErrRuntimeFinalized + return config2.ErrRuntimeFinalized } rt.doMigrate = true @@ -458,7 +468,7 @@ func WithMigrate() RuntimeOption { func WithShmDir(dir string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.ShmDir = dir @@ -470,7 +480,7 @@ func WithShmDir(dir string) CtrCreateOption { func WithSystemd() CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Systemd = true @@ -482,7 +492,7 @@ func WithSystemd() CtrCreateOption { func WithShmSize(size int64) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.ShmSize = size @@ -494,7 +504,7 @@ func WithShmSize(size int64) CtrCreateOption { func WithPrivileged(privileged bool) CtrCreateOption { return func(ctr *Container) error 
{ if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Privileged = privileged @@ -506,7 +516,7 @@ func WithPrivileged(privileged bool) CtrCreateOption { func WithSecLabels(labelOpts []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.LabelOpts = labelOpts return nil @@ -518,7 +528,7 @@ func WithSecLabels(labelOpts []string) CtrCreateOption { func WithUser(user string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.User = user @@ -534,14 +544,14 @@ func WithUser(user string) CtrCreateOption { func WithRootFSFromImage(imageID string, imageName string, useImageVolumes bool) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.RootfsImageID != "" || ctr.config.RootfsImageName != "" { - return errors.Wrapf(ErrInvalidArg, "container already configured with root filesystem") + return errors.Wrapf(config2.ErrInvalidArg, "container already configured with root filesystem") } if ctr.config.Rootfs != "" { - return errors.Wrapf(ErrInvalidArg, "cannot set both an image ID and a rootfs for a container") + return errors.Wrapf(config2.ErrInvalidArg, "cannot set both an image ID and a rootfs for a container") } ctr.config.RootfsImageID = imageID @@ -556,7 +566,7 @@ func WithRootFSFromImage(imageID string, imageName string, useImageVolumes bool) func WithStdin() CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Stdin = true @@ -572,11 +582,11 @@ func WithStdin() CtrCreateOption { func (r *Runtime) WithPod(pod *Pod) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if pod == nil { - return ErrInvalidArg + return config2.ErrInvalidArg } ctr.config.Pod = pod.ID() @@ -589,7 +599,7 @@ func (r *Runtime) WithPod(pod *Pod) CtrCreateOption { func WithLabels(labels map[string]string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Labels = make(map[string]string) @@ -605,7 +615,7 @@ func WithLabels(labels map[string]string) CtrCreateOption { func WithName(name string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } // Check the name against a regex @@ -623,13 +633,13 @@ func WithName(name string) CtrCreateOption { func WithStopSignal(signal syscall.Signal) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if signal == 0 { - return errors.Wrapf(ErrInvalidArg, "stop signal cannot be 0") + return errors.Wrapf(config2.ErrInvalidArg, "stop signal cannot be 0") } else if signal > 64 { - return errors.Wrapf(ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)") + return errors.Wrapf(config2.ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)") } ctr.config.StopSignal = uint(signal) @@ -643,7 +653,7 @@ func WithStopSignal(signal syscall.Signal) CtrCreateOption { func WithStopTimeout(timeout uint) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.StopTimeout = timeout @@ -656,7 +666,7 @@ 
func WithStopTimeout(timeout uint) CtrCreateOption { func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.IDMappings = idmappings @@ -668,7 +678,7 @@ func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption { func WithExitCommand(exitCommand []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.ExitCommand = append(exitCommand, ctr.ID()) @@ -681,7 +691,7 @@ func WithExitCommand(exitCommand []string) CtrCreateOption { func WithUTSNSFromPod(p *Pod) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if err := validPodNSOption(p, ctr.config.Pod); err != nil { @@ -705,19 +715,19 @@ func WithUTSNSFromPod(p *Pod) CtrCreateOption { func WithIPCNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.IPCNsCtr = nsCtr.ID() @@ -733,19 +743,19 @@ func WithIPCNSFrom(nsCtr *Container) CtrCreateOption { func WithMountNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.MountNsCtr = nsCtr.ID() @@ -761,23 +771,23 @@ func WithMountNSFrom(nsCtr *Container) CtrCreateOption { func WithNetNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.CreateNetNS { - return errors.Wrapf(ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns") + return errors.Wrapf(config2.ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return 
errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.NetNsCtr = nsCtr.ID() @@ -793,19 +803,19 @@ func WithNetNSFrom(nsCtr *Container) CtrCreateOption { func WithPIDNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.PIDNsCtr = nsCtr.ID() @@ -821,19 +831,19 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption { func WithUserNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.UserNsCtr = nsCtr.ID() @@ -849,19 +859,19 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { func WithUTSNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.UTSNsCtr = nsCtr.ID() @@ -877,19 +887,19 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption { func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !nsCtr.valid { - return ErrCtrRemoved + return config2.ErrCtrRemoved } if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") 
} if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } ctr.config.CgroupNsCtr = nsCtr.ID() @@ -903,22 +913,22 @@ func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption { func WithDependencyCtrs(ctrs []*Container) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } deps := make([]string, 0, len(ctrs)) for _, dep := range ctrs { if !dep.valid { - return errors.Wrapf(ErrCtrRemoved, "container %s is not valid", dep.ID()) + return errors.Wrapf(config2.ErrCtrRemoved, "container %s is not valid", dep.ID()) } if dep.ID() == ctr.ID() { - return errors.Wrapf(ErrInvalidArg, "must specify another container") + return errors.Wrapf(config2.ErrInvalidArg, "must specify another container") } if ctr.config.Pod != "" && dep.config.Pod != ctr.config.Pod { - return errors.Wrapf(ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, dep.ID()) + return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, dep.ID()) } deps = append(deps, dep.ID()) @@ -937,11 +947,11 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption { func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmode string, networks []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.NetNsCtr != "" { - return errors.Wrapf(ErrInvalidArg, "container is already set to join another container's net ns, cannot create a new net ns") + return errors.Wrapf(config2.ErrInvalidArg, "container is already set to join another container's net ns, cannot create a new net ns") } ctr.config.PostConfigureNetNS = postConfigureNetNS @@ -962,15 +972,15 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo func WithStaticIP(ip net.IP) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if !ctr.config.CreateNetNS { - return errors.Wrapf(ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace") + return errors.Wrapf(config2.ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace") } if len(ctr.config.Networks) != 0 { - return errors.Wrapf(ErrInvalidArg, "cannot set a static IP if joining additional CNI networks") + return errors.Wrapf(config2.ErrInvalidArg, "cannot set a static IP if joining additional CNI networks") } ctr.config.StaticIP = ip @@ -983,15 +993,15 @@ func WithStaticIP(ip net.IP) CtrCreateOption { func WithLogDriver(driver string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } switch driver { case "": - return errors.Wrapf(ErrInvalidArg, "log driver must be set") + return errors.Wrapf(config2.ErrInvalidArg, "log driver must be set") case JournaldLogging, KubernetesLogging, JSONLogging: break default: - return errors.Wrapf(ErrInvalidArg, "invalid log driver") + return errors.Wrapf(config2.ErrInvalidArg, "invalid log driver") } 
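Each WithXXXNSFrom option above repeats the same four checks before recording the namespace source. If one wanted to factor them out, a hypothetical in-package helper (validateNSTarget is not part of this diff; it only collects checks the hunks already show) could look like:

// validateNSTarget gathers the checks that WithIPCNSFrom, WithMountNSFrom,
// WithNetNSFrom, WithPIDNSFrom, WithUserNSFrom, WithUTSNSFrom and
// WithCgroupNSFrom each repeat verbatim.
func validateNSTarget(ctr, nsCtr *Container) error {
	if ctr.valid {
		return config2.ErrCtrFinalized
	}
	if !nsCtr.valid {
		return config2.ErrCtrRemoved
	}
	if nsCtr.ID() == ctr.ID() {
		return errors.Wrapf(config2.ErrInvalidArg, "must specify another container")
	}
	if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod {
		return errors.Wrapf(config2.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID())
	}
	return nil
}

With such a helper, an option like WithPIDNSFrom would reduce to the validation call plus the single field assignment in which it actually differs.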
ctr.config.LogDriver = driver @@ -1004,10 +1014,10 @@ func WithLogDriver(driver string) CtrCreateOption { func WithLogPath(path string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if path == "" { - return errors.Wrapf(ErrInvalidArg, "log path must be set") + return errors.Wrapf(config2.ErrInvalidArg, "log path must be set") } ctr.config.LogPath = path @@ -1020,11 +1030,11 @@ func WithLogPath(path string) CtrCreateOption { func WithCgroupParent(parent string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if parent == "" { - return errors.Wrapf(ErrInvalidArg, "cgroup parent cannot be empty") + return errors.Wrapf(config2.ErrInvalidArg, "cgroup parent cannot be empty") } ctr.config.CgroupParent = parent @@ -1037,10 +1047,10 @@ func WithCgroupParent(parent string) CtrCreateOption { func WithDNSSearch(searchDomains []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.UseImageResolvConf { - return errors.Wrapf(ErrInvalidArg, "cannot add DNS search domains if container will not create /etc/resolv.conf") + return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS search domains if container will not create /etc/resolv.conf") } ctr.config.DNSSearch = searchDomains return nil @@ -1051,16 +1061,16 @@ func WithDNSSearch(searchDomains []string) CtrCreateOption { func WithDNS(dnsServers []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.UseImageResolvConf { - return errors.Wrapf(ErrInvalidArg, "cannot add DNS servers if container will not create /etc/resolv.conf") + return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS servers if container will not create /etc/resolv.conf") } var dns []net.IP for _, i := range dnsServers { result := net.ParseIP(i) if result == nil { - return errors.Wrapf(ErrInvalidArg, "invalid IP address %s", i) + return errors.Wrapf(config2.ErrInvalidArg, "invalid IP address %s", i) } dns = append(dns, result) } @@ -1073,10 +1083,10 @@ func WithDNS(dnsServers []string) CtrCreateOption { func WithDNSOption(dnsOptions []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.UseImageResolvConf { - return errors.Wrapf(ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf") + return errors.Wrapf(config2.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf") } ctr.config.DNSOption = dnsOptions return nil @@ -1087,11 +1097,11 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption { func WithHosts(hosts []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if ctr.config.UseImageHosts { - return errors.Wrapf(ErrInvalidArg, "cannot add hosts if container will not create /etc/hosts") + return errors.Wrapf(config2.ErrInvalidArg, "cannot add hosts if container will not create /etc/hosts") } ctr.config.HostAdd = hosts @@ -1104,7 +1114,7 @@ func WithHosts(hosts []string) CtrCreateOption { func WithConmonPidFile(path string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.ConmonPidFile = path 
return nil @@ -1116,7 +1126,7 @@ func WithConmonPidFile(path string) CtrCreateOption { func WithGroups(groups []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Groups = groups return nil @@ -1134,11 +1144,11 @@ func WithGroups(groups []string) CtrCreateOption { func WithUserVolumes(volumes []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if volumes == nil { - return ErrInvalidArg + return config2.ErrInvalidArg } ctr.config.UserVolumes = make([]string, 0, len(volumes)) @@ -1158,7 +1168,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption { func WithEntrypoint(entrypoint []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Entrypoint = make([]string, 0, len(entrypoint)) @@ -1178,7 +1188,7 @@ func WithEntrypoint(entrypoint []string) CtrCreateOption { func WithCommand(command []string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Command = make([]string, 0, len(command)) @@ -1195,13 +1205,13 @@ func WithCommand(command []string) CtrCreateOption { func WithRootFS(rootfs string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if _, err := os.Stat(rootfs); err != nil { return errors.Wrapf(err, "error checking path %q", rootfs) } if ctr.config.RootfsImageID != "" { - return errors.Wrapf(ErrInvalidArg, "cannot set both an image ID and a rootfs for a container") + return errors.Wrapf(config2.ErrInvalidArg, "cannot set both an image ID and a rootfs for a container") } ctr.config.Rootfs = rootfs return nil @@ -1215,7 +1225,7 @@ func WithRootFS(rootfs string) CtrCreateOption { func WithCtrNamespace(ns string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.Namespace = ns @@ -1229,13 +1239,13 @@ func WithCtrNamespace(ns string) CtrCreateOption { func WithUseImageResolvConf() CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if len(ctr.config.DNSServer) != 0 || len(ctr.config.DNSSearch) != 0 || len(ctr.config.DNSOption) != 0 { - return errors.Wrapf(ErrInvalidArg, "not creating resolv.conf conflicts with DNS options") + return errors.Wrapf(config2.ErrInvalidArg, "not creating resolv.conf conflicts with DNS options") } ctr.config.UseImageResolvConf = true @@ -1249,11 +1259,11 @@ func WithUseImageResolvConf() CtrCreateOption { func WithUseImageHosts() CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } if len(ctr.config.HostAdd) != 0 { - return errors.Wrapf(ErrInvalidArg, "not creating /etc/hosts conflicts with adding to the hosts file") + return errors.Wrapf(config2.ErrInvalidArg, "not creating /etc/hosts conflicts with adding to the hosts file") } ctr.config.UseImageHosts = true @@ -1268,14 +1278,14 @@ func WithUseImageHosts() CtrCreateOption { func WithRestartPolicy(policy string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } switch policy { case RestartPolicyNone, RestartPolicyNo, RestartPolicyOnFailure, 
RestartPolicyAlways: ctr.config.RestartPolicy = policy default: - return errors.Wrapf(ErrInvalidArg, "%q is not a valid restart policy", policy) + return errors.Wrapf(config2.ErrInvalidArg, "%q is not a valid restart policy", policy) } return nil @@ -1288,7 +1298,7 @@ func WithRestartPolicy(policy string) CtrCreateOption { func WithRestartRetries(tries uint) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.RestartRetries = tries @@ -1302,7 +1312,7 @@ func WithRestartRetries(tries uint) CtrCreateOption { func withIsInfra() CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } ctr.config.IsInfra = true @@ -1315,7 +1325,7 @@ func withIsInfra() CtrCreateOption { func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } destinations := make(map[string]bool) @@ -1325,7 +1335,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { // If they don't we will automatically create them. if _, ok := destinations[vol.Dest]; ok { - return errors.Wrapf(ErrInvalidArg, "two volumes found with destination %s", vol.Dest) + return errors.Wrapf(config2.ErrInvalidArg, "two volumes found with destination %s", vol.Dest) } destinations[vol.Dest] = true @@ -1346,7 +1356,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { func WithVolumeName(name string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } // Check the name against a regex @@ -1363,7 +1373,7 @@ func WithVolumeName(name string) VolumeCreateOption { func WithVolumeLabels(labels map[string]string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.Labels = make(map[string]string) @@ -1379,7 +1389,7 @@ func WithVolumeLabels(labels map[string]string) VolumeCreateOption { func WithVolumeDriver(driver string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.Driver = driver @@ -1392,7 +1402,7 @@ func WithVolumeDriver(driver string) VolumeCreateOption { func WithVolumeOptions(options map[string]string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.Options = make(map[string]string) @@ -1408,7 +1418,7 @@ func WithVolumeOptions(options map[string]string) VolumeCreateOption { func WithVolumeUID(uid int) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.UID = uid @@ -1421,7 +1431,7 @@ func WithVolumeUID(uid int) VolumeCreateOption { func WithVolumeGID(gid int) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.GID = gid @@ -1437,7 +1447,7 @@ func WithVolumeGID(gid int) VolumeCreateOption { func withSetCtrSpecific() VolumeCreateOption { return func(volume *Volume) error { if volume.valid { - return ErrVolumeFinalized + return config2.ErrVolumeFinalized } volume.config.IsCtrSpecific = true @@ -1452,7 +1462,7 @@ func 
withSetCtrSpecific() VolumeCreateOption { func WithPodName(name string) PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } // Check the name against a regex @@ -1470,7 +1480,7 @@ func WithPodName(name string) PodCreateOption { func WithPodLabels(labels map[string]string) PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.Labels = make(map[string]string) @@ -1486,7 +1496,7 @@ func WithPodLabels(labels map[string]string) PodCreateOption { func WithPodCgroupParent(path string) PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.CgroupParent = path @@ -1502,7 +1512,7 @@ func WithPodCgroupParent(path string) PodCreateOption { func WithPodCgroups() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodCgroup = true @@ -1519,7 +1529,7 @@ func WithPodCgroups() PodCreateOption { func WithPodNamespace(ns string) PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.Namespace = ns @@ -1535,7 +1545,7 @@ func WithPodNamespace(ns string) PodCreateOption { func WithPodIPC() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodIPC = true @@ -1551,7 +1561,7 @@ func WithPodIPC() PodCreateOption { func WithPodNet() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodNet = true @@ -1569,7 +1579,7 @@ func WithPodNet() PodCreateOption { func WithPodMount() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodMount = true @@ -1587,7 +1597,7 @@ func WithPodMount() PodCreateOption { func WithPodUser() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodUser = true @@ -1603,7 +1613,7 @@ func WithPodUser() PodCreateOption { func WithPodPID() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodPID = true @@ -1619,7 +1629,7 @@ func WithPodPID() PodCreateOption { func WithPodUTS() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.UsePodUTS = true @@ -1632,7 +1642,7 @@ func WithPodUTS() PodCreateOption { func WithInfraContainer() PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.InfraContainer.HasInfraContainer = true @@ -1645,7 +1655,7 @@ func WithInfraContainer() PodCreateOption { func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption { return func(pod *Pod) error { if pod.valid { - return ErrPodFinalized + return config2.ErrPodFinalized } pod.config.InfraContainer.PortBindings = bindings return nil @@ -1656,7 +1666,7 @@ func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption { func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { - return ErrCtrFinalized + return config2.ErrCtrFinalized } 
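All of these option constructors are consumed the same way: the runtime threads a freshly allocated object through the slice of options and stops at the first error. A short sketch of that apply loop, assuming nothing beyond the CtrCreateOption type shown above (the real wiring lives in runtime_ctr.go, further down in this diff):

// applyCtrOptions is a sketch of how a constructor runs its functional
// options in order, failing fast on the first one that rejects the config.
func applyCtrOptions(ctr *Container, options ...CtrCreateOption) error {
	for _, option := range options {
		if err := option(ctr); err != nil {
			return errors.Wrapf(err, "error running container create option")
		}
	}
	return nil
}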
ctr.config.HealthCheckConfig = healthCheck return nil diff --git a/libpod/pod.go b/libpod/pod.go index c319c449f..60626bfd7 100644 --- a/libpod/pod.go +++ b/libpod/pod.go @@ -3,6 +3,7 @@ package libpod import ( "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/lock" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/pkg/errors" @@ -190,7 +191,7 @@ func (p *Pod) CgroupPath() (string, error) { // HasContainer checks if a container is present in the pod func (p *Pod) HasContainer(id string) (bool, error) { if !p.valid { - return false, ErrPodRemoved + return false, define.ErrPodRemoved } return p.runtime.state.PodHasContainer(p, id) @@ -202,7 +203,7 @@ func (p *Pod) AllContainersByID() ([]string, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } return p.runtime.state.PodContainersByID(p) @@ -211,7 +212,7 @@ func (p *Pod) AllContainersByID() ([]string, error) { // AllContainers retrieves the containers in the pod func (p *Pod) AllContainers() ([]*Container, error) { if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } p.lock.Lock() defer p.lock.Unlock() @@ -280,7 +281,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*ContainerStats) (ma newStats, err := c.GetContainerStats(prevStat) // If the container wasn't running, don't include it // but also suppress the error - if err != nil && errors.Cause(err) != ErrCtrStateInvalid { + if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid { return nil, err } if err == nil { diff --git a/libpod/pod_api.go b/libpod/pod_api.go index b913857dd..c7b0353bd 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -3,6 +3,7 @@ package libpod import ( "context" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -27,7 +28,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -47,7 +48,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { // If there are no containers without dependencies, we can't start // Error out if len(graph.noDepNodes) == 0 { - return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID()) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID()) } // Traverse the graph beginning at nodes with no dependencies @@ -56,7 +57,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, errors.Wrapf(ErrCtrExists, "error starting some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error starting some containers") } defer p.newPodEvent(events.Start) return nil, nil @@ -88,7 +89,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -112,7 +113,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m } // Ignore containers that are not running - if ctr.state.State != ContainerStateRunning { + if ctr.state.State != define.ContainerStateRunning { ctr.lock.Unlock() continue } @@ -136,7 +137,7 @@ func (p *Pod) 
StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m } if len(ctrErrors) > 0 { - return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error stopping some containers") } defer p.newPodEvent(events.Stop) return nil, nil @@ -159,7 +160,7 @@ func (p *Pod) Pause() (map[string]error, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -180,7 +181,7 @@ func (p *Pod) Pause() (map[string]error, error) { } // Ignore containers that are not running - if ctr.state.State != ContainerStateRunning { + if ctr.state.State != define.ContainerStateRunning { ctr.lock.Unlock() continue } @@ -195,7 +196,7 @@ func (p *Pod) Pause() (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, errors.Wrapf(ErrCtrExists, "error pausing some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error pausing some containers") } defer p.newPodEvent(events.Pause) return nil, nil @@ -218,7 +219,7 @@ func (p *Pod) Unpause() (map[string]error, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -239,7 +240,7 @@ func (p *Pod) Unpause() (map[string]error, error) { } // Ignore containers that are not paused - if ctr.state.State != ContainerStatePaused { + if ctr.state.State != define.ContainerStatePaused { ctr.lock.Unlock() continue } @@ -254,7 +255,7 @@ func (p *Pod) Unpause() (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, errors.Wrapf(ErrCtrExists, "error unpausing some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error unpausing some containers") } defer p.newPodEvent(events.Unpause) @@ -279,7 +280,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -299,7 +300,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { // If there are no containers without dependencies, we can't start // Error out if len(graph.noDepNodes) == 0 { - return nil, errors.Wrapf(ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID()) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID()) } // Traverse the graph beginning at nodes with no dependencies @@ -308,7 +309,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { } if len(ctrErrors) > 0 { - return ctrErrors, errors.Wrapf(ErrCtrExists, "error stopping some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error stopping some containers") } p.newPodEvent(events.Stop) p.newPodEvent(events.Start) @@ -331,7 +332,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -352,7 +353,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { } // Ignore containers that are not running - if ctr.state.State != ContainerStateRunning { + if ctr.state.State != define.ContainerStateRunning { ctr.lock.Unlock() continue } @@ -374,7 +375,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { } if len(ctrErrors) > 0 { - 
return ctrErrors, errors.Wrapf(ErrCtrExists, "error killing some containers") + return ctrErrors, errors.Wrapf(define.ErrCtrExists, "error killing some containers") } defer p.newPodEvent(events.Kill) return nil, nil @@ -382,12 +383,12 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { // Status gets the status of all containers in the pod // Returns a map of Container ID to Container Status -func (p *Pod) Status() (map[string]ContainerStatus, error) { +func (p *Pod) Status() (map[string]define.ContainerStatus, error) { p.lock.Lock() defer p.lock.Unlock() if !p.valid { - return nil, ErrPodRemoved + return nil, define.ErrPodRemoved } allCtrs, err := p.runtime.state.PodContainers(p) @@ -402,7 +403,7 @@ func (p *Pod) Status() (map[string]ContainerStatus, error) { } // Now that all containers are locked, get their status - status := make(map[string]ContainerStatus, len(allCtrs)) + status := make(map[string]define.ContainerStatus, len(allCtrs)) for _, ctr := range allCtrs { if err := ctr.syncContainer(); err != nil { return nil, err diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go index 23359d841..7aacda482 100644 --- a/libpod/pod_internal.go +++ b/libpod/pod_internal.go @@ -5,6 +5,7 @@ import ( "path/filepath" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -52,7 +53,7 @@ func (p *Pod) refresh() error { } if !p.valid { - return ErrPodRemoved + return define.ErrPodRemoved } // Retrieve the pod's lock @@ -76,7 +77,7 @@ func (p *Pod) refresh() error { logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath) default: - return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager) + return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager) } } diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go index e08e5e83a..80221c3a9 100644 --- a/libpod/pod_top_linux.go +++ b/libpod/pod_top_linux.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/rootless" "github.com/containers/psgo" ) @@ -34,7 +35,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) { c.lock.Unlock() return nil, err } - if c.state.State == ContainerStateRunning { + if c.state.State == define.ContainerStateRunning { pid := strconv.Itoa(c.state.PID) pids = append(pids, pid) } diff --git a/libpod/pod_top_unsupported.go b/libpod/pod_top_unsupported.go index d44470523..9a3333275 100644 --- a/libpod/pod_top_unsupported.go +++ b/libpod/pod_top_unsupported.go @@ -2,7 +2,9 @@ package libpod +import "github.com/containers/libpod/libpod/define" + // GetPodPidInformation is exclusive to linux func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) { - return nil, ErrNotImplemented + return nil, define.ErrNotImplemented } diff --git a/libpod/runtime.go b/libpod/runtime.go index 52ce8062b..53c9a1209 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -10,10 +10,12 @@ import ( "strings" "sync" "syscall" + "time" "github.com/BurntSushi/toml" is "github.com/containers/image/storage" "github.com/containers/image/types" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/lock" @@ -72,17 +74,17 @@ var ( OverrideConfigPath = etcDir + "/containers/libpod.conf" // 
DefaultInfraImage to use for infra container - DefaultInfraImage = "k8s.gcr.io/pause:3.1" - // DefaultInfraCommand to be run in an infra container - DefaultInfraCommand = "/pause" - // DefaultInitPath is the default path to the container-init binary - DefaultInitPath = "/usr/libexec/podman/catatonit" + // DefaultInfraCommand to be run in an infra container // DefaultSHMLockPath is the default path for SHM locks DefaultSHMLockPath = "/libpod_lock" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks DefaultRootlessSHMLockPath = "/libpod_rootless_lock" + + // DefaultDetachKeys is the default keys sequence for detaching a + // container + DefaultDetachKeys = "ctrl-p,ctrl-q" ) // A RuntimeOption is a functional option which alters the Runtime created by @@ -123,6 +125,9 @@ type Runtime struct { // mechanism to read and write event logs eventer events.Eventer + + // noStore indicates whether we need to interact with a store or not + noStore bool } // RuntimeConfig contains configuration options used to set up the runtime @@ -234,10 +239,15 @@ type RuntimeConfig struct { // pods. NumLocks uint32 `toml:"num_locks,omitempty"` + // LockType is the type of locking to use. + LockType string `toml:"lock_type,omitempty"` + // EventsLogger determines where events should be logged EventsLogger string `toml:"events_logger"` // EventsLogFilePath is where the events log is stored. - EventsLogFilePath string `toml:-"events_logfile_path"` + EventsLogFilePath string `toml:"events_logfile_path"` + // DetachKeys is the sequence of keys used to detach a container + DetachKeys string `toml:"detach_keys"` } // runtimeConfiguredFrom is a struct used during early runtime init to help @@ -287,19 +297,16 @@ func defaultRuntimeConfig() (RuntimeConfig, error) { }, ConmonPath: []string{ "/usr/libexec/podman/conmon", - "/usr/libexec/crio/conmon", "/usr/local/lib/podman/conmon", - "/usr/local/libexec/crio/conmon", "/usr/bin/conmon", "/usr/sbin/conmon", "/usr/local/bin/conmon", "/usr/local/sbin/conmon", - "/usr/lib/crio/bin/conmon", }, ConmonEnvVars: []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", }, - InitPath: DefaultInitPath, + InitPath: define.DefaultInitPath, CgroupManager: SystemdCgroupsManager, StaticDir: filepath.Join(storeOpts.GraphRoot, "libpod"), TmpDir: "", @@ -307,15 +314,57 @@ func defaultRuntimeConfig() (RuntimeConfig, error) { NoPivotRoot: false, CNIConfigDir: etcDir + "/cni/net.d/", CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"}, - InfraCommand: DefaultInfraCommand, - InfraImage: DefaultInfraImage, + InfraCommand: define.DefaultInfraCommand, + InfraImage: define.DefaultInfraImage, EnablePortReservation: true, EnableLabeling: true, NumLocks: 2048, EventsLogger: events.DefaultEventerType.String(), + DetachKeys: DefaultDetachKeys, + LockType: "shm", }, nil } +// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set +// containers/image uses XDG_RUNTIME_DIR to locate the auth file. +// It internally calls EnableLinger() so that the user's processes are not +// killed once the session is terminated. EnableLinger() also attempts to +// get the runtime directory when XDG_RUNTIME_DIR is not specified.
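The RuntimeConfig hunk above introduces two new TOML keys, lock_type and detach_keys. A minimal round-trip sketch with the BurntSushi/toml package the runtime already imports (miniConfig is a stand-in, not libpod's real RuntimeConfig):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// miniConfig mirrors only the two fields this diff adds.
type miniConfig struct {
	LockType   string `toml:"lock_type,omitempty"`
	DetachKeys string `toml:"detach_keys"`
}

func main() {
	const doc = `
lock_type = "shm"
detach_keys = "ctrl-p,ctrl-q"
`
	var cfg miniConfig
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {LockType:shm DetachKeys:ctrl-p,ctrl-q}
}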
+func SetXdgRuntimeDir() error { + if !rootless.IsRootless() { + return nil + } + + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + + runtimeDirLinger, err := rootless.EnableLinger() + if err != nil { + return errors.Wrapf(err, "error enabling user session") + } + if runtimeDir == "" && runtimeDirLinger != "" { + if _, err := os.Stat(runtimeDirLinger); err != nil && os.IsNotExist(err) { + chWait := make(chan error) + defer close(chWait) + if _, err := WaitForFile(runtimeDirLinger, chWait, time.Second*10); err != nil { + return errors.Wrapf(err, "waiting for directory '%s'", runtimeDirLinger) + } + } + runtimeDir = runtimeDirLinger + } + + if runtimeDir == "" { + var err error + runtimeDir, err = util.GetRootlessRuntimeDir() + if err != nil { + return err + } + } + if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil { + return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") + } + return nil +} + func getDefaultTmpDir() (string, error) { if !rootless.IsRootless() { return "/var/run/libpod", nil @@ -338,25 +387,6 @@ func getDefaultTmpDir() (string, error) { return filepath.Join(libpodRuntimeDir, "tmp"), nil } -// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set -// containers/image uses XDG_RUNTIME_DIR to locate the auth file. -func SetXdgRuntimeDir(val string) error { - if !rootless.IsRootless() { - return nil - } - if val == "" { - var err error - val, err = util.GetRootlessRuntimeDir() - if err != nil { - return err - } - } - if err := os.Setenv("XDG_RUNTIME_DIR", val); err != nil { - return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") - } - return nil -} - // NewRuntime creates a new container runtime // Options can be passed to override the default configuration for the runtime func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) { @@ -378,7 +408,7 @@ func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. func homeDir() (string, error) { home := os.Getenv("HOME") if home == "" { - usr, err := user.Current() + usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID())) if err != nil { return "", errors.Wrapf(err, "unable to resolve HOME directory") } @@ -396,28 +426,33 @@ func getRootlessConfigPath() (string, error) { return filepath.Join(home, ".config/containers/libpod.conf"), nil } -func getConfigPath() string { +func getConfigPath() (string, error) { if rootless.IsRootless() { - rootlessConfigPath, err := getRootlessConfigPath() + path, err := getRootlessConfigPath() if err != nil { - if _, err := os.Stat(rootlessConfigPath); err == nil { - return rootlessConfigPath - } + return "", err + } + if _, err := os.Stat(path); err == nil { + return path, nil } + return "", err } if _, err := os.Stat(OverrideConfigPath); err == nil { // Use the override configuration path - return OverrideConfigPath + return OverrideConfigPath, nil } if _, err := os.Stat(ConfigPath); err == nil { - return ConfigPath + return ConfigPath, nil } - return "" + return "", nil } // DefaultRuntimeConfig reads default config path and returns the RuntimeConfig func DefaultRuntimeConfig() (*RuntimeConfig, error) { - configPath := getConfigPath() + configPath, err := getConfigPath() + if err != nil { + return nil, err + } contents, err := ioutil.ReadFile(configPath) if err != nil { @@ -465,8 +500,10 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. 
runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod") runtime.config.VolumePath = filepath.Join(storageConf.GraphRoot, "volumes") - configPath := getConfigPath() - rootlessConfigPath := "" + configPath, err := getConfigPath() + if err != nil { + return nil, err + } if rootless.IsRootless() { home, err := homeDir() if err != nil { @@ -478,23 +515,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. runtime.config.SignaturePolicyPath = newPath } } - - rootlessConfigPath, err = getRootlessConfigPath() - if err != nil { - return nil, err - } - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return nil, err - } - - // containers/image uses XDG_RUNTIME_DIR to locate the auth file. - // So make sure the env variable is set. - if err := SetXdgRuntimeDir(runtimeDir); err != nil { - return nil, errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") - } - } if userConfigPath != "" { @@ -604,7 +624,13 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. return nil, errors.Wrapf(err, "error configuring runtime") } } - if rootlessConfigPath != "" { + + if rootless.IsRootless() && configPath == "" { + configPath, err := getRootlessConfigPath() + if err != nil { + return nil, err + } + // storage.conf storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless()) if err != nil { @@ -617,16 +643,20 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. } if configPath != "" { - os.MkdirAll(filepath.Dir(rootlessConfigPath), 0755) - file, err := os.OpenFile(rootlessConfigPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { + return nil, err + } + file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err != nil && !os.IsExist(err) { - return nil, errors.Wrapf(err, "cannot open file %s", rootlessConfigPath) + return nil, errors.Wrapf(err, "cannot open file %s", configPath) } if err == nil { defer file.Close() enc := toml.NewEncoder(file) if err := enc.Encode(runtime.config); err != nil { - os.Remove(rootlessConfigPath) + if removeErr := os.Remove(configPath); removeErr != nil { + logrus.Debugf("unable to remove %s: %q", configPath, removeErr) + } } } } @@ -637,6 +667,62 @@
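The hunk that follows adds getLockManager, which picks a lock backend from LockType and, for each backend, prefers opening existing lock state, creating it fresh only when none exists yet. A stripped-down sketch of that open-or-create shape (openOrCreate and its function-typed parameters are hypothetical; only the open/new pairing and the ENOENT check are taken from the hunk):

// openOrCreate captures the fallback getLockManager applies for both the
// "file" and "shm" backends: open existing state, create it only when the
// open fails with ENOENT, and pass every other error through unchanged.
func openOrCreate(path string, open, create func(string) (lock.Manager, error)) (lock.Manager, error) {
	manager, err := open(path)
	if err == nil {
		return manager, nil
	}
	if os.IsNotExist(errors.Cause(err)) {
		return create(path)
	}
	return nil, err
}

The SHM branch layers one extra case on top: syscall.ERANGE together with doRenumber means the configured lock count changed, so the old segment is removed and recreated rather than treated as fatal.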
return runtime, nil } +func getLockManager(runtime *Runtime) (lock.Manager, error) { + var err error + var manager lock.Manager + + switch runtime.config.LockType { + case "file": + lockPath := filepath.Join(runtime.config.TmpDir, "locks") + manager, err = lock.OpenFileLockManager(lockPath) + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + manager, err = lock.NewFileLockManager(lockPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to get new file lock manager") + } + } else { + return nil, err + } + } + + case "", "shm": + lockPath := DefaultSHMLockPath + if rootless.IsRootless() { + lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) + } + // Set up the lock manager + manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks) + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) + if err != nil { + return nil, errors.Wrapf(err, "failed to get new shm lock manager") + } + } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber { + logrus.Debugf("Number of locks does not match - removing old locks") + + // ERANGE indicates a lock numbering mismatch. + // Since we're renumbering, this is not fatal. + // Remove the earlier set of locks and recreate. + if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil { + return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath) + } + + manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + default: + return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType) + } + return manager, nil +} + // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { @@ -655,7 +741,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { break } if !foundConmon { - return errors.Wrapf(ErrInvalidArg, + return errors.Wrapf(define.ErrInvalidArg, "could not find a working conmon binary (configured options: %v)", runtime.config.ConmonPath) } @@ -678,7 +764,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } runtime.state = state case SQLiteStateStore: - return errors.Wrapf(ErrInvalidArg, "SQLite state is currently disabled") + return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled") case BoltDBStateStore: dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db") @@ -688,7 +774,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } runtime.state = state default: - return errors.Wrapf(ErrInvalidArg, "unrecognized state type passed") + return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed") } // Grab config from the database so we can reset some defaults @@ -765,11 +851,14 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { var store storage.Store if os.Geteuid() != 0 { logrus.Debug("Not configuring container store") + } else if runtime.noStore { + logrus.Debug("No store required. 
Not opening container store.") } else { store, err = storage.GetStore(runtime.config.StorageConfig) if err != nil { return err } + err = nil defer func() { if err != nil && store != nil { @@ -848,7 +937,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } if len(runtime.config.RuntimePath) == 0 { - return errors.Wrapf(ErrInvalidArg, "empty runtime path array passed") + return errors.Wrapf(define.ErrInvalidArg, "empty runtime path array passed") } name := filepath.Base(runtime.config.RuntimePath[0]) @@ -873,7 +962,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Initialize remaining OCI runtimes for name, paths := range runtime.config.OCIRuntimes { if len(paths) == 0 { - return errors.Wrapf(ErrInvalidArg, "must provide at least 1 path to OCI runtime %s", name) + return errors.Wrapf(define.ErrInvalidArg, "must provide at least 1 path to OCI runtime %s", name) } supportsJSON := false @@ -922,7 +1011,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } else { ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime] if !ok { - return errors.Wrapf(ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime) + return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime) } runtime.defaultOCIRuntime = ociRuntime } @@ -930,12 +1019,12 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Do we have at least one valid OCI runtime? if len(runtime.ociRuntimes) == 0 { - return errors.Wrapf(ErrInvalidArg, "no OCI runtime has been configured") + return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured") } // Do we have a default runtime? if runtime.defaultOCIRuntime == nil { - return errors.Wrapf(ErrInvalidArg, "no default OCI runtime was configured") + return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured") } // Make the per-boot files directory if it does not exist @@ -1019,37 +1108,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } } - lockPath := DefaultSHMLockPath - if rootless.IsRootless() { - lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) - } - // Set up the lock manager - manager, err := lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks) + runtime.lockManager, err = getLockManager(runtime) if err != nil { - if os.IsNotExist(errors.Cause(err)) { - manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) - if err != nil { - return errors.Wrapf(err, "failed to get new shm lock manager") - } - } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber { - logrus.Debugf("Number of locks does not match - removing old locks") - - // ERANGE indicates a lock numbering mismatch. - // Since we're renumbering, this is not fatal. - // Remove the earlier set of locks and recreate. - if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil { - return errors.Wrapf(err, "error removing libpod locks file %s", lockPath) - } - - manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) - if err != nil { - return err - } - } else { - return err - } + return err } - runtime.lockManager = manager // If we're renumbering locks, do it now. 
// It breaks out of normal runtime init, and will not return a valid @@ -1087,7 +1149,7 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } config := new(RuntimeConfig) @@ -1109,7 +1171,7 @@ func (r *Runtime) Shutdown(force bool) error { defer r.lock.Unlock() if !r.valid { - return ErrRuntimeStopped + return define.ErrRuntimeStopped } r.valid = false @@ -1121,7 +1183,7 @@ func (r *Runtime) Shutdown(force bool) error { logrus.Errorf("Error retrieving containers from database: %v", err) } else { for _, ctr := range ctrs { - if err := ctr.StopWithTimeout(CtrRemoveTimeout); err != nil { + if err := ctr.StopWithTimeout(define.CtrRemoveTimeout); err != nil { logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err) } } @@ -1129,6 +1191,8 @@ func (r *Runtime) Shutdown(force bool) error { } var lastError error + // If no store was requested, it can be nil and there is no need to + // attempt to shut it down if r.store != nil { if _, err := r.store.Shutdown(force); err != nil { lastError = errors.Wrapf(err, "Error shutting down container storage") @@ -1196,21 +1260,21 @@ func (r *Runtime) refresh(alivePath string) error { } // Info returns the store and host information -func (r *Runtime) Info() ([]InfoData, error) { - info := []InfoData{} +func (r *Runtime) Info() ([]define.InfoData, error) { + info := []define.InfoData{} // get host information hostInfo, err := r.hostInfo() if err != nil { return nil, errors.Wrapf(err, "error getting host info") } - info = append(info, InfoData{Type: "host", Data: hostInfo}) + info = append(info, define.InfoData{Type: "host", Data: hostInfo}) // get store information storeInfo, err := r.storeInfo() if err != nil { return nil, errors.Wrapf(err, "error getting store info") } - info = append(info, InfoData{Type: "store", Data: storeInfo}) + info = append(info, define.InfoData{Type: "store", Data: storeInfo}) reg, err := sysreg.GetRegistries() if err != nil { @@ -1230,7 +1294,7 @@ func (r *Runtime) Info() ([]InfoData, error) { return nil, errors.Wrapf(err, "error getting registries") } registries["blocked"] = breg - info = append(info, InfoData{Type: "registries", Data: registries}) + info = append(info, define.InfoData{Type: "registries", Data: registries}) return info, nil } @@ -1242,7 +1306,7 @@ func (r *Runtime) generateName() (string, error) { if _, err := r.state.LookupContainer(name); err == nil { continue } else { - if errors.Cause(err) != ErrNoSuchCtr { + if errors.Cause(err) != define.ErrNoSuchCtr { return "", err } } @@ -1250,7 +1314,7 @@ func (r *Runtime) generateName() (string, error) { if _, err := r.state.LookupPod(name); err == nil { continue } else { - if errors.Cause(err) != ErrNoSuchPod { + if errors.Cause(err) != define.ErrNoSuchPod { return "", err } } diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go index 569f63322..586db5a1e 100644 --- a/libpod/runtime_cstorage.go +++ b/libpod/runtime_cstorage.go @@ -1,6 +1,7 @@ package libpod import ( + "github.com/containers/libpod/libpod/define" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -56,7 +57,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { targetID, err := r.store.Lookup(idOrName) if err != nil { if err == storage.ErrLayerUnknown { - return errors.Wrapf(ErrNoSuchCtr, "no container with ID or name %q found", idOrName) + return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or
name %q found", idOrName) } return errors.Wrapf(err, "error looking up container %q", idOrName) } @@ -66,7 +67,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { ctr, err := r.store.Container(targetID) if err != nil { if err == storage.ErrContainerUnknown { - return errors.Wrapf(ErrNoSuchCtr, "%q does not refer to a container", idOrName) + return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName) } return errors.Wrapf(err, "error retrieving container %q", idOrName) } @@ -77,7 +78,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { return err } if exists { - return errors.Wrapf(ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID) + return errors.Wrapf(define.ErrCtrExists, "refusing to remove %q as it exists in libpod as container %s", idOrName, ctr.ID) } if !force { @@ -92,7 +93,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { return errors.Wrapf(err, "error looking up container %q mounts", idOrName) } if timesMounted > 0 { - return errors.Wrapf(ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName) + return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName) } } else { if _, err := r.store.Unmount(ctr.ID, true); err != nil { diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index a169d30f7..633d26fe1 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -8,20 +8,17 @@ import ( "strings" "time" + config2 "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/stringid" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// CtrRemoveTimeout is the default number of seconds to wait after stopping a container -// before sending the kill signal -const CtrRemoveTimeout = 10 - // Contains the public Runtime API for containers // A CtrCreateOption is a functional option which alters the Container created @@ -38,7 +35,7 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options .. r.lock.Lock() defer r.lock.Unlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, config2.ErrRuntimeStopped } return r.newContainer(ctx, rSpec, options...) 
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index a169d30f7..633d26fe1 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -8,20 +8,17 @@ import (
 	"strings"
 	"time"
 
+	config2 "github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
 	"github.com/containers/libpod/pkg/rootless"
 	"github.com/containers/storage/pkg/stringid"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
-// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
-// before sending the kill signal
-const CtrRemoveTimeout = 10
-
 // Contains the public Runtime API for containers
 
 // A CtrCreateOption is a functional option which alters the Container created
@@ -38,7 +35,7 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ..
 	r.lock.Lock()
 	defer r.lock.Unlock()
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, config2.ErrRuntimeStopped
 	}
 	return r.newContainer(ctx, rSpec, options...)
 }
@@ -48,7 +45,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
 	r.lock.Lock()
 	defer r.lock.Unlock()
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, config2.ErrRuntimeStopped
 	}
 
 	ctr, err := r.initContainerVariables(rSpec, config)
@@ -60,7 +57,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
 
 func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
 	if rSpec == nil {
-		return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container")
+		return nil, errors.Wrapf(config2.ErrInvalidArg, "must provide a valid runtime spec to create container")
 	}
 	ctr := new(Container)
 	ctr.config = new(ContainerConfig)
@@ -93,7 +90,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
 
 	ctr.state.BindMounts = make(map[string]string)
 
-	ctr.config.StopTimeout = CtrRemoveTimeout
+	ctr.config.StopTimeout = config2.CtrRemoveTimeout
 
 	ctr.config.OCIRuntime = r.defaultOCIRuntime.name
 
@@ -136,8 +133,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 	ctr.config.LockID = ctr.lock.ID()
 	logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
 
+	defer func() {
+		if err != nil {
+			if err2 := ctr.lock.Free(); err2 != nil {
+				logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+			}
+		}
+	}()
+
 	ctr.valid = true
-	ctr.state.State = ContainerStateConfigured
+	ctr.state.State = config2.ContainerStateConfigured
 	ctr.runtime = r
 
 	if ctr.config.OCIRuntime == "" {
@@ -145,7 +150,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 	} else {
 		ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
 		if !ok {
-			return nil, errors.Wrapf(ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
+			return nil, errors.Wrapf(config2.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime)
 		}
 		ctr.ociRuntime = ociRuntime
 	}
@@ -178,14 +183,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 				return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
 			}
 			if podCgroup == "" {
-				return nil, errors.Wrapf(ErrInternal, "pod %s cgroup is not set", pod.ID())
+				return nil, errors.Wrapf(config2.ErrInternal, "pod %s cgroup is not set", pod.ID())
 			}
 			ctr.config.CgroupParent = podCgroup
 		} else {
 			ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
 		}
 	} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
-		return nil, errors.Wrapf(ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+		return nil, errors.Wrapf(config2.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
 	}
 case SystemdCgroupsManager:
 	if ctr.config.CgroupParent == "" {
@@ -195,14 +200,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 				return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID())
 			}
 			ctr.config.CgroupParent = podCgroup
+		} else if rootless.IsRootless() {
+			ctr.config.CgroupParent = SystemdDefaultRootlessCgroupParent
 		} else {
 			ctr.config.CgroupParent = SystemdDefaultCgroupParent
 		}
 	} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
-		return nil, errors.Wrapf(ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+		return nil, errors.Wrapf(config2.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
 	}
 default:
-	return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
+	return nil, errors.Wrapf(config2.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
 }
 
 if ctr.restoreFromCheckpoint {
@@ -242,7 +249,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
 		if err == nil {
 			// The volume exists, we're good
 			continue
-		} else if errors.Cause(err) != ErrNoSuchVolume {
+		} else if errors.Cause(err) != config2.ErrNoSuchVolume {
 			return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name)
 		}
 
@@ -356,7 +363,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 	}
 
 	if !r.valid {
-		return ErrRuntimeStopped
+		return config2.ErrRuntimeStopped
 	}
 
 	// Update the container to get current state
@@ -372,7 +379,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 		}
 	}
 
-	if c.state.State == ContainerStatePaused {
+	if c.state.State == config2.ContainerStatePaused {
 		if err := c.ociRuntime.killContainer(c, 9); err != nil {
 			return err
 		}
@@ -386,7 +393,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 	}
 
 	// Check that the container's in a good state to be removed
-	if c.state.State == ContainerStateRunning {
+	if c.state.State == config2.ContainerStateRunning {
 		if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil {
 			return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
 		}
@@ -414,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 	}
 	if len(deps) != 0 {
 		depsStr := strings.Join(deps, ", ")
-		return errors.Wrapf(ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
+		return errors.Wrapf(config2.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr)
 	}
 }
@@ -424,22 +431,17 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 		// If we're removing the pod, the container will be evicted
 		// from the state elsewhere
 		if !removePod {
-			if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
-				if cleanupErr == nil {
-					cleanupErr = err
-				} else {
-					logrus.Errorf("removing container from pod: %v", err)
-				}
-			}
-		}
-	} else {
-		if err := r.state.RemoveContainer(c); err != nil {
 			if cleanupErr == nil {
 				cleanupErr = err
 			} else {
-				logrus.Errorf("removing container: %v", err)
+				logrus.Errorf("removing container from pod: %v", err)
 			}
 		}
+	} else {
+		if err := r.state.RemoveContainer(c); err != nil {
+			cleanupErr = err
+			logrus.Errorf("removing container: %v", err)
+		}
 	}
 
 	// Set container as invalid so it can no longer be used
@@ -466,8 +468,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 	// Delete the container.
 	// Not needed in Configured and Exited states, where the container
 	// doesn't exist in the runtime
-	if c.state.State != ContainerStateConfigured &&
-		c.state.State != ContainerStateExited {
+	if c.state.State != config2.ContainerStateConfigured &&
+		c.state.State != config2.ContainerStateExited {
 		if err := c.delete(ctx); err != nil {
 			if cleanupErr == nil {
 				cleanupErr = err
@@ -497,7 +499,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 		if !volume.IsCtrSpecific() {
 			continue
 		}
-		if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+		if err := runtime.removeVolume(ctx, volume, false); err != nil && err != config2.ErrNoSuchVolume && err != config2.ErrVolumeBeingUsed {
 			logrus.Errorf("cleanup volume (%s): %v", v, err)
 		}
 	}
@@ -512,7 +514,7 @@ func (r *Runtime) GetContainer(id string) (*Container, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, config2.ErrRuntimeStopped
 	}
 
 	return r.state.Container(id)
@@ -524,7 +526,7 @@ func (r *Runtime) HasContainer(id string) (bool, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return false, ErrRuntimeStopped
+		return false, config2.ErrRuntimeStopped
 	}
 
 	return r.state.HasContainer(id)
@@ -537,7 +539,7 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, config2.ErrRuntimeStopped
 	}
 	return r.state.LookupContainer(idOrName)
 }
@@ -551,7 +553,7 @@ func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, config2.ErrRuntimeStopped
 	}
 
 	ctrs, err := r.state.AllContainers()
@@ -584,7 +586,7 @@ func (r *Runtime) GetAllContainers() ([]*Container, error) {
 func (r *Runtime) GetRunningContainers() ([]*Container, error) {
 	running := func(c *Container) bool {
 		state, _ := c.State()
-		return state == ContainerStateRunning
+		return state == config2.ContainerStateRunning
 	}
 	return r.GetContainers(running)
 }
@@ -612,7 +614,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
 		return nil, errors.Wrapf(err, "unable to find latest container")
 	}
 	if len(ctrs) == 0 {
-		return nil, ErrNoSuchCtr
+		return nil, config2.ErrNoSuchCtr
 	}
 	for containerIndex, ctr := range ctrs {
 		createdTime := ctr.config.CreatedTime
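The setupContainer hunks above free the freshly allocated lock whenever creation fails, by checking the named error return inside a defer. A self-contained sketch of that pattern, with a hypothetical lock type in place of libpod's lock manager:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // lock is a hypothetical stand-in for an allocated container lock.
    type lock struct{ id int }

    func (l *lock) Free() error { return nil }

    // setup mirrors the deferred cleanup: because the return value err is
    // named, the closure sees the final error and frees the lock only on
    // the failure path.
    func setup(fail bool) (c *string, err error) {
    	l := &lock{id: 42}
    	defer func() {
    		if err != nil {
    			if err2 := l.Free(); err2 != nil {
    				fmt.Printf("freeing lock after failed creation: %v\n", err2)
    			}
    		}
    	}()

    	if fail {
    		return nil, errors.New("creation failed")
    	}
    	s := "container"
    	return &s, nil
    }

    func main() {
    	_, err := setup(true)
    	fmt.Println(err) // the lock was freed by the defer
    }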
"github.com/sirupsen/logrus" ) @@ -37,7 +37,9 @@ func stopPauseProcess() error { if err := os.Remove(pausePidPath); err != nil { return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath) } - syscall.Kill(pausePid, syscall.SIGKILL) + if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil { + return err + } } return nil } diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go index b3dd7dabd..66f9b10c9 100644 --- a/libpod/runtime_pod.go +++ b/libpod/runtime_pod.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/util" "github.com/pkg/errors" ) @@ -30,7 +31,7 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool) defer r.lock.Unlock() if !r.valid { - return ErrRuntimeStopped + return define.ErrRuntimeStopped } if !p.valid { @@ -53,7 +54,7 @@ func (r *Runtime) GetPod(id string) (*Pod, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } return r.state.Pod(id) @@ -65,7 +66,7 @@ func (r *Runtime) HasPod(id string) (bool, error) { defer r.lock.RUnlock() if !r.valid { - return false, ErrRuntimeStopped + return false, define.ErrRuntimeStopped } return r.state.HasPod(id) @@ -78,7 +79,7 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } return r.state.LookupPod(idOrName) @@ -93,7 +94,7 @@ func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } pods, err := r.state.AllPods() @@ -122,7 +123,7 @@ func (r *Runtime) GetAllPods() ([]*Pod, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } return r.state.AllPods() @@ -137,7 +138,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) { return nil, errors.Wrapf(err, "unable to get all pods") } if len(pods) == 0 { - return nil, ErrNoSuchPod + return nil, define.ErrNoSuchPod } for podIndex, pod := range pods { createdTime := pod.config.CreatedTime @@ -159,7 +160,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) { defer r.lock.RUnlock() if !r.valid { - return nil, ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } containers, err := r.GetRunningContainers() if err != nil { @@ -171,7 +172,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) { pods = append(pods, c.PodID()) pod, err := r.GetPod(c.PodID()) if err != nil { - if errors.Cause(err) == ErrPodRemoved || errors.Cause(err) == ErrNoSuchPod { + if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod { continue } return nil, err diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go index 0a5f78cf8..da35b7f93 100644 --- a/libpod/runtime_pod_infra_linux.go +++ b/libpod/runtime_pod_infra_linux.go @@ -6,6 +6,7 @@ import ( "context" "strings" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/rootless" "github.com/opencontainers/image-spec/specs-go/v1" @@ -104,7 +105,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID // containers in the pod. 
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index b3dd7dabd..66f9b10c9 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"time"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/pkg/util"
 	"github.com/pkg/errors"
 )
@@ -30,7 +31,7 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 	defer r.lock.Unlock()
 
 	if !r.valid {
-		return ErrRuntimeStopped
+		return define.ErrRuntimeStopped
 	}
 
 	if !p.valid {
@@ -53,7 +54,7 @@ func (r *Runtime) GetPod(id string) (*Pod, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	return r.state.Pod(id)
@@ -65,7 +66,7 @@ func (r *Runtime) HasPod(id string) (bool, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return false, ErrRuntimeStopped
+		return false, define.ErrRuntimeStopped
 	}
 
 	return r.state.HasPod(id)
@@ -78,7 +79,7 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	return r.state.LookupPod(idOrName)
@@ -93,7 +94,7 @@ func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	pods, err := r.state.AllPods()
@@ -122,7 +123,7 @@ func (r *Runtime) GetAllPods() ([]*Pod, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	return r.state.AllPods()
@@ -137,7 +138,7 @@ func (r *Runtime) GetLatestPod() (*Pod, error) {
 		return nil, errors.Wrapf(err, "unable to get all pods")
 	}
 	if len(pods) == 0 {
-		return nil, ErrNoSuchPod
+		return nil, define.ErrNoSuchPod
 	}
 	for podIndex, pod := range pods {
 		createdTime := pod.config.CreatedTime
@@ -159,7 +160,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 	containers, err := r.GetRunningContainers()
 	if err != nil {
@@ -171,7 +172,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) {
 		pods = append(pods, c.PodID())
 		pod, err := r.GetPod(c.PodID())
 		if err != nil {
-			if errors.Cause(err) == ErrPodRemoved || errors.Cause(err) == ErrNoSuchPod {
+			if errors.Cause(err) == define.ErrPodRemoved || errors.Cause(err) == define.ErrNoSuchPod {
 				continue
 			}
 			return nil, err
diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go
index 0a5f78cf8..da35b7f93 100644
--- a/libpod/runtime_pod_infra_linux.go
+++ b/libpod/runtime_pod_infra_linux.go
@@ -6,6 +6,7 @@ import (
 	"context"
 	"strings"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/image"
 	"github.com/containers/libpod/pkg/rootless"
 	"github.com/opencontainers/image-spec/specs-go/v1"
@@ -104,7 +105,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID
 // containers in the pod.
 func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, error) {
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false, nil)
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 124d0daf8..d667d3a25 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -9,20 +9,22 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/containerd/cgroups"
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
+	"github.com/containers/libpod/pkg/cgroups"
+	"github.com/containers/libpod/pkg/rootless"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
 // NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
+func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Pod, Err error) {
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	pod, err := newPod(r)
@@ -58,6 +60,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 	pod.lock = lock
 	pod.config.LockID = pod.lock.ID()
 
+	defer func() {
+		if Err != nil {
+			if err := pod.lock.Free(); err != nil {
+				logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+			}
+		}
+	}()
+
 	pod.valid = true
 
 	// Check CGroup parent sanity, and set it if it was not set
@@ -66,7 +76,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 		if pod.config.CgroupParent == "" {
 			pod.config.CgroupParent = CgroupfsDefaultCgroupParent
 		} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
-			return nil, errors.Wrapf(ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
+			return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
 		}
 		// If we are set to use pod cgroups, set the cgroup parent that
 		// all containers in the pod will share
@@ -77,9 +87,13 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 		}
 	case SystemdCgroupsManager:
 		if pod.config.CgroupParent == "" {
-			pod.config.CgroupParent = SystemdDefaultCgroupParent
+			if rootless.IsRootless() {
+				pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
+			} else {
+				pod.config.CgroupParent = SystemdDefaultCgroupParent
+			}
 		} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
-			return nil, errors.Wrapf(ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
+			return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
 		}
 		// If we are set to use pod cgroups, set the cgroup parent that
 		// all containers in the pod will share
@@ -91,7 +105,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 			pod.state.CgroupPath = cgroupPath
 		}
 	default:
-		return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
+		return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
 	}
 
 	if pod.config.UsePodCgroup {
@@ -107,15 +121,17 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
 	if err := r.state.AddPod(pod); err != nil {
 		return nil, errors.Wrapf(err, "error adding pod to state")
 	}
+	defer func() {
+		if Err != nil {
+			if err := r.removePod(ctx, pod, true, true); err != nil {
+				logrus.Errorf("Error removing pod after pause container creation failure: %v", err)
+			}
+		}
+	}()
 
 	if pod.HasInfraContainer() {
 		ctr, err := r.createInfraContainer(ctx, pod)
 		if err != nil {
-			// Tear down pod, as it is assumed a the pod will contain
-			// a pause container, and it does not.
-			if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
-				logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
-			}
 			return nil, errors.Wrapf(err, "error adding Infra Container")
 		}
 		pod.state.InfraContainerID = ctr.ID()
@@ -146,7 +162,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		force = true
 	}
 	if !removeCtrs && numCtrs > 0 {
-		return errors.Wrapf(ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
+		return errors.Wrapf(define.ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
 	}
 
 	// Go through and lock all containers so we can operate on them all at
@@ -182,9 +198,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 	// would prevent removing the CGroups.
 	if p.runtime.config.CgroupManager == CgroupfsCgroupsManager {
 		// Get the conmon CGroup
-		v1CGroups := GetV1CGroups(getExcludedCGroups())
 		conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
-		conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
+		conmonCgroup, err := cgroups.Load(conmonCgroupPath)
 		if err != nil && err != cgroups.ErrCgroupDeleted {
 			if removalErr == nil {
 				removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath)
@@ -249,9 +264,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 			// Make sure the conmon cgroup is deleted first
 			// Since the pod is almost gone, don't bother failing
 			// hard - instead, just log errors.
-			v1CGroups := GetV1CGroups(getExcludedCGroups())
 			conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
-			conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
+			conmonCgroup, err := cgroups.Load(conmonCgroupPath)
 			if err != nil && err != cgroups.ErrCgroupDeleted {
 				if removalErr == nil {
 					removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID())
@@ -268,7 +282,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 				}
 			}
 		}
-		cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath))
+		cgroup, err := cgroups.Load(p.state.CgroupPath)
 		if err != nil && err != cgroups.ErrCgroupDeleted {
 			if removalErr == nil {
 				removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
@@ -290,7 +304,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
 		// keep going so we make sure to evict the pod before
 		// ending up with an inconsistent state.
 		if removalErr == nil {
-			removalErr = errors.Wrapf(ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID())
+			removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID())
 		} else {
 			logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID())
 		}
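Both NewPod and setupContainer now pick a different default systemd slice when running rootless, since an unprivileged user cannot create cgroups under the system slice. A sketch of that selection; the slice names and the euid check are assumptions standing in for libpod's constants and rootless.IsRootless():

    package main

    import (
    	"fmt"
    	"os"
    )

    // Assumed values mirroring SystemdDefaultCgroupParent and
    // SystemdDefaultRootlessCgroupParent.
    const (
    	systemdDefaultCgroupParent         = "machine.slice"
    	systemdDefaultRootlessCgroupParent = "user.slice"
    )

    // defaultSystemdParent returns a slice the current user can write to.
    func defaultSystemdParent() string {
    	if os.Geteuid() != 0 { // simplified rootless check
    		return systemdDefaultRootlessCgroupParent
    	}
    	return systemdDefaultCgroupParent
    }

    func main() {
    	fmt.Println(defaultSystemdParent())
    }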
diff --git a/libpod/runtime_pod_unsupported.go b/libpod/runtime_pod_unsupported.go
index d2629d5ab..5f0811822 100644
--- a/libpod/runtime_pod_unsupported.go
+++ b/libpod/runtime_pod_unsupported.go
@@ -4,13 +4,15 @@ package libpod
 
 import (
 	"context"
+
+	"github.com/containers/libpod/libpod/define"
 )
 
 // NewPod makes a new, empty pod
 func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
-	return nil, ErrOSNotSupported
+	return nil, define.ErrOSNotSupported
 }
 
 func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
-	return ErrOSNotSupported
+	return define.ErrOSNotSupported
 }
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index 68c6c107e..5c087faca 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"strings"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -26,7 +27,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error
 	defer r.lock.Unlock()
 
 	if !r.valid {
-		return ErrRuntimeStopped
+		return define.ErrRuntimeStopped
 	}
 
 	if !v.valid {
@@ -77,7 +78,7 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	vol, err := r.state.Volume(name)
@@ -103,7 +104,7 @@ func (r *Runtime) HasVolume(name string) (bool, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return false, ErrRuntimeStopped
+		return false, define.ErrRuntimeStopped
 	}
 
 	return r.state.HasVolume(name)
@@ -118,7 +119,7 @@ func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	vols, err := r.state.AllVolumes()
@@ -147,7 +148,7 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	return r.state.AllVolumes()
@@ -167,7 +168,7 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
 
 	for _, vol := range vols {
 		if err := r.RemoveVolume(ctx, vol, false); err != nil {
-			if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
+			if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved {
 				pruneErrors = append(pruneErrors, err)
 			}
 			continue
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index a326ed0e0..ac6fd02c3 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -8,6 +8,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/pkg/errors"
@@ -20,7 +21,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
 	defer r.lock.Unlock()
 
 	if !r.valid {
-		return nil, ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 	return r.newVolume(ctx, options...)
 }
@@ -86,7 +87,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 		if ok, _ := r.state.HasVolume(v.Name()); !ok {
 			return nil
 		}
-		return ErrVolumeRemoved
+		return define.ErrVolumeRemoved
 	}
 
 	deps, err := r.state.VolumeInUse(v)
@@ -96,7 +97,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 	if len(deps) != 0 {
 		depsStr := strings.Join(deps, ", ")
 		if !force {
-			return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
+			return errors.Wrapf(define.ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
 		}
 
 		// We need to remove all containers using the volume
@@ -105,7 +106,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 			if err != nil {
 				// If the container's removed, no point in
 				// erroring.
-				if errors.Cause(err) == ErrNoSuchCtr || errors.Cause(err) == ErrCtrRemoved {
+				if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
 					continue
 				}
diff --git a/libpod/runtime_volume_unsupported.go b/libpod/runtime_volume_unsupported.go
index 5fe487114..1cbf2699a 100644
--- a/libpod/runtime_volume_unsupported.go
+++ b/libpod/runtime_volume_unsupported.go
@@ -4,16 +4,18 @@ package libpod
 
 import (
 	"context"
+
+	"github.com/containers/libpod/libpod/define"
 )
 
 func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
-	return ErrNotImplemented
+	return define.ErrNotImplemented
 }
 
 func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
-	return nil, ErrNotImplemented
+	return nil, define.ErrNotImplemented
 }
 
 func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
-	return nil, ErrNotImplemented
+	return nil, define.ErrNotImplemented
 }
diff --git a/libpod/state_test.go b/libpod/state_test.go
index be68a2d69..26a1dee7d 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/lock"
 	"github.com/containers/storage"
 	"github.com/stretchr/testify/assert"
@@ -700,7 +701,7 @@ func TestSaveAndUpdateContainer(t *testing.T) {
 		retrievedCtr, err := state.Container(testCtr.ID())
 		require.NoError(t, err)
 
-		retrievedCtr.state.State = ContainerStateStopped
+		retrievedCtr.state.State = define.ContainerStateStopped
 		retrievedCtr.state.ExitCode = 127
 		retrievedCtr.state.FinishedTime = time.Now()
 
@@ -729,7 +730,7 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
 		retrievedCtr, err := state.Container(testCtr.ID())
 		assert.NoError(t, err)
 
-		retrievedCtr.state.State = ContainerStateStopped
+		retrievedCtr.state.State = define.ContainerStateStopped
 		retrievedCtr.state.ExitCode = 127
 		retrievedCtr.state.FinishedTime = time.Now()
diff --git a/libpod/stats.go b/libpod/stats.go
index c58a46135..52af824bb 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,11 +3,13 @@
 package libpod
 
 import (
+	"runtime"
 	"strings"
 	"syscall"
 	"time"
 
-	"github.com/containerd/cgroups"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/pkg/cgroups"
 	"github.com/pkg/errors"
 )
 
@@ -25,30 +27,25 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
 		}
 	}
 
-	if c.state.State != ContainerStateRunning {
-		return stats, ErrCtrStateInvalid
+	if c.state.State != define.ContainerStateRunning {
+		return stats, define.ErrCtrStateInvalid
 	}
 
 	cgroupPath, err := c.CGroupPath()
 	if err != nil {
 		return nil, err
 	}
-	v1CGroups := GetV1CGroups(getExcludedCGroups())
-	cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(cgroupPath))
+	cgroup, err := cgroups.Load(cgroupPath)
 	if err != nil {
 		return stats, errors.Wrapf(err, "unable to load cgroup at %s", cgroupPath)
 	}
 
 	// Ubuntu does not have swap memory in cgroups because swap is often not enabled.
-	cgroupStats, err := cgroup.Stat(cgroups.IgnoreNotExist)
+	cgroupStats, err := cgroup.Stat()
 	if err != nil {
 		return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
 	}
 	conState := c.state.State
-	if err != nil {
-		return stats, errors.Wrapf(err, "unable to determine container state")
-	}
-
 	netStats, err := getContainerNetIO(c)
 	if err != nil {
 		return nil, err
@@ -61,7 +58,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
 	stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit)
 	stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
 	stats.PIDs = 0
-	if conState == ContainerStateRunning {
+	if conState == define.ContainerStateRunning {
 		stats.PIDs = cgroupStats.Pids.Current
 	}
 	stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
@@ -105,7 +102,11 @@ func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uin
 	if systemDelta > 0.0 && cpuDelta > 0.0 {
 		// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
 		// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
-		cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CPU.Usage.PerCPU)) * 100
+		nCPUS := len(stats.CPU.Usage.PerCPU)
+		if nCPUS == 0 {
+			nCPUS = runtime.NumCPU()
+		}
+		cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
 	}
 	return cpuPercent
 }
diff --git a/libpod/stats_unsupported.go b/libpod/stats_unsupported.go
index 7117413eb..ec19a89a1 100644
--- a/libpod/stats_unsupported.go
+++ b/libpod/stats_unsupported.go
@@ -2,7 +2,9 @@
 
 package libpod
 
+import "github.com/containers/libpod/libpod/define"
+
 // GetContainerStats gets the running stats for a given container
 func (c *Container) GetContainerStats(previousStats *ContainerStats) (*ContainerStats, error) {
-	return nil, ErrOSNotSupported
+	return nil, define.ErrOSNotSupported
 }
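The calculateCPUPercent hunk adds a fallback for the case where the per-CPU usage list comes back empty. A sketch reproducing the arithmetic; the sample deltas are invented and expressed in nanoseconds:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    // cpuPercent is the ratio of container CPU time to system CPU time over
    // a sampling window, scaled by core count so that four saturated cores
    // read as 400%.
    func cpuPercent(cpuDelta, systemDelta float64, perCPU []uint64) float64 {
    	if systemDelta <= 0.0 || cpuDelta <= 0.0 {
    		return 0.0
    	}
    	nCPUs := len(perCPU)
    	if nCPUs == 0 {
    		// Mirror the hunk: fall back to the host CPU count when the
    		// cgroup reports no per-CPU breakdown.
    		nCPUs = runtime.NumCPU()
    	}
    	return (cpuDelta / systemDelta) * float64(nCPUs) * 100
    }

    func main() {
    	// 2s of container CPU against 8s of system CPU on 4 cores:
    	// (2/8) * 4 * 100 = 100%.
    	fmt.Println(cpuPercent(2e9, 8e9, []uint64{1, 1, 1, 1}))
    }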
diff --git a/libpod/storage.go b/libpod/storage.go
index 1a7b13da6..0814672be 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -6,9 +6,10 @@ import (
 
 	istorage "github.com/containers/image/storage"
 	"github.com/containers/image/types"
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/storage"
 	"github.com/opencontainers/image-spec/specs-go/v1"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -71,7 +72,7 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
 	if imageName != "" {
 		var ref types.ImageReference
 		if containerName == "" {
-			return ContainerInfo{}, ErrEmptyID
+			return ContainerInfo{}, define.ErrEmptyID
 		}
 		// Check if we have the specified image.
 		ref, err := istorage.Transport.ParseStoreReference(r.store, imageID)
@@ -175,7 +176,7 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
 
 func (r *storageService) DeleteContainer(idOrName string) error {
 	if idOrName == "" {
-		return ErrEmptyID
+		return define.ErrEmptyID
 	}
 	container, err := r.store.Container(idOrName)
 	if err != nil {
@@ -214,7 +215,7 @@ func (r *storageService) MountContainerImage(idOrName string) (string, error) {
 	container, err := r.store.Container(idOrName)
 	if err != nil {
 		if errors.Cause(err) == storage.ErrContainerUnknown {
-			return "", ErrNoSuchCtr
+			return "", define.ErrNoSuchCtr
 		}
 		return "", err
 	}
@@ -233,7 +234,7 @@ func (r *storageService) MountContainerImage(idOrName string) (string, error) {
 
 func (r *storageService) UnmountContainerImage(idOrName string, force bool) (bool, error) {
 	if idOrName == "" {
-		return false, ErrEmptyID
+		return false, define.ErrEmptyID
 	}
 	container, err := r.store.Container(idOrName)
 	if err != nil {
@@ -260,7 +261,7 @@ func (r *storageService) UnmountContainerImage(idOrName string, force bool) (boo
 
 func (r *storageService) MountedContainerImage(idOrName string) (int, error) {
 	if idOrName == "" {
-		return 0, ErrEmptyID
+		return 0, define.ErrEmptyID
 	}
 	container, err := r.store.Container(idOrName)
 	if err != nil {
@@ -277,7 +278,7 @@ func (r *storageService) GetMountpoint(id string) (string, error) {
 	container, err := r.store.Container(id)
 	if err != nil {
 		if errors.Cause(err) == storage.ErrContainerUnknown {
-			return "", ErrNoSuchCtr
+			return "", define.ErrNoSuchCtr
 		}
 		return "", err
 	}
@@ -293,7 +294,7 @@ func (r *storageService) GetWorkDir(id string) (string, error) {
 	container, err := r.store.Container(id)
 	if err != nil {
 		if errors.Cause(err) == storage.ErrContainerUnknown {
-			return "", ErrNoSuchCtr
+			return "", define.ErrNoSuchCtr
 		}
 		return "", err
 	}
@@ -304,7 +305,7 @@ func (r *storageService) GetRunDir(id string) (string, error) {
 	container, err := r.store.Container(id)
 	if err != nil {
 		if errors.Cause(err) == storage.ErrContainerUnknown {
-			return "", ErrNoSuchCtr
+			return "", define.ErrNoSuchCtr
 		}
 		return "", err
 	}
diff --git a/libpod/util.go b/libpod/util.go
index 3a15f9e39..b60575264 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -9,8 +9,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/containers/image/signature"
-	"github.com/containers/image/types"
+	"github.com/containers/libpod/libpod/define"
 	"github.com/fsnotify/fsnotify"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -23,17 +22,6 @@ const (
 	DefaultTransport = "docker://"
 )
 
-// OpenExclusiveFile opens a file for writing and ensure it doesn't already exist
-func OpenExclusiveFile(path string) (*os.File, error) {
-	baseDir := filepath.Dir(path)
-	if baseDir != "" {
-		if _, err := os.Stat(baseDir); err != nil {
-			return nil, err
-		}
-	}
-	return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
-}
-
 // FuncTimer helps measure the execution time of a function
 // For debug purposes, do not leave in code
 // used like defer FuncTimer("foo")
@@ -42,24 +30,6 @@ func FuncTimer(funcName string) {
 	fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
 }
 
-// CopyStringStringMap deep copies a map[string]string and returns the result
-func CopyStringStringMap(m map[string]string) map[string]string {
-	n := map[string]string{}
-	for k, v := range m {
-		n[k] = v
-	}
-	return n
-}
-
-// GetPolicyContext creates a signature policy context for the given signature policy path
-func GetPolicyContext(path string) (*signature.PolicyContext, error) {
-	policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
-	if err != nil {
-		return nil, err
-	}
-	return signature.NewPolicyContext(policy)
-}
-
 // RemoveScientificNotationFromFloat returns a float without any
 // scientific notation if the number has any.
 // golang does not handle conversion of float64s that have scientific
@@ -126,7 +96,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
 				return false, errors.Wrapf(err, "checking file %s", path)
 			}
 		case <-timeoutChan:
-			return false, errors.Wrapf(ErrInternal, "timed out waiting for file %s", path)
+			return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
 		}
 	}
 }
@@ -156,15 +126,15 @@ func sortMounts(m []spec.Mount) []spec.Mount {
 
 func validPodNSOption(p *Pod, ctrPod string) error {
 	if p == nil {
-		return errors.Wrapf(ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
+		return errors.Wrapf(define.ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
 	}
 
 	if ctrPod == "" {
-		return errors.Wrapf(ErrInvalidArg, "container is not a member of any pod")
+		return errors.Wrapf(define.ErrInvalidArg, "container is not a member of any pod")
 	}
 
 	if ctrPod != p.ID() {
-		return errors.Wrapf(ErrInvalidArg, "pod passed in is not the pod the container is associated with")
+		return errors.Wrapf(define.ErrInvalidArg, "pod passed in is not the pod the container is associated with")
 	}
 	return nil
 }
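The WaitForFile hunk races an fsnotify event channel against a timeout and maps the timeout to a wrapped define.ErrInternal. A generic sketch of that select pattern, with a plain error in place of the sentinel:

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitWithTimeout returns the first result from done, or an error once
    // the timer fires, so callers never block forever.
    func waitWithTimeout(done chan error, timeout time.Duration) error {
    	select {
    	case err := <-done:
    		return err
    	case <-time.After(timeout):
    		return fmt.Errorf("timed out after %s", timeout)
    	}
    }

    func main() {
    	done := make(chan error, 1)
    	go func() {
    		time.Sleep(50 * time.Millisecond)
    		done <- nil
    	}()
    	fmt.Println(waitWithTimeout(done, time.Second)) // <nil>
    }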
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index a801df2ee..78cbc75a7 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -6,9 +6,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/containerd/cgroups"
-	"github.com/containers/libpod/pkg/util"
-	spec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/pkg/cgroups"
+	"github.com/containers/libpod/pkg/rootless"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -34,24 +34,31 @@ func systemdSliceFromPath(parent, name string) (string, error) {
 	return cgroupPath, nil
 }
 
+func getDefaultSystemdCgroup() string {
+	if rootless.IsRootless() {
+		return SystemdDefaultRootlessCgroupParent
+	}
+	return SystemdDefaultCgroupParent
+}
+
 // makeSystemdCgroup creates a systemd CGroup at the given location.
 func makeSystemdCgroup(path string) error {
-	controller, err := cgroups.NewSystemd(SystemdDefaultCgroupParent)
+	controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
 	if err != nil {
 		return err
 	}
 
-	return controller.Create(path, &spec.LinuxResources{})
+	return controller.CreateSystemdUnit(path)
 }
 
 // deleteSystemdCgroup deletes the systemd cgroup at the given location
 func deleteSystemdCgroup(path string) error {
-	controller, err := cgroups.NewSystemd(SystemdDefaultCgroupParent)
+	controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup())
 	if err != nil {
 		return err
 	}
 
-	return controller.Delete(path)
+	return controller.DeleteByPath(path)
 }
 
 // assembleSystemdCgroupName creates a systemd cgroup path given a base and
@@ -61,7 +68,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
 	const sliceSuffix = ".slice"
 
 	if !strings.HasSuffix(baseSlice, sliceSuffix) {
-		return "", errors.Wrapf(ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
+		return "", errors.Wrapf(define.ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
 	}
 
 	noSlice := strings.TrimSuffix(baseSlice, sliceSuffix)
@@ -70,29 +77,6 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
 	return final, nil
 }
 
-// GetV1CGroups gets the V1 cgroup subsystems and then "filters"
-// out any subsystems that are provided by the caller. Passing nil
-// for excludes will return the subsystems unfiltered.
-//func GetV1CGroups(excludes []string) ([]cgroups.Subsystem, error) {
-func GetV1CGroups(excludes []string) cgroups.Hierarchy {
-	return func() ([]cgroups.Subsystem, error) {
-		var filtered []cgroups.Subsystem
-
-		subSystem, err := cgroups.V1()
-		if err != nil {
-			return nil, err
-		}
-		for _, s := range subSystem {
-			// If the name of the subsystem is not in the list of excludes, then
-			// add it as a keeper.
-			if !util.StringInSlice(string(s.Name()), excludes) {
-				filtered = append(filtered, s)
-			}
-		}
-		return filtered, nil
-	}
-}
-
 // LabelVolumePath takes a mount path for a volume and gives it an
 // selinux label of either shared or not
 func LabelVolumePath(path string, shared bool) error {
diff --git a/libpod/util_unsupported.go b/libpod/util_unsupported.go
index 940006e69..58b0dfbcd 100644
--- a/libpod/util_unsupported.go
+++ b/libpod/util_unsupported.go
@@ -3,27 +3,28 @@
 package libpod
 
 import (
+	"github.com/containers/libpod/libpod/define"
 	"github.com/pkg/errors"
 )
 
 func systemdSliceFromPath(parent, name string) (string, error) {
-	return "", errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+	return "", errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
 }
 
 func makeSystemdCgroup(path string) error {
-	return errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+	return errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
 }
 
 func deleteSystemdCgroup(path string) error {
-	return errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+	return errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
 }
 
 func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
-	return "", errors.Wrapf(ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
+	return "", errors.Wrapf(define.ErrOSNotSupported, "cgroups are not supported on non-linux OSes")
 }
 
 // LabelVolumePath takes a mount path for a volume and gives it an
 // selinux label of either shared or not
 func LabelVolumePath(path string, shared bool) error {
-	return ErrNotImplemented
+	return define.ErrNotImplemented
 }