// +build linux

package libpod

import (
	"context"
	"fmt"
	"path"
	"path/filepath"
	"strings"

	"github.com/containerd/cgroups"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
// NewPod makes a new, empty pod
func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod, error) {
	r.lock.Lock()
	defer r.lock.Unlock()

	if !r.valid {
		return nil, ErrRuntimeStopped
	}

	pod, err := newPod(r)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating pod")
	}

	// Set default namespace to runtime's namespace
	// Do so before options run so they can override it
	if r.config.Namespace != "" {
		pod.config.Namespace = r.config.Namespace
	}

	for _, option := range options {
		if err := option(pod); err != nil {
			return nil, errors.Wrapf(err, "error running pod create option")
		}
	}

	if pod.config.Name == "" {
		name, err := r.generateName()
		if err != nil {
			return nil, err
		}
		pod.config.Name = name
	}

	// Allocate a lock for the pod
	lock, err := r.lockManager.AllocateLock()
	if err != nil {
		return nil, errors.Wrapf(err, "error allocating lock for new pod")
	}
	pod.lock = lock
	pod.config.LockID = pod.lock.ID()

	pod.valid = true

	// Check CGroup parent sanity, and set it if it was not set
	switch r.config.CgroupManager {
	case CgroupfsCgroupsManager:
		if pod.config.CgroupParent == "" {
			pod.config.CgroupParent = CgroupfsDefaultCgroupParent
		} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
			return nil, errors.Wrapf(ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
		}
		// If we are set to use pod cgroups, set the cgroup parent that
		// all containers in the pod will share
		// No need to create it with cgroupfs - the first container to
		// launch should do it for us
		if pod.config.UsePodCgroup {
			pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
		}
	case SystemdCgroupsManager:
		if pod.config.CgroupParent == "" {
			pod.config.CgroupParent = SystemdDefaultCgroupParent
		} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
			return nil, errors.Wrapf(ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups")
		}
		// If we are set to use pod cgroups, set the cgroup parent that
		// all containers in the pod will share
		if pod.config.UsePodCgroup {
			cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()))
			if err != nil {
				return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID())
			}
			pod.state.CgroupPath = cgroupPath
		}
	default:
		return nil, errors.Wrapf(ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager)
	}

	if pod.config.UsePodCgroup {
		logrus.Debugf("Got pod cgroup as %s", pod.state.CgroupPath)
	}
	if !pod.HasInfraContainer() && pod.SharesNamespaces() {
		return nil, errors.Errorf("Pods must have an infra container to share namespaces")
	}
	if pod.HasInfraContainer() && !pod.SharesNamespaces() {
		logrus.Warnf("Pod has an infra container, but shares no namespaces")
	}

	if err := r.state.AddPod(pod); err != nil {
		return nil, errors.Wrapf(err, "error adding pod to state")
	}

	if pod.HasInfraContainer() {
		ctr, err := r.createInfraContainer(ctx, pod)
		if err != nil {
			// Tear down the pod, as it is assumed the pod will
			// contain a pause container, and it does not.
			if err2 := r.removePod(ctx, pod, true, true); err2 != nil {
				logrus.Errorf("Error removing pod after pause container creation failure: %v", err2)
			}
			return nil, errors.Wrapf(err, "error adding Infra Container")
		}
		pod.state.InfraContainerID = ctr.ID()
		if err := pod.save(); err != nil {
			return nil, err
		}
	}

	return pod, nil
}
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) error {
	if err := p.updatePod(); err != nil {
		return err
	}

	ctrs, err := r.state.PodContainers(p)
	if err != nil {
		return err
	}

	numCtrs := len(ctrs)

	// If the only container in the pod is the pause container, remove the pod and container unconditionally.
	pauseCtrID := p.state.InfraContainerID
	if numCtrs == 1 && ctrs[0].ID() == pauseCtrID {
		removeCtrs = true
		force = true
	}
	if !removeCtrs && numCtrs > 0 {
		return errors.Wrapf(ErrCtrExists, "pod %s contains containers and cannot be removed", p.ID())
	}

	// Go through and lock all containers so we can operate on them all at once
	dependencies := make(map[string][]string)
	for _, ctr := range ctrs {
		ctr.lock.Lock()
		defer ctr.lock.Unlock()

		// Sync all containers
		if err := ctr.syncContainer(); err != nil {
			return err
		}

		// Check if the container is in a good state to be removed
		if ctr.state.State == ContainerStatePaused {
			return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains paused container %s, cannot remove", p.ID(), ctr.ID())
		}

		if ctr.state.State == ContainerStateUnknown {
			return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s with invalid state", p.ID(), ctr.ID())
		}

		// If the container is running and force is not set we can't do anything
		if ctr.state.State == ContainerStateRunning && !force {
			return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s which is running", p.ID(), ctr.ID())
		}

		// If the container has active exec sessions and force is not set we can't do anything
		if len(ctr.state.ExecSessions) != 0 && !force {
			return errors.Wrapf(ErrCtrStateInvalid, "pod %s contains container %s which has active exec sessions", p.ID(), ctr.ID())
		}

		deps, err := r.state.ContainerInUse(ctr)
		if err != nil {
			return err
		}
		dependencies[ctr.ID()] = deps
	}

	// Check if containers have dependencies
	// If they do, and the dependencies are not in the pod, error
	for ctr, deps := range dependencies {
		for _, dep := range deps {
			if _, ok := dependencies[dep]; !ok {
				return errors.Wrapf(ErrCtrExists, "container %s depends on container %s not in pod %s", ctr, dep, p.ID())
			}
		}
	}

	// First loop through all containers and stop them
	// Do not remove in this loop to ensure that we don't remove unless all
	// containers are in a good state
	if force {
		for _, ctr := range ctrs {
			// If force is set and the container is running, stop it now
			if ctr.state.State == ContainerStateRunning {
				if err := r.ociRuntime.stopContainer(ctr, ctr.StopTimeout()); err != nil {
					return errors.Wrapf(err, "error stopping container %s to remove pod %s", ctr.ID(), p.ID())
				}

				// Sync again to pick up stopped state
				if err := ctr.syncContainer(); err != nil {
					return err
				}
			}
			// If the container has active exec sessions, stop them now
			if len(ctr.state.ExecSessions) != 0 {
				if err := r.ociRuntime.execStopContainer(ctr, ctr.StopTimeout()); err != nil {
					return err
				}
			}
		}
	}

	// Start removing containers
	// We can remove containers even if they have dependencies now
	// As we have guaranteed their dependencies are in the pod
	for _, ctr := range ctrs {
		// Clean up network namespace, cgroups, mounts
		if err := ctr.cleanup(ctx); err != nil {
			return err
		}

		// Stop container's storage
		if err := ctr.teardownStorage(); err != nil {
			return err
		}

		// Delete the container from runtime (only if we are not
		// ContainerStateConfigured)
		if ctr.state.State != ContainerStateConfigured &&
			ctr.state.State != ContainerStateExited {
			if err := ctr.delete(ctx); err != nil {
				return err
			}
		}

		// Free the container's lock
		if err := ctr.lock.Free(); err != nil {
			return err
		}
	}

	// Remove containers from the state
	if err := r.state.RemovePodContainers(p); err != nil {
		return err
	}

	// Mark containers invalid
	for _, ctr := range ctrs {
		ctr.valid = false
	}

	// Remove pod cgroup, if present
	if p.state.CgroupPath != "" {
		logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)

		switch p.runtime.config.CgroupManager {
		case SystemdCgroupsManager:
			if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
				// The pod is already almost gone.
				// No point in hard-failing if we fail
				// this bit of cleanup.
				logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
			}
		case CgroupfsCgroupsManager:
			// Delete the cgroupfs cgroup
			// Make sure the conmon cgroup is deleted first
			// Since the pod is almost gone, don't bother failing
			// hard - instead, just log errors.
			v1CGroups := GetV1CGroups(getExcludedCGroups())
			conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
			conmonCgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(conmonCgroupPath))
			if err != nil && err != cgroups.ErrCgroupDeleted {
				return err
			}
			if err == nil {
				if err := conmonCgroup.Delete(); err != nil {
					logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
				}
			}

			cgroup, err := cgroups.Load(v1CGroups, cgroups.StaticPath(p.state.CgroupPath))
			if err != nil && err != cgroups.ErrCgroupDeleted {
				return err
			}
			if err == nil {
				if err := cgroup.Delete(); err != nil {
					logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
				}
			}
		default:
			return errors.Wrapf(ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager)
		}
	}

	// Remove pod from state
	if err := r.state.RemovePod(p); err != nil {
		return err
	}

	// Mark pod invalid
	p.valid = false

	return nil
}