Diffstat (limited to 'pkg')
 -rw-r--r--  pkg/api/handlers/compat/images_build.go | 15
 -rw-r--r--  pkg/bindings/images/build.go            |  8
 -rw-r--r--  pkg/domain/entities/events.go           |  5
 -rw-r--r--  pkg/domain/infra/abi/play.go            | 22
 -rw-r--r--  pkg/systemd/notifyproxy/notifyproxy.go  | 98
 5 files changed, 103 insertions(+), 45 deletions(-)
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 287011798..d4c8c0743 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -130,6 +130,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Secrets string `schema:"secrets"`
SecurityOpt string `schema:"securityopt"`
ShmSize int `schema:"shmsize"`
+ SkipUnusedStages bool `schema:"skipunusedstages"`
Squash bool `schema:"squash"`
TLSVerify bool `schema:"tlsVerify"`
Tags []string `schema:"t"`
@@ -138,12 +139,13 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Ulimits string `schema:"ulimits"`
UnsetEnvs []string `schema:"unsetenv"`
}{
- Dockerfile: "Dockerfile",
- IdentityLabel: true,
- Registry: "docker.io",
- Rm: true,
- ShmSize: 64 * 1024 * 1024,
- TLSVerify: true,
+ Dockerfile: "Dockerfile",
+ IdentityLabel: true,
+ Registry: "docker.io",
+ Rm: true,
+ ShmSize: 64 * 1024 * 1024,
+ TLSVerify: true,
+ SkipUnusedStages: true,
}
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
@@ -675,6 +677,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
RemoveIntermediateCtrs: query.Rm,
ReportWriter: reporter,
RusageLogFile: query.RusageLogFile,
+ SkipUnusedStages: types.NewOptionalBool(query.SkipUnusedStages),
Squash: query.Squash,
SystemContext: systemContext,
Target: query.Target,
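
Note: the handler change above wires a new skipunusedstages query parameter into the compat build endpoint and defaults it to true when the client omits it. Below is a minimal, self-contained sketch (not part of the commit) of that defaulting behavior with gorilla/schema, the decoder the handler pulls from the request context; the struct and query strings here are illustrative only.

package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

// buildQuery mirrors a tiny subset of the anonymous query struct in the
// handler; only the fields needed for the illustration are kept.
type buildQuery struct {
	SkipUnusedStages bool `schema:"skipunusedstages"`
	Squash           bool `schema:"squash"`
}

func main() {
	dec := schema.NewDecoder()
	dec.IgnoreUnknownKeys(true)

	// Client omits the new parameter: the pre-populated default (true) wins.
	q := buildQuery{SkipUnusedStages: true}
	vals, _ := url.ParseQuery("squash=1")
	_ = dec.Decode(&q, vals)
	fmt.Println(q.SkipUnusedStages) // true

	// Client sends skipunusedstages=0: the default is overridden.
	q = buildQuery{SkipUnusedStages: true}
	vals, _ = url.ParseQuery("skipunusedstages=0")
	_ = dec.Decode(&q, vals)
	fmt.Println(q.SkipUnusedStages) // false
}
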
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 260d977a8..f8552cddb 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -233,6 +233,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if options.CacheFrom != nil {
params.Set("cachefrom", options.CacheFrom.String())
}
+
+ switch options.SkipUnusedStages {
+ case types.OptionalBoolTrue:
+ params.Set("skipunusedstages", "1")
+ case types.OptionalBoolFalse:
+ params.Set("skipunusedstages", "0")
+ }
+
if options.CacheTo != nil {
params.Set("cacheto", options.CacheTo.String())
}
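
Note: on the client side, the bindings only serialize the parameter when it was set explicitly; an unset types.OptionalBool leaves it out of the query so the server default applies. A hedged sketch of how a bindings caller might opt out, assuming the v4 module path and a connection context obtained elsewhere (the Containerfile name is illustrative):

package client

import (
	"context"

	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v4/pkg/bindings/images"
	"github.com/containers/podman/v4/pkg/domain/entities"
)

// buildAllStages builds without skipping unused stages. Leaving
// SkipUnusedStages at its zero value (OptionalBoolUndefined) would omit the
// parameter entirely and accept the server-side default of true.
func buildAllStages(ctx context.Context) (*entities.BuildReport, error) {
	opts := entities.BuildOptions{}
	opts.SkipUnusedStages = types.OptionalBoolFalse
	return images.Build(ctx, []string{"Containerfile"}, opts)
}
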
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index de218b285..34a6fe048 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -14,7 +14,7 @@ type Event struct {
// TODO: it would be nice to have full control over the types at some
// point and fork such Docker types.
dockerEvents.Message
- HealthStatus string
+ HealthStatus string `json:",omitempty"`
}
// ConvertToLibpodEvent converts an entities event to a libpod one.
@@ -34,6 +34,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
image := e.Actor.Attributes["image"]
name := e.Actor.Attributes["name"]
details := e.Actor.Attributes
+ podID := e.Actor.Attributes["podId"]
delete(details, "image")
delete(details, "name")
delete(details, "containerExitCode")
@@ -47,6 +48,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
Type: t,
HealthStatus: e.HealthStatus,
Details: libpodEvents.Details{
+ PodID: podID,
Attributes: details,
},
}
@@ -61,6 +63,7 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes["image"] = e.Image
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
+ attributes["podId"] = e.PodID
message := dockerEvents.Message{
// Compatibility with clients that still look for deprecated API elements
Status: e.Status.String(),
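
Note: two small effects of the events.go hunks above: the pod ID now survives the round trip between libpod and compat events via a "podId" Actor attribute, and the omitempty tag keeps HealthStatus out of the serialized compat event when it is empty. The tag's effect can be seen in isolation with a stdlib-only sketch (the struct below is illustrative, not the podman type):

package main

import (
	"encoding/json"
	"fmt"
)

type message struct {
	Status       string
	HealthStatus string `json:",omitempty"` // as added in the diff
}

func main() {
	withHealth, _ := json.Marshal(message{Status: "health_status", HealthStatus: "healthy"})
	withoutHealth, _ := json.Marshal(message{Status: "start"})
	fmt.Println(string(withHealth))    // {"Status":"health_status","HealthStatus":"healthy"}
	fmt.Println(string(withoutHealth)) // {"Status":"start"} - no empty HealthStatus key
}
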
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 847e81e69..bd9117f72 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
buildahDefine "github.com/containers/buildah/define"
"github.com/containers/common/libimage"
@@ -698,9 +699,24 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
fmt.Println(playKubePod.ContainerErrors)
}
- // Wait for each proxy to receive a READY message.
- for _, proxy := range sdNotifyProxies {
- if err := proxy.WaitAndClose(); err != nil {
+ // Wait for each proxy to receive a READY message. Use a wait
+ // group to prevent the potential for ABBA kinds of deadlocks.
+ var wg sync.WaitGroup
+ errors := make([]error, len(sdNotifyProxies))
+ for i := range sdNotifyProxies {
+ wg.Add(1)
+ go func(i int) {
+ err := sdNotifyProxies[i].WaitAndClose()
+ if err != nil {
+ err = fmt.Errorf("waiting for sd-notify proxy: %w", err)
+ }
+ errors[i] = err
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+ for _, err := range errors {
+ if err != nil {
return nil, err
}
}
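
Note: the play.go change replaces a sequential wait loop with a fan-out/fan-in: every proxy gets its own goroutine so one blocked WaitAndClose cannot hold up, or deadlock against, the others, and the per-index errors are inspected only after all goroutines finish. A self-contained sketch of the same pattern (the function names are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// waitAll runs every waiter concurrently, collects errors by index, and
// returns the first non-nil error once all waiters have finished.
func waitAll(waiters []func() error) error {
	var wg sync.WaitGroup
	errs := make([]error, len(waiters))
	for i := range waiters {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errs[i] = waiters[i]()
		}(i)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ok := func() error { return nil }
	slow := func() error {
		time.Sleep(10 * time.Millisecond)
		return errors.New("proxy never reported READY")
	}
	fmt.Println(waitAll([]func() error{ok, slow})) // proxy never reported READY
}
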
diff --git a/pkg/systemd/notifyproxy/notifyproxy.go b/pkg/systemd/notifyproxy/notifyproxy.go
index ea1522bb3..4b92d9e6c 100644
--- a/pkg/systemd/notifyproxy/notifyproxy.go
+++ b/pkg/systemd/notifyproxy/notifyproxy.go
@@ -1,6 +1,7 @@
package notifyproxy
import (
+ "context"
"errors"
"fmt"
"io"
@@ -109,48 +110,75 @@ func (p *NotifyProxy) WaitAndClose() error {
}
}()
- const bufferSize = 1024
- sBuilder := strings.Builder{}
- for {
- // Set a read deadline of one second such that we achieve a
- // non-blocking read and can check if the container has already
- // stopped running; in that case no READY message will be send
- // and we're done.
- if err := p.connection.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
- return err
- }
-
+ // Since reading from the connection is blocking, we need to spin up two
+ // goroutines. One waiting for the `READY` message, the other waiting
+ // for the container to stop running.
+ errorChan := make(chan error, 1)
+ readyChan := make(chan bool, 1)
+
+ go func() {
+ // Read until the `READY` message is received or the connection
+ // is closed.
+ const bufferSize = 1024
+ sBuilder := strings.Builder{}
for {
- buffer := make([]byte, bufferSize)
- num, err := p.connection.Read(buffer)
- if err != nil {
- if !errors.Is(err, os.ErrDeadlineExceeded) && !errors.Is(err, io.EOF) {
- return err
+ for {
+ buffer := make([]byte, bufferSize)
+ num, err := p.connection.Read(buffer)
+ if err != nil {
+ if !errors.Is(err, io.EOF) {
+ errorChan <- err
+ return
+ }
+ }
+ sBuilder.Write(buffer[:num])
+ if num != bufferSize || buffer[num-1] == '\n' {
+ // Break as we read an entire line that
+ // we can inspect for the `READY`
+ // message.
+ break
}
}
- sBuilder.Write(buffer[:num])
- if num != bufferSize || buffer[num-1] == '\n' {
- break
- }
- }
- for _, line := range strings.Split(sBuilder.String(), "\n") {
- if line == daemon.SdNotifyReady {
- return nil
+ for _, line := range strings.Split(sBuilder.String(), "\n") {
+ if line == daemon.SdNotifyReady {
+ readyChan <- true
+ return
+ }
}
+ sBuilder.Reset()
}
- sBuilder.Reset()
+ }()
- if p.container == nil {
- continue
- }
+ if p.container != nil {
+ // Create a cancellable context to make sure the goroutine
+ // below terminates.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ state, err := p.container.State()
+ if err != nil {
+ errorChan <- err
+ return
+ }
+ if state != define.ContainerStateRunning {
+ errorChan <- fmt.Errorf("%w: %s", ErrNoReadyMessage, p.container.ID())
+ return
+ }
+ time.Sleep(time.Second)
+ }
+ }()
+ }
- state, err := p.container.State()
- if err != nil {
- return err
- }
- if state != define.ContainerStateRunning {
- return fmt.Errorf("%w: %s", ErrNoReadyMessage, p.container.ID())
- }
+ // Wait for the ready/error channel.
+ select {
+ case <-readyChan:
+ return nil
+ case err := <-errorChan:
+ return err
}
}
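
Note: the rewritten WaitAndClose splits the work into a blocking reader goroutine that reports READY (or a read error), an optional watcher that reports when the container stops before READY arrives, and a final select over two buffered channels. A self-contained sketch of that structure, with illustrative names and a watcher that polls in a loop:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForReady blocks until readLine returns "READY=1", readLine fails, or
// stillRunning reports that the watched process has stopped.
func waitForReady(readLine func() (string, error), stillRunning func() bool) error {
	errorChan := make(chan error, 1)
	readyChan := make(chan bool, 1)

	// Reader goroutine: the blocking read lives here so the caller can also
	// react to the watcher below.
	go func() {
		for {
			line, err := readLine()
			if err != nil {
				errorChan <- err
				return
			}
			if line == "READY=1" {
				readyChan <- true
				return
			}
		}
	}()

	// Watcher goroutine: cancelled via the context when this function returns.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			default:
				if !stillRunning() {
					errorChan <- errors.New("process stopped before sending READY")
					return
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()

	// Block until either goroutine reports a result.
	select {
	case <-readyChan:
		return nil
	case err := <-errorChan:
		return err
	}
}

func main() {
	lines := []string{"STATUS=starting", "READY=1"}
	i := 0
	readLine := func() (string, error) {
		if i >= len(lines) {
			return "", errors.New("connection closed")
		}
		line := lines[i]
		i++
		return line, nil
	}
	fmt.Println(waitForReady(readLine, func() bool { return true })) // <nil>
}
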