Diffstat (limited to 'libpod')
-rw-r--r--  libpod/container.go                    3
-rw-r--r--  libpod/container_api.go                4
-rw-r--r--  libpod/container_internal_linux.go    36
-rw-r--r--  libpod/container_log_linux.go        276
-rw-r--r--  libpod/events/filters.go              39
-rw-r--r--  libpod/events/journal_linux.go        10
-rw-r--r--  libpod/events/logfile.go              10
-rw-r--r--  libpod/network/config.go              43
-rw-r--r--  libpod/network/netconflist.go         18
-rw-r--r--  libpod/networking_linux.go            24
-rw-r--r--  libpod/runtime.go                     17
-rw-r--r--  libpod/runtime_ctr.go                  9
12 files changed, 307 insertions, 182 deletions
diff --git a/libpod/container.go b/libpod/container.go
index 591cf9bc5..c6f0cd618 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/lock"
- "github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -1168,7 +1167,7 @@ func (c *Container) Networks() ([]string, bool, error) {
func (c *Container) networks() ([]string, bool, error) {
networks, err := c.runtime.state.GetNetworks(c)
if err != nil && errors.Cause(err) == define.ErrNoSuchNetwork {
- if len(c.config.Networks) == 0 && !rootless.IsRootless() {
+ if len(c.config.Networks) == 0 && c.config.NetMode.IsBridge() {
return []string{c.runtime.netPlugin.GetDefaultNetworkName()}, true, nil
}
return c.config.Networks, false, nil
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 4ccb240e7..b75d0b41d 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/signal"
+ "github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -776,6 +777,9 @@ type ContainerCheckpointOptions struct {
// ImportPrevious tells the API to restore container with two
// images. One is TargetFile, the other is ImportPrevious.
ImportPrevious string
+ // Compression tells the API which compression to use for
+ // the exported checkpoint archive.
+ Compression archive.Compression
}
// Checkpoint checkpoints a container
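
The new Compression field lets callers choose the archive format for exported checkpoints instead of the previously hard-coded gzip. A minimal sketch of how a front end might map a user-supplied value onto the archive.Compression constants from containers/storage; the flag names are illustrative and archive.Zstd is assumed to be available in the vendored package:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

// parseCheckpointCompression maps a user-facing value onto an
// archive.Compression constant; archive.Gzip stays the default.
func parseCheckpointCompression(name string) (archive.Compression, error) {
	switch name {
	case "", "gzip":
		return archive.Gzip, nil
	case "none":
		return archive.Uncompressed, nil
	case "zstd":
		return archive.Zstd, nil
	default:
		return archive.Gzip, fmt.Errorf("unsupported compression %q", name)
	}
}

func main() {
	compression, err := parseCheckpointCompression("zstd")
	if err != nil {
		panic(err)
	}
	// The value would then be assigned to ContainerCheckpointOptions.Compression.
	fmt.Println(compression)
}
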
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 1b2f5a496..a3acc3198 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -985,7 +985,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
}
input, err := archive.TarWithOptions(c.bundlePath(), &archive.TarOptions{
- Compression: archive.Gzip,
+ Compression: options.Compression,
IncludeSourceDir: true,
IncludeFiles: includeFiles,
})
@@ -1668,17 +1668,16 @@ func (c *Container) generateResolvConf() (string, error) {
return "", err
}
- // Ensure that the container's /etc/resolv.conf is compatible with its
- // network configuration.
- // TODO: set ipv6 enable bool more sanely
- resolv, err := resolvconf.FilterResolvDNS(contents, true, c.config.CreateNetNS)
- if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
- }
-
+ ipv6 := false
// Check if CNI gave back any DNS servers for us to add in
cniResponse := c.state.NetworkStatus
for _, i := range cniResponse {
+ for _, ip := range i.IPs {
+ // Note: only using To16() does not work since it also returns a valid IP for IPv4
+ if ip.Address.IP.To4() == nil && ip.Address.IP.To16() != nil {
+ ipv6 = true
+ }
+ }
if i.DNS.Nameservers != nil {
cniNameServers = append(cniNameServers, i.DNS.Nameservers...)
logrus.Debugf("adding nameserver(s) from cni response of '%q'", i.DNS.Nameservers)
@@ -1689,6 +1688,25 @@ func (c *Container) generateResolvConf() (string, error) {
}
}
+ if c.config.NetMode.IsSlirp4netns() {
+ ctrNetworkSlirpOpts := []string{}
+ if c.config.NetworkOptions != nil {
+ ctrNetworkSlirpOpts = append(ctrNetworkSlirpOpts, c.config.NetworkOptions["slirp4netns"]...)
+ }
+ slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlirpOpts)
+ if err != nil {
+ return "", err
+ }
+ ipv6 = slirpOpts.enableIPv6
+ }
+
+ // Ensure that the container's /etc/resolv.conf is compatible with its
+ // network configuration.
+ resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, c.config.CreateNetNS)
+ if err != nil {
+ return "", errors.Wrapf(err, "error parsing host resolv.conf")
+ }
+
dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers))
for _, i := range c.runtime.config.Containers.DNSServers {
result := net.ParseIP(i)
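
The To4()/To16() combination above is the reliable way to classify an address family with the standard library, because To16() also succeeds for IPv4 addresses. A self-contained sketch of the same check, independent of the CNI types:

package main

import (
	"fmt"
	"net"
)

// hasIPv6 reports whether any of the given addresses is IPv6. Checking
// To16() alone is not enough, since IPv4 addresses also convert to the
// 16-byte form; the additional To4() == nil test rules them out.
func hasIPv6(addrs []net.IP) bool {
	for _, ip := range addrs {
		if ip.To4() == nil && ip.To16() != nil {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasIPv6([]net.IP{net.ParseIP("10.88.0.2")})) // false
	fmt.Println(hasIPv6([]net.IP{net.ParseIP("fd00::2")}))   // true
}
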
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index ec4fa9724..892ee34e3 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -6,14 +6,12 @@ package libpod
import (
"context"
"fmt"
- "io"
- "math"
"strings"
"time"
- "github.com/containers/podman/v3/libpod/define"
+ "github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/libpod/logs"
- journal "github.com/coreos/go-systemd/v22/sdjournal"
+ "github.com/coreos/go-systemd/v22/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,122 +22,187 @@ const (
// journaldLogErr is the journald priority signifying stderr
journaldLogErr = "3"
-
- // bufLen is the length of the buffer to read from a k8s-file
- // formatted log line
- // let's set it as 2k just to be safe if k8s-file format ever changes
- bufLen = 16384
)
func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
- var config journal.JournalReaderConfig
- if options.Tail < 0 {
- config.NumFromTail = 0
- } else if options.Tail == 0 {
- config.NumFromTail = math.MaxUint64
- } else {
- config.NumFromTail = uint64(options.Tail)
+ journal, err := sdjournal.NewJournal()
+ if err != nil {
+ return err
}
- if options.Multi {
- config.Formatter = journalFormatterWithID
- } else {
- config.Formatter = journalFormatter
- }
- defaultTime := time.Time{}
- if options.Since != defaultTime {
- // coreos/go-systemd/sdjournal doesn't correctly handle requests for data in the future
- // return nothing instead of falsely printing
- if time.Now().Before(options.Since) {
- return nil
- }
- // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past
- config.Since = -time.Since(options.Since)
+ // While logs are written to the `logChannel`, we inspect each event
+ // and stop once the container has died. Having logs and events in one
+ // stream prevents a race condition that we faced in #10323.
+
+ // Add the filters for events.
+ match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ }
+ match = sdjournal.Match{Field: "PODMAN_ID", Value: c.ID()}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
}
- config.Matches = append(config.Matches, journal.Match{
- Field: "CONTAINER_ID_FULL",
- Value: c.ID(),
- })
- options.WaitGroup.Add(1)
- r, err := journal.NewJournalReader(config)
- if err != nil {
+ // Add the filter for logs. Note the disjunction so that we match
+ // either the events or the logs.
+ if err := journal.AddDisjunction(); err != nil {
+ return errors.Wrap(err, "adding filter disjunction to journald logger")
+ }
+ match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: c.ID()}
+ if err := journal.AddMatch(match.String()); err != nil {
+ return errors.Wrapf(err, "adding filter to journald logger: %v", match)
+ }
+
+ if err := journal.SeekHead(); err != nil {
return err
}
- if r == nil {
- return errors.Errorf("journal reader creation failed")
+ // API requires Next() immediately after SeekHead().
+ if _, err := journal.Next(); err != nil {
+ return errors.Wrap(err, "initial journal cursor")
}
- if options.Tail == math.MaxInt64 {
- r.Rewind()
+
+ // API requires a next|prev before getting a cursor.
+ if _, err := journal.Previous(); err != nil {
+ return errors.Wrap(err, "initial journal cursor")
}
- state, err := c.State()
- if err != nil {
- return err
+
+ // Note that the initial cursor may not yet be ready, so we'll do an
+ // exponential backoff.
+ var cursor string
+ var cursorError error
+ for i := 1; i <= 3; i++ {
+ cursor, cursorError = journal.GetCursor()
+ if cursorError == nil {
+ break
+ }
+ // Back off before retrying.
+ time.Sleep(time.Duration(i*100) * time.Millisecond)
+ }
+ if cursorError != nil {
+ return errors.Wrap(cursorError, "initial journal cursor")
+ }
+
+ // We need the container's events in the same journal to guarantee
+ // consistency, see #10323.
+ if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" {
+ return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
}
- if options.Follow && state == define.ContainerStateRunning {
- go func() {
- done := make(chan bool)
- until := make(chan time.Time)
- go func() {
- select {
- case <-ctx.Done():
- until <- time.Time{}
- case <-done:
- // nothing to do anymore
+ options.WaitGroup.Add(1)
+ go func() {
+ defer func() {
+ options.WaitGroup.Done()
+ if err := journal.Close(); err != nil {
+ logrus.Errorf("Unable to close journal: %v", err)
+ }
+ }()
+
+ afterTimeStamp := false // needed for options.Since
+ tailQueue := []*logs.LogLine{} // needed for options.Tail
+ doTail := options.Tail > 0
+ for {
+ select {
+ case <-ctx.Done():
+ // Remote client may have closed/lost the connection.
+ return
+ default:
+ // Fallthrough
+ }
+
+ if _, err := journal.Next(); err != nil {
+ logrus.Errorf("Failed to move journal cursor to next entry: %v", err)
+ return
+ }
+ latestCursor, err := journal.GetCursor()
+ if err != nil {
+ logrus.Errorf("Failed to get journal cursor: %v", err)
+ return
+ }
+
+ // Hit the end of the journal.
+ if cursor == latestCursor {
+ if doTail {
+ // Flush *once* we hit the end of the journal.
+ startIndex := int64(len(tailQueue)-1) - options.Tail
+ if startIndex < 0 {
+ startIndex = 0
+ }
+ for i := startIndex; i < int64(len(tailQueue)); i++ {
+ logChannel <- tailQueue[i]
+ }
+ tailQueue = nil
+ doTail = false
+ }
+ // Unless we follow, quit.
+ if !options.Follow {
+ return
}
- }()
- go func() {
- // FIXME (#10323): we are facing a terrible
- // race condition here. At the time the
- // container dies and `c.Wait()` has returned,
- // we may not have received all journald logs.
- // So far there is no other way than waiting
- // for a second. Ultimately, `r.Follow` is
- // racy and we may have to implement our custom
- // logic here.
- c.Wait(ctx)
- time.Sleep(time.Second)
- until <- time.Time{}
- }()
- follower := journaldFollowBuffer{logChannel, options.Multi}
- err := r.Follow(until, follower)
+ // Sleep until something's happening on the journal.
+ journal.Wait(sdjournal.IndefiniteWait)
+ continue
+ }
+ cursor = latestCursor
+
+ entry, err := journal.GetEntry()
if err != nil {
- logrus.Debugf(err.Error())
+ logrus.Errorf("Failed to get journal entry: %v", err)
+ return
}
- r.Close()
- options.WaitGroup.Done()
- done <- true
- return
- }()
- return nil
- }
- go func() {
- bytes := make([]byte, bufLen)
- // /me complains about no do-while in go
- ec, err := r.Read(bytes)
- for ec != 0 && err == nil {
- // because we are reusing bytes, we need to make
- // sure the old data doesn't get into the new line
- bytestr := string(bytes[:ec])
- logLine, err2 := logs.NewJournaldLogLine(bytestr, options.Multi)
- if err2 != nil {
- logrus.Error(err2)
+ if !afterTimeStamp {
+ entryTime := time.Unix(0, int64(entry.RealtimeTimestamp)*int64(time.Microsecond))
+ if entryTime.Before(options.Since) {
+ continue
+ }
+ afterTimeStamp = true
+ }
+
+ // If we're reading an event and the container exited/died,
+ // then we're done and can return.
+ event, ok := entry.Fields["PODMAN_EVENT"]
+ if ok {
+ status, err := events.StringToStatus(event)
+ if err != nil {
+ logrus.Errorf("Failed to translate event: %v", err)
+ return
+ }
+ if status == events.Exited {
+ return
+ }
+ continue
+ }
+
+ var message string
+ var formatError error
+
+ if options.Multi {
+ message, formatError = journalFormatterWithID(entry)
+ } else {
+ message, formatError = journalFormatter(entry)
+ }
+
+ if formatError != nil {
+ logrus.Errorf("Failed to parse journald log entry: %v", err)
+ return
+ }
+
+ logLine, err := logs.NewJournaldLogLine(message, options.Multi)
+ if err != nil {
+ logrus.Errorf("Failed parse log line: %v", err)
+ return
+ }
+ if doTail {
+ tailQueue = append(tailQueue, logLine)
continue
}
logChannel <- logLine
- ec, err = r.Read(bytes)
}
- if err != nil && err != io.EOF {
- logrus.Error(err)
- }
- r.Close()
- options.WaitGroup.Done()
}()
+
return nil
}
-func journalFormatterWithID(entry *journal.JournalEntry) (string, error) {
+func journalFormatterWithID(entry *sdjournal.JournalEntry) (string, error) {
output, err := formatterPrefix(entry)
if err != nil {
return "", err
@@ -162,7 +225,7 @@ func journalFormatterWithID(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func journalFormatter(entry *journal.JournalEntry) (string, error) {
+func journalFormatter(entry *sdjournal.JournalEntry) (string, error) {
output, err := formatterPrefix(entry)
if err != nil {
return "", err
@@ -176,7 +239,7 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func formatterPrefix(entry *journal.JournalEntry) (string, error) {
+func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
usec := entry.RealtimeTimestamp
tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat)
output := fmt.Sprintf("%s ", tsString)
@@ -202,7 +265,7 @@ func formatterPrefix(entry *journal.JournalEntry) (string, error) {
return output, nil
}
-func formatterMessage(entry *journal.JournalEntry) (string, error) {
+func formatterMessage(entry *sdjournal.JournalEntry) (string, error) {
// Finally, append the message
msg, ok := entry.Fields["MESSAGE"]
if !ok {
@@ -211,18 +274,3 @@ func formatterMessage(entry *journal.JournalEntry) (string, error) {
msg = strings.TrimSuffix(msg, "\n")
return msg, nil
}
-
-type journaldFollowBuffer struct {
- logChannel chan *logs.LogLine
- withID bool
-}
-
-func (f journaldFollowBuffer) Write(p []byte) (int, error) {
- bytestr := string(p)
- logLine, err := logs.NewJournaldLogLine(bytestr, f.withID)
- if err != nil {
- return -1, err
- }
- f.logChannel <- logLine
- return len(p), nil
-}
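
The rewritten reader drives the journal directly: two match groups (podman events and container logs) joined by a disjunction, then a cursor-driven loop that stops once an exit event is seen. A trimmed-down sketch of that pattern with go-systemd's sdjournal, using a placeholder container ID and stopping at the end of the journal instead of following:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/v22/sdjournal"
)

func main() {
	j, err := sdjournal.NewJournal()
	if err != nil {
		log.Fatal(err)
	}
	defer j.Close()

	// Match podman events OR the log lines of one container.
	match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
	if err := j.AddMatch(match.String()); err != nil {
		log.Fatal(err)
	}
	if err := j.AddDisjunction(); err != nil {
		log.Fatal(err)
	}
	match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: "<container-id>"}
	if err := j.AddMatch(match.String()); err != nil {
		log.Fatal(err)
	}

	if err := j.SeekHead(); err != nil {
		log.Fatal(err)
	}
	for {
		n, err := j.Next()
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			// End of journal. A follower would call
			// j.Wait(sdjournal.IndefiniteWait) and continue, as the
			// loop above does via its cursor comparison.
			return
		}
		entry, err := j.GetEntry()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(entry.Fields["MESSAGE"])
	}
}
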
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index acfb96302..4d27e8fc4 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -97,18 +97,41 @@ func parseFilter(filter string) (string, string, error) {
return filterSplit[0], filterSplit[1], nil
}
-func generateEventOptions(filters []string, since, until string) ([]EventFilter, error) {
- options := make([]EventFilter, 0, len(filters))
+// applyFilters applies the EventFilter slices in sequence. Filters under the
+// same key are disjunctive while each key must match (conjunctive).
+func applyFilters(event *Event, filterMap map[string][]EventFilter) bool {
+ for _, filters := range filterMap {
+ success := false
+ for _, filter := range filters {
+ if filter(event) {
+ success = true
+ break
+ }
+ }
+ if !success {
+ return false
+ }
+ }
+ return true
+}
+
+// generateEventFilters parses the specified filters into a filter map that can
+// later on be used to filter events. Keys are conjunctive, values are
+// disjunctive.
+func generateEventFilters(filters []string, since, until string) (map[string][]EventFilter, error) {
+ filterMap := make(map[string][]EventFilter)
for _, filter := range filters {
key, val, err := parseFilter(filter)
if err != nil {
return nil, err
}
- funcFilter, err := generateEventFilter(key, val)
+ filterFunc, err := generateEventFilter(key, val)
if err != nil {
return nil, err
}
- options = append(options, funcFilter)
+ filterSlice := filterMap[key]
+ filterSlice = append(filterSlice, filterFunc)
+ filterMap[key] = filterSlice
}
if len(since) > 0 {
@@ -116,7 +139,8 @@ func generateEventOptions(filters []string, since, until string) ([]EventFilter,
if err != nil {
return nil, errors.Wrapf(err, "unable to convert since time of %s", since)
}
- options = append(options, generateEventSinceOption(timeSince))
+ filterFunc := generateEventSinceOption(timeSince)
+ filterMap["since"] = []EventFilter{filterFunc}
}
if len(until) > 0 {
@@ -124,7 +148,8 @@ func generateEventOptions(filters []string, since, until string) ([]EventFilter,
if err != nil {
return nil, errors.Wrapf(err, "unable to convert until time of %s", until)
}
- options = append(options, generateEventUntilOption(timeUntil))
+ filterFunc := generateEventUntilOption(timeUntil)
+ filterMap["until"] = []EventFilter{filterFunc}
}
- return options, nil
+ return filterMap, nil
}
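
applyFilters treats the map keys as an AND and the per-key filter slices as an OR, matching Docker-style filter semantics: repeating the same filter key broadens the match, adding a different key narrows it. A standalone sketch with simplified stand-ins for the Event and EventFilter types:

package main

import "fmt"

// Simplified stand-ins for libpod's event types, just to illustrate the
// filter-map semantics used above.
type Event struct {
	Type string
	Name string
}

type EventFilter func(*Event) bool

func applyFilters(e *Event, filterMap map[string][]EventFilter) bool {
	for _, filters := range filterMap {
		matched := false
		for _, f := range filters {
			if f(e) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	filterMap := map[string][]EventFilter{
		"type": {
			func(e *Event) bool { return e.Type == "container" },
			func(e *Event) bool { return e.Type == "pod" },
		},
		"name": {
			func(e *Event) bool { return e.Name == "web" },
		},
	}
	fmt.Println(applyFilters(&Event{Type: "pod", Name: "web"}, filterMap))   // true
	fmt.Println(applyFilters(&Event{Type: "image", Name: "web"}, filterMap)) // false
}
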
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 23e5f15b1..7006290e9 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -69,9 +69,9 @@ func (e EventJournalD) Write(ee Event) error {
// Read reads events from the journal and sends qualified events to the event channel
func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
- eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
+ filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "failed to generate event options")
+ return errors.Wrapf(err, "failed to parse event filters")
}
var untilTime time.Time
if len(options.Until) > 0 {
@@ -159,11 +159,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}
continue
}
- include := true
- for _, filter := range eventOptions {
- include = include && filter(newEvent)
- }
- if include {
+ if applyFilters(newEvent, filterMap) {
options.EventChannel <- newEvent
}
}
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 0f00525e8..952444f2b 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -44,9 +44,9 @@ func (e EventLogFile) Write(ee Event) error {
// Reads from the log file
func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
- eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
+ filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
- return errors.Wrapf(err, "unable to generate event options")
+ return errors.Wrapf(err, "failed to parse event filters")
}
t, err := e.getTail(options)
if err != nil {
@@ -92,11 +92,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
default:
return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
- include := true
- for _, filter := range eventOptions {
- include = include && filter(event)
- }
- if include && copy {
+ if copy && applyFilters(event, filterMap) {
options.EventChannel <- event
}
}
diff --git a/libpod/network/config.go b/libpod/network/config.go
index ac4478602..9a3bc4763 100644
--- a/libpod/network/config.go
+++ b/libpod/network/config.go
@@ -44,17 +44,17 @@ type CNIPlugins interface {
// HostLocalBridge describes a configuration for a bridge plugin
// https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge#network-configuration-reference
type HostLocalBridge struct {
- PluginType string `json:"type"`
- BrName string `json:"bridge,omitempty"`
- IsGW bool `json:"isGateway"`
- IsDefaultGW bool `json:"isDefaultGateway,omitempty"`
- ForceAddress bool `json:"forceAddress,omitempty"`
- IPMasq bool `json:"ipMasq,omitempty"`
- MTU int `json:"mtu,omitempty"`
- HairpinMode bool `json:"hairpinMode,omitempty"`
- PromiscMode bool `json:"promiscMode,omitempty"`
- Vlan int `json:"vlan,omitempty"`
- IPAM IPAMHostLocalConf `json:"ipam"`
+ PluginType string `json:"type"`
+ BrName string `json:"bridge,omitempty"`
+ IsGW bool `json:"isGateway"`
+ IsDefaultGW bool `json:"isDefaultGateway,omitempty"`
+ ForceAddress bool `json:"forceAddress,omitempty"`
+ IPMasq bool `json:"ipMasq,omitempty"`
+ MTU int `json:"mtu,omitempty"`
+ HairpinMode bool `json:"hairpinMode,omitempty"`
+ PromiscMode bool `json:"promiscMode,omitempty"`
+ Vlan int `json:"vlan,omitempty"`
+ IPAM IPAMConfig `json:"ipam"`
}
// Bytes outputs []byte
@@ -62,9 +62,9 @@ func (h *HostLocalBridge) Bytes() ([]byte, error) {
return json.MarshalIndent(h, "", "\t")
}
-// IPAMHostLocalConf describes an IPAM configuration
+// IPAMConfig describes an IPAM configuration
// https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local#network-configuration-reference
-type IPAMHostLocalConf struct {
+type IPAMConfig struct {
PluginType string `json:"type"`
Routes []IPAMRoute `json:"routes,omitempty"`
ResolveConf string `json:"resolveConf,omitempty"`
@@ -81,7 +81,7 @@ type IPAMLocalHostRangeConf struct {
}
// Bytes outputs the configuration as []byte
-func (i IPAMHostLocalConf) Bytes() ([]byte, error) {
+func (i IPAMConfig) Bytes() ([]byte, error) {
return json.MarshalIndent(i, "", "\t")
}
@@ -101,19 +101,12 @@ func (p PortMapConfig) Bytes() ([]byte, error) {
return json.MarshalIndent(p, "", "\t")
}
-// IPAMDHCP describes the ipamdhcp config
-type IPAMDHCP struct {
- DHCP string `json:"type"`
- Routes []IPAMRoute `json:"routes,omitempty"`
- Ranges [][]IPAMLocalHostRangeConf `json:"ranges,omitempty"`
-}
-
// MacVLANConfig describes the macvlan config
type MacVLANConfig struct {
- PluginType string `json:"type"`
- Master string `json:"master"`
- IPAM IPAMDHCP `json:"ipam"`
- MTU int `json:"mtu,omitempty"`
+ PluginType string `json:"type"`
+ Master string `json:"master"`
+ IPAM IPAMConfig `json:"ipam"`
+ MTU int `json:"mtu,omitempty"`
}
// Bytes outputs the configuration as []byte
diff --git a/libpod/network/netconflist.go b/libpod/network/netconflist.go
index d2031df6d..d6c33740e 100644
--- a/libpod/network/netconflist.go
+++ b/libpod/network/netconflist.go
@@ -45,7 +45,7 @@ func NewNcList(name, version string, labels NcLabels) NcList {
}
// NewHostLocalBridge creates a new LocalBridge for host-local
-func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu int, vlan int, ipamConf IPAMHostLocalConf) *HostLocalBridge {
+func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu int, vlan int, ipamConf IPAMConfig) *HostLocalBridge {
hostLocalBridge := HostLocalBridge{
PluginType: "bridge",
BrName: name,
@@ -65,8 +65,8 @@ func NewHostLocalBridge(name string, isGateWay, isDefaultGW, ipMasq bool, mtu in
}
// NewIPAMHostLocalConf creates a new IPAMHostLocal configuration
-func NewIPAMHostLocalConf(routes []IPAMRoute, ipamRanges [][]IPAMLocalHostRangeConf) (IPAMHostLocalConf, error) {
- ipamConf := IPAMHostLocalConf{
+func NewIPAMHostLocalConf(routes []IPAMRoute, ipamRanges [][]IPAMLocalHostRangeConf) (IPAMConfig, error) {
+ ipamConf := IPAMConfig{
PluginType: "host-local",
Routes: routes,
// Possible future support ? Leaving for clues
@@ -177,8 +177,10 @@ func HasDNSNamePlugin(paths []string) bool {
// NewMacVLANPlugin creates a macvlanconfig with a given device name
func NewMacVLANPlugin(device string, gateway net.IP, ipRange *net.IPNet, subnet *net.IPNet, mtu int) (MacVLANConfig, error) {
- i := IPAMDHCP{DHCP: "dhcp"}
- if gateway != nil || ipRange != nil || subnet != nil {
+ i := IPAMConfig{PluginType: "dhcp"}
+ if gateway != nil ||
+ (ipRange != nil && ipRange.IP != nil && ipRange.Mask != nil) ||
+ (subnet != nil && subnet.IP != nil && subnet.Mask != nil) {
ipam, err := NewIPAMLocalHostRange(subnet, ipRange, gateway)
if err != nil {
return MacVLANConfig{}, err
@@ -186,6 +188,12 @@ func NewMacVLANPlugin(device string, gateway net.IP, ipRange *net.IPNet, subnet
ranges := make([][]IPAMLocalHostRangeConf, 0)
ranges = append(ranges, ipam)
i.Ranges = ranges
+ route, err := NewIPAMDefaultRoute(IsIPv6(subnet.IP))
+ if err != nil {
+ return MacVLANConfig{}, err
+ }
+ i.Routes = []IPAMRoute{route}
+ i.PluginType = "host-local"
}
m := MacVLANConfig{
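
With IPAMDHCP folded into the unified IPAMConfig, a macvlan network keeps the dhcp IPAM plugin by default and only switches to host-local, with an explicit range and default route, once static addressing is supplied. A simplified sketch of that decision using plain maps in place of libpod's range and route types:

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// ipamConfig is an illustrative stand-in for the unified IPAMConfig type.
type ipamConfig struct {
	PluginType string                `json:"type"`
	Ranges     [][]map[string]string `json:"ranges,omitempty"`
	Routes     []map[string]string   `json:"routes,omitempty"`
}

func macvlanIPAM(subnet *net.IPNet, gateway net.IP) ipamConfig {
	ipam := ipamConfig{PluginType: "dhcp"}
	if subnet != nil {
		// Static addressing requested: use host-local with a default route.
		ipam.PluginType = "host-local"
		ipam.Ranges = [][]map[string]string{{{"subnet": subnet.String()}}}
		dst := "0.0.0.0/0"
		if subnet.IP.To4() == nil {
			dst = "::/0"
		}
		route := map[string]string{"dst": dst}
		if gateway != nil {
			route["gw"] = gateway.String()
		}
		ipam.Routes = []map[string]string{route}
	}
	return ipam
}

func main() {
	_, subnet, _ := net.ParseCIDR("192.168.10.0/24")
	b, _ := json.MarshalIndent(macvlanIPAM(subnet, net.ParseIP("192.168.10.1")), "", "  ")
	fmt.Println(string(b))
}
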
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0e8a4f768..c928e02a6 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -273,7 +273,6 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
if err != nil {
return nil, errors.Wrap(err, "error creating rootless cni network namespace")
}
-
// setup slirp4netns here
path := r.config.Engine.NetworkCmdPath
if path == "" {
@@ -437,9 +436,32 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
return rootlessCNINS, nil
}
+// setPrimaryMachineIP is used for podman-machine and it sets
+// an environment variable with the IP address of the podman-machine
+// host.
+func setPrimaryMachineIP() error {
+ // no connection is actually made here
+ conn, err := net.Dial("udp", "8.8.8.8:80")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Error(err)
+ }
+ }()
+ addr := conn.LocalAddr().(*net.UDPAddr)
+ return os.Setenv("PODMAN_MACHINE_HOST", addr.IP.String())
+}
+
// setUpOCICNIPod will set up the cni networks, on error it will also tear down the cni
// networks. If rootless it will join/create the rootless cni namespace.
func (r *Runtime) setUpOCICNIPod(podNetwork ocicni.PodNetwork) ([]ocicni.NetResult, error) {
+ if r.config.MachineEnabled() {
+ if err := setPrimaryMachineIP(); err != nil {
+ return nil, err
+ }
+ }
rootlessCNINS, err := r.GetRootlessCNINetNs(true)
if err != nil {
return nil, err
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 713026a9e..d775b55e1 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/defaultnet"
"github.com/containers/common/pkg/secrets"
"github.com/containers/image/v5/pkg/sysregistriesv2"
is "github.com/containers/image/v5/storage"
@@ -217,8 +218,6 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R
return nil, err
}
- runtime.libimageEventsShutdown = make(chan bool)
-
return runtime, nil
}
@@ -460,6 +459,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
}
}
+ // If we need to make a default network - do so now.
+ if err := defaultnet.Create(runtime.config.Network.DefaultNetwork, runtime.config.Network.DefaultSubnet, runtime.config.Network.NetworkConfigDir, runtime.config.Engine.StaticDir, runtime.config.Engine.MachineEnabled); err != nil {
+ logrus.Errorf("Failed to created default CNI network: %v", err)
+ }
+
// Set up the CNI net plugin
netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...)
if err != nil {
@@ -701,6 +705,8 @@ var libimageEventsMap = map[libimage.EventType]events.Status{
// events on the libimage.Runtime. The goroutine will be cleaned up implicitly
// when the main() exits.
func (r *Runtime) libimageEvents() {
+ r.libimageEventsShutdown = make(chan bool)
+
toLibpodEventStatus := func(e *libimage.Event) events.Status {
status, found := libimageEventsMap[e.Type]
if !found {
@@ -709,9 +715,8 @@ func (r *Runtime) libimageEvents() {
return status
}
+ eventChannel := r.libimageRuntime.EventChannel()
go func() {
- eventChannel := r.libimageRuntime.EventChannel()
-
for {
// Make sure to read and write all events before
// checking if we're about to shutdown.
@@ -780,7 +785,9 @@ func (r *Runtime) Shutdown(force bool) error {
// attempt to shut it down
if r.store != nil {
// Wait for the events to be written.
- r.libimageEventsShutdown <- true
+ if r.libimageEventsShutdown != nil {
+ r.libimageEventsShutdown <- true
+ }
// Note that the libimage runtime shuts down the store.
if err := r.libimageRuntime.Shutdown(force); err != nil {
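
Moving the make(chan bool) into libimageEvents() means the channel only exists when the events goroutine is actually running, so Shutdown has to guard the send; sending on a nil channel would block forever. A minimal sketch of the pattern with a hypothetical engine type:

package main

import (
	"fmt"
	"time"
)

type engine struct {
	shutdown chan bool
}

// startEvents lazily creates the shutdown channel and starts the consumer.
func (e *engine) startEvents() {
	e.shutdown = make(chan bool)
	go func() {
		<-e.shutdown
		fmt.Println("events goroutine: flushed and exiting")
	}()
}

// Shutdown only signals the goroutine if it was ever started.
func (e *engine) Shutdown() {
	if e.shutdown != nil {
		e.shutdown <- true
	}
}

func main() {
	e := &engine{}
	e.Shutdown() // no events goroutine started; guarded send is a no-op

	e.startEvents()
	e.Shutdown()
	time.Sleep(50 * time.Millisecond) // give the goroutine time to print
}
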
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 4e4b2a8ab..6c69d1b72 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -581,6 +581,15 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if err := c.stop(c.StopTimeout()); err != nil && errors.Cause(err) != define.ErrConmonDead {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
+
+ // We unlocked as part of stop() above - there's a chance someone
+ // else got in and removed the container before we reacquired the
+ // lock.
+ // Do a quick ping of the database to check if the container
+ // still exists.
+ if ok, _ := r.state.HasContainer(c.ID()); !ok {
+ return nil
+ }
}
// Remove all active exec sessions
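
Because stop() temporarily releases the container lock, another caller can finish removing the container first; the re-check turns that race into a no-op instead of an error. A toy sketch of the pattern with a hypothetical in-memory state:

package main

import "fmt"

type state struct{ containers map[string]bool }

func (s *state) HasContainer(id string) (bool, error) {
	return s.containers[id], nil
}

func removeContainer(s *state, id string) error {
	// ... stop() ran here and temporarily released the container lock ...
	if ok, _ := s.HasContainer(id); !ok {
		return nil // someone else removed it while we were unlocked
	}
	delete(s.containers, id)
	return nil
}

func main() {
	s := &state{containers: map[string]bool{"abc": true}}
	fmt.Println(removeContainer(s, "abc")) // removes the container
	fmt.Println(removeContainer(s, "abc")) // already gone, treated as success
}
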