From 17105028e519237b5dd310e32b2a334eaa41bb36 Mon Sep 17 00:00:00 2001
From: Daniel J Walsh <dwalsh@redhat.com>
Date: Thu, 21 Apr 2022 15:15:41 -0400
Subject: vendor in latest containers/(storage,common,image)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
---
 .../containers/common/pkg/config/config.go         |   10 +
 .../containers/common/pkg/config/containers.conf   |   41 +-
 .../containers/common/pkg/config/default.go        |    6 +-
 .../common/pkg/secrets/shelldriver/shelldriver.go  |   25 +-
 vendor/github.com/containers/image/v5/copy/copy.go |   11 +-
 .../image/v5/directory/directory_dest.go           |   15 +-
 .../containers/image/v5/directory/directory_src.go |    5 +-
 .../containers/image/v5/docker/docker_client.go    |    3 +-
 .../image/v5/docker/docker_image_dest.go           |    3 +-
 .../containers/image/v5/docker/docker_image_src.go |    9 +-
 .../image/v5/docker/internal/tarfile/reader.go     |    3 +-
 .../image/v5/docker/internal/tarfile/src.go        |    5 +-
 .../containers/image/v5/docker/lookaside.go        |    3 +-
 .../image/v5/internal/iolimits/iolimits.go         |    3 +-
 .../v5/internal/streamdigest/stream_digest.go      |    3 +-
 .../image/v5/oci/archive/oci_transport.go          |    3 +-
 .../containers/image/v5/oci/layout/oci_dest.go     |    9 +-
 .../containers/image/v5/oci/layout/oci_src.go      |    3 +-
 .../image/v5/openshift/openshift-copies.go         |    5 +-
 .../containers/image/v5/ostree/ostree_dest.go      |   23 +-
 .../containers/image/v5/ostree/ostree_src.go       |    3 +-
 .../containers/image/v5/pkg/blobcache/blobcache.go |   13 +-
 .../image/v5/pkg/compression/compression.go        |    7 +-
 .../image/v5/pkg/docker/config/config.go           |    3 +-
 .../v5/pkg/sysregistriesv2/system_registries_v2.go |    9 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go      |    7 +-
 vendor/github.com/containers/image/v5/sif/load.go  |    7 +-
 vendor/github.com/containers/image/v5/sif/src.go   |    5 +-
 .../containers/image/v5/signature/mechanism.go     |    4 +-
 .../image/v5/signature/mechanism_gpgme.go          |    3 +-
 .../image/v5/signature/mechanism_openpgp.go        |    6 +-
 .../containers/image/v5/signature/policy_config.go |    3 +-
 .../image/v5/signature/policy_eval_signedby.go     |    4 +-
 .../containers/image/v5/storage/storage_image.go   |   13 +-
 .../containers/image/v5/tarball/tarball_src.go     |    7 +-
 .../image/v5/tarball/tarball_transport.go          |    4 +-
 vendor/github.com/containers/storage/Makefile      |   18 +-
 .../containers/storage/drivers/driver_freebsd.go   |   29 +
 .../storage/drivers/register/register_zfs.go       |    2 +-
 .../containers/storage/drivers/zfs/zfs.go          |    4 +-
 .../containers/storage/drivers/zfs/zfs_freebsd.go  |    5 +
 .../containers/storage/drivers/zfs/zfs_linux.go    |    5 +
 vendor/github.com/containers/storage/layers.go     |   10 +-
 .../containers/storage/storage.conf-freebsd        |  205 +++
 .../github.com/mitchellh/mapstructure/CHANGELOG.md |   83 --
 vendor/github.com/mitchellh/mapstructure/LICENSE   |   21 -
 vendor/github.com/mitchellh/mapstructure/README.md |   46 -
 .../mitchellh/mapstructure/decode_hooks.go         |  257 ----
 vendor/github.com/mitchellh/mapstructure/error.go  |   50 -
 vendor/github.com/mitchellh/mapstructure/go.mod    |    3 -
 .../mitchellh/mapstructure/mapstructure.go         | 1467 --------------------
 vendor/modules.txt                                 |   10 +-
 52 files changed, 428 insertions(+), 2073 deletions(-)
 create mode 100644 vendor/github.com/containers/storage/storage.conf-freebsd
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/go.mod
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go

diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index b28c527bc..319b8d153 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -95,6 +95,13 @@ type ContainersConfig struct {
 	// Annotation to add to all containers
 	Annotations []string `toml:"annotations,omitempty"`
 
+	// BaseHostsFile is the path to a hosts file whose entries are added
+	// to the container's hosts file. The special value "image" uses the
+	// /etc/hosts file from within the container image, and "none" uses
+	// no base file at all. If it is empty, the default is to use the
+	// host's /etc/hosts.
+	BaseHostsFile string `toml:"base_hosts_file,omitempty"`
+
 	// Default way to create a cgroup namespace for the container
 	CgroupNS string `toml:"cgroupns,omitempty"`
 
@@ -136,6 +143,9 @@ type ContainersConfig struct {
 	// EnvHost Pass all host environment variables into the container.
 	EnvHost bool `toml:"env_host,omitempty"`
 
+	// HostContainersInternalIP is used to set a specific host.containers.internal IP.
+	HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"`
+
 	// HTTPProxy is the proxy environment variable list to apply to container process
 	HTTPProxy bool `toml:"http_proxy,omitempty"`
 
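The new BaseHostsFile field distinguishes four cases: an empty value (fall back to /etc/hosts, per DefaultHostsFile in default.go below), the special values "none" and "image", and an explicit path. A minimal sketch of how a consumer of this config might resolve the value (resolveBaseHostsFile and imageHostsPath are hypothetical names, not part of this patch):

package main

import "fmt"

// resolveBaseHostsFile illustrates the documented BaseHostsFile semantics:
// "" falls back to /etc/hosts, "none" disables the base file, "image" uses
// the hosts file shipped inside the image, anything else is taken as a path.
func resolveBaseHostsFile(value, imageHostsPath string) (string, bool) {
	switch value {
	case "none":
		return "", false // no base hosts file at all
	case "image":
		return imageHostsPath, true // /etc/hosts from within the image
	case "":
		return "/etc/hosts", true // default
	default:
		return value, true // explicit path supplied by the user
	}
}

func main() {
	for _, v := range []string{"", "none", "image", "/tmp/myhosts"} {
		path, ok := resolveBaseHostsFile(v, "/imageroot/etc/hosts")
		fmt.Printf("%-14q -> ok=%-5t path=%q\n", v, ok, path)
	}
}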
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index f069c531d..429b254bc 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -26,6 +26,13 @@
 #
 #apparmor_profile = "container-default"
 
+# The hosts entries from the base hosts file are added to the container's
+# hosts file. This must be an absolute path or one of the special values
+# "image" (use the hosts file from the container image) or "none" (use no
+# base hosts file). The default is "", which uses /etc/hosts.
+#
+#base_hosts_file = ""
+
 # Default way to create a cgroup namespace for the container
 # Options are:
 # `private` Create private Cgroup Namespace for the container.
@@ -114,6 +121,16 @@ default_sysctls = [
 #
 #env_host = false
 
+# Set the IP for the host.containers.internal entry in the container's
+# /etc/hosts file. This can be set to "none" to disable adding this entry.
+# By default it will automatically choose the host IP.
+#
+# NOTE: When using podman machine, this entry is never added to the container's
+# hosts file; instead, the gvproxy DNS resolver resolves this hostname. It is
+# therefore not possible to disable the entry in this case.
+#
+#host_containers_internal_ip = ""
+
 # Default proxy environment variables passed into the container.
 # The environment variables passed in include:
 # http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
@@ -464,9 +481,26 @@ default_sysctls = [
 #network_cmd_path = ""
 
 # Default options to pass to the slirp4netns binary.
-# For example "allow_host_loopback=true"
-#
-#network_cmd_options = ["enable_ipv6=true",]
+# Valid option values are:
+#
+# - allow_host_loopback=true|false: Allow slirp4netns to reach the host loopback IP (`10.0.2.2`).
+#   Default is false.
+# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`).
+# - cidr=CIDR: Specify the IP range to use for this network. (Default is `10.0.2.0/24`).
+# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only).
+# - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to.
+# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only).
+# - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to.
+# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default.
+#   Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container
+#   network namespace, usually `10.0.2.100`. If your application requires the real source IP address,
+#   e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for
+#   rootless containers when connected to user-defined networks.
+# - port_handler=slirp4netns: Use slirp4netns for port forwarding; it is slower than rootlesskit but
+#   preserves the correct source IP address. This port handler cannot be used for user-defined networks.
+#
+#network_cmd_options = []
 
 # Whether to use chroot instead of pivot_root in the runtime
 #
@@ -644,4 +678,3 @@ default_sysctls = [
 # TOML does not provide a way to end a table other than a further table being
 # defined, so every key hereafter will be part of [machine] and not the
 # main config.
-
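Taken together, a containers.conf fragment exercising the new keys might look like this (values are illustrative, not defaults; base_hosts_file and host_containers_internal_ip live in the [containers] table, network_cmd_options in [engine]):

[containers]
base_hosts_file = "image"
host_containers_internal_ip = "192.168.0.10"

[engine]
network_cmd_options = ["allow_host_loopback=true", "mtu=1500"]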
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 275f67cbf..62b348d6e 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -122,6 +122,8 @@ const (
 	CgroupfsCgroupsManager = "cgroupfs"
 	// DefaultApparmorProfile  specifies the default apparmor profile for the container.
 	DefaultApparmorProfile = apparmor.Profile
+	// DefaultHostsFile is the default path to the hosts file
+	DefaultHostsFile = "/etc/hosts"
 	// SystemdCgroupsManager represents systemd native cgroup manager
 	SystemdCgroupsManager = "systemd"
 	// DefaultLogSizeMax is the default value for the maximum log size
@@ -189,6 +191,7 @@ func DefaultConfig() (*Config, error) {
 			Volumes:             []string{},
 			Annotations:         []string{},
 			ApparmorProfile:     DefaultApparmorProfile,
+			BaseHostsFile:       "",
 			CgroupNS:            cgroupNS,
 			Cgroups:             "enabled",
 			DefaultCapabilities: DefaultCapabilities,
@@ -299,9 +302,6 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 	c.ServiceTimeout = uint(5)
 	c.StopTimeout = uint(10)
 	c.ExitCommandDelay = uint(5 * 60)
-	c.NetworkCmdOptions = []string{
-		"enable_ipv6=true",
-	}
 	c.Remote = isRemote()
 	c.OCIRuntimes = map[string][]string{
 		"crun": {
diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
index 846bd5c17..8eac200f7 100644
--- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
@@ -3,12 +3,12 @@ package shelldriver
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"os"
 	"os/exec"
 	"sort"
 	"strings"
 
-	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
 )
 
@@ -27,22 +27,33 @@ var (
 type driverConfig struct {
 	// DeleteCommand contains a shell command that deletes a secret.
 	// The secret id is provided as environment variable SECRET_ID
-	DeleteCommand string `mapstructure:"delete"`
+	DeleteCommand string
 	// ListCommand contains a shell command that lists all secrets.
 	// The output is expected to be one id per line
-	ListCommand string `mapstructure:"list"`
+	ListCommand string
 	// LookupCommand contains a shell command that retrieves a secret.
 	// The secret id is provided as environment variable SECRET_ID
-	LookupCommand string `mapstructure:"lookup"`
+	LookupCommand string
 	// StoreCommand contains a shell command that stores a secret.
 	// The secret id is provided as environment variable SECRET_ID
 	// The secret value itself is provided over stdin
-	StoreCommand string `mapstructure:"store"`
+	StoreCommand string
 }
 
 func (cfg *driverConfig) ParseOpts(opts map[string]string) error {
-	if err := mapstructure.Decode(opts, cfg); err != nil {
-		return err
+	for key, value := range opts {
+		switch key {
+		case "delete":
+			cfg.DeleteCommand = value
+		case "list":
+			cfg.ListCommand = value
+		case "lookup":
+			cfg.LookupCommand = value
+		case "store":
+			cfg.StoreCommand = value
+		default:
+			return fmt.Errorf("invalid shell driver option: %q", key)
+		}
 	}
 	if cfg.DeleteCommand == "" ||
 		cfg.ListCommand == "" ||
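The mapstructure dependency is replaced by an explicit switch over the four known option keys, so unknown keys now fail loudly. A standalone sketch of the same pattern (shellOpts and parse mirror the unexported driverConfig and ParseOpts; names here are illustrative):

package main

import "fmt"

// shellOpts mirrors the unexported driverConfig from shelldriver.
type shellOpts struct {
	Delete, List, Lookup, Store string
}

// parse implements the same explicit-switch decoding the patch introduces:
// every key must be one of the four known options, otherwise it is an error.
func parse(opts map[string]string) (shellOpts, error) {
	var cfg shellOpts
	for key, value := range opts {
		switch key {
		case "delete":
			cfg.Delete = value
		case "list":
			cfg.List = value
		case "lookup":
			cfg.Lookup = value
		case "store":
			cfg.Store = value
		default:
			return cfg, fmt.Errorf("invalid shell driver option: %q", key)
		}
	}
	return cfg, nil
}

func main() {
	if _, err := parse(map[string]string{"list": "ls /secrets", "frob": "x"}); err != nil {
		fmt.Println(err) // invalid shell driver option: "frob"
	}
}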
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index b616e566c..644f82615 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"reflect"
 	"strings"
@@ -199,7 +198,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		return nil, err
 	}
 
-	reportWriter := ioutil.Discard
+	reportWriter := io.Discard
 
 	if options.ReportWriter != nil {
 		reportWriter = options.ReportWriter
@@ -232,7 +231,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	// createProgressBar() will print a single line instead.
 	progressOutput := reportWriter
 	if !isTTY(reportWriter) {
-		progressOutput = ioutil.Discard
+		progressOutput = io.Discard
 	}
 
 	c := &copier{
@@ -1091,7 +1090,7 @@ func customPartialBlobDecorFunc(s decor.Statistics) string {
 }
 
 // createProgressBar creates a mpb.Bar in pool.  Note that if the copier's reportWriter
-// is ioutil.Discard, the progress bar's output will be discarded
+// is io.Discard, the progress bar's output will be discarded
 // NOTE: Every progress bar created within a progress pool must either successfully
 // complete or be aborted, or pool.Wait() will hang. That is typically done
 // using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
@@ -1143,7 +1142,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
 			),
 		)
 	}
-	if c.progressOutput == ioutil.Discard {
+	if c.progressOutput == io.Discard {
 		c.Printf("Copying %s %s\n", kind, info.Digest)
 	}
 	return bar
@@ -1669,7 +1668,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	// sent there if we are not already at EOF.
 	if getOriginalLayerCopyWriter != nil {
 		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
-		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		_, err := io.Copy(io.Discard, originalLayerReader)
 		if err != nil {
 			return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
 		}
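Most of the remaining hunks in this patch are the mechanical migration off the io/ioutil package, which Go 1.16 deprecated in favor of equivalents in io and os. A self-contained sketch of every replacement used here:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.Discard -> io.Discard
	n, _ := io.Copy(io.Discard, strings.NewReader("drained"))
	fmt.Println("discarded bytes:", n)

	// ioutil.NopCloser -> io.NopCloser, ioutil.ReadAll -> io.ReadAll
	rc := io.NopCloser(strings.NewReader("body"))
	data, _ := io.ReadAll(rc)
	rc.Close()

	// ioutil.TempFile -> os.CreateTemp, ioutil.TempDir -> os.MkdirTemp
	f, _ := os.CreateTemp("", "demo-*")
	defer os.Remove(f.Name())
	f.Close()

	// ioutil.WriteFile -> os.WriteFile, ioutil.ReadFile -> os.ReadFile
	_ = os.WriteFile(f.Name(), data, 0644)
	back, _ := os.ReadFile(f.Name())
	fmt.Println(string(back)) // body

	// ioutil.ReadDir -> os.ReadDir (returns []os.DirEntry, not []os.FileInfo)
	entries, _ := os.ReadDir(".")
	fmt.Println("entries:", len(entries))
}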
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index ea20e7c5e..3b135e68e 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -3,7 +3,6 @@ package directory
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
 				return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
 			}
 			if versionExists {
-				contents, err := ioutil.ReadFile(d.ref.versionPath())
+				contents, err := os.ReadFile(d.ref.versionPath())
 				if err != nil {
 					return nil, err
 				}
@@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
 		}
 	}
 	// create version file
-	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
 	if err != nil {
 		return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
 	}
@@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+	blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
 // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
 func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
-	return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
 }
 
 // PutSignatures writes a set of signatures to the destination.
@@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte,
 // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
 func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
 	for i, sig := range signatures {
-		if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
 			return err
 		}
 	}
@@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) {
 
 // returns true if directory is empty
 func isDirEmpty(path string) (bool, error) {
-	files, err := ioutil.ReadDir(path)
+	files, err := os.ReadDir(path)
 	if err != nil {
 		return false, err
 	}
@@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) {
 
 // deletes the contents of a directory
 func removeDirContents(path string) error {
-	files, err := ioutil.ReadDir(path)
+	files, err := os.ReadDir(path)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go
index ad9129d40..8b509112a 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_src.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_src.go
@@ -3,7 +3,6 @@ package directory
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 
 	"github.com/containers/image/v5/manifest"
@@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error {
 // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-	m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
+	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
 	if err != nil {
 		return nil, "", err
 	}
@@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
 func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
 	signatures := [][]byte{}
 	for i := 0; ; i++ {
-		signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+		signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest))
 		if err != nil {
 			if os.IsNotExist(err) {
 				break
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 9837235d8..d984db718 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -654,7 +653,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("refresh_token", c.auth.IdentityToken)
 	params.Add("client_id", "containers/image")
 
-	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+	authReq.Body = io.NopCloser(bytes.NewBufferString(params.Encode()))
 	authReq.Header.Add("User-Agent", c.userAgent)
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index e3275aa45..d02100cf8 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -592,7 +591,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 		if err != nil {
 			return err
 		}
-		err = ioutil.WriteFile(url.Path, signature, 0644)
+		err = os.WriteFile(url.Path, signature, 0644)
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index c08e5538a..c8e176f90 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -308,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 				break
 			}
 			toSkip := c.Offset - currentOffset
-			if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+			if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
 				errs <- err
 				break
 			}
@@ -316,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 		}
 		s := signalCloseReader{
 			closed:        make(chan interface{}),
-			stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+			stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
 			consumeStream: true,
 		}
 		streams <- s
@@ -515,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Reading %s", url.Path)
-		sig, err := ioutil.ReadFile(url.Path)
+		sig, err := os.ReadFile(url.Path)
 		if err != nil {
 			if os.IsNotExist(err) {
 				return nil, true, nil
@@ -765,7 +764,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
 func (s signalCloseReader) Close() error {
 	defer close(s.closed)
 	if s.consumeStream {
-		if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+		if _, err := io.Copy(io.Discard, s.stream); err != nil {
 			s.stream.Close()
 			return err
 		}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
index 6164ceb66..c77c002d1 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 
@@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 // The caller should call .Close() on the returned archive when done.
 func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
 	// Save inputStream to a temporary file
-	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
 		return nil, errors.Wrap(err, "creating temporary file")
 	}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
index b8d84d245..8e9be17c1 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"sync"
@@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 
 			uncompressedSize := h.Size
 			if isCompressed {
-				uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+				uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 				if err != nil {
 					return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
 				}
@@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 	}
 
 	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+		return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
 	}
 
 	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
index 22d84931c..d0a3f1be0 100644
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -2,7 +2,6 @@ package docker
 
 import (
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"path"
@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 			continue
 		}
 		configPath := filepath.Join(dirPath, configName)
-		configBytes, err := ioutil.ReadFile(configPath)
+		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 3fed1995c..49fa410e9 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -2,7 +2,6 @@ package iolimits
 
 import (
 	"io"
-	"io/ioutil"
 
 	"github.com/pkg/errors"
 )
@@ -47,7 +46,7 @@ const (
 func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
 	limitedReader := io.LimitReader(reader, int64(limit+1))
 
-	res, err := ioutil.ReadAll(limitedReader)
+	res, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
index 306220585..84bb656ac 100644
--- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
+++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
@@ -3,7 +3,6 @@ package streamdigest
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 
 	"github.com/containers/image/v5/internal/putblobdigest"
@@ -16,7 +15,7 @@ import (
 // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
 // If an error occurs, inputInfo is not modified.
 func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
-	diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+	diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
 	if err != nil {
 		return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
 	}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 54d325d34..4fa912765 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -3,7 +3,6 @@ package archive
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"
 
@@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
 // createOCIRef creates the oci reference of the image
 // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
 func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
-	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+	dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
 	if err != nil {
 		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
 	}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index c8156cc3a..77e8fd876 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+	if err := os.WriteFile(blobPath, m, 0644); err != nil {
 		return err
 	}
 
@@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
-	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+	if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
 		return err
 	}
 	indexJSON, err := json.Marshal(d.index)
 	if err != nil {
 		return err
 	}
-	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }
 
 func ensureDirectoryExists(path string) error {
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 9d8ab689b..8973f461c 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -3,7 +3,6 @@ package layout
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 		return nil, "", err
 	}
 
-	m, err := ioutil.ReadFile(manifestPath)
+	m, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return nil, "", err
 	}
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index 4ffbced6b..a6473ae68 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
 // LoadFromFile takes a filename and deserializes the contents into Config object
 func loadFromFile(filename string) (*clientcmdConfig, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	kubeconfigBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
@@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
 		return data, nil
 	}
 	if len(file) > 0 {
-		fileData, err := ioutil.ReadFile(file)
+		fileData, err := os.ReadFile(file)
 		if err != nil {
 			return []byte{}, err
 		}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 3eb2a2cba..011118fa5 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
-	tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+	tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 }
 
 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
-	entries, err := ioutil.ReadDir(dir)
+	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return err
 	}
 
-	for _, info := range entries {
-		fullpath := filepath.Join(dir, info.Name())
-		if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+	for _, entry := range entries {
+		fullpath := filepath.Join(dir, entry.Name())
+		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
 			if err := os.Remove(fullpath); err != nil {
 				return err
 			}
 			continue
 		}
 
+		info, err := entry.Info()
+		if err != nil {
+			return err
+		}
 		if selinuxHnd != nil {
 			relPath, err := filepath.Rel(root, fullpath)
 			if err != nil {
@@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			}
 		}
 
-		if info.IsDir() {
+		if entry.IsDir() {
 			if usermode {
 				if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
 					return err
@@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
 			if err != nil {
 				return err
 			}
-		} else if usermode && (info.Mode().IsRegular()) {
+		} else if usermode && (entry.Type().IsRegular()) {
 			if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
 				return err
 			}
@@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
 	}
 	d.digest = digest
 
-	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+	return os.WriteFile(manifestPath, manifestBlob, 0644)
 }
 
 // PutSignatures writes signatures to the destination.
@@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
 
 	for i, sig := range signatures {
 		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
-		if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+		if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
 			return err
 		}
 	}
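Unlike ioutil.ReadDir, os.ReadDir returns fs.DirEntry values: entry.Type() classifies the entry from the directory listing itself (usually without an extra stat), while the full os.FileInfo, needed above for the Mode() permission bits, is fetched lazily via entry.Info(). The pattern in isolation (a sketch):

package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // replaces ioutil.ReadDir
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		// Type() is enough to filter out directories, pipes, sockets,
		// and devices without stat-ing every entry.
		if !entry.Type().IsRegular() {
			continue
		}
		// Info() performs the stat only when permission bits are
		// actually needed, mirroring the fixFiles() change above.
		info, err := entry.Info()
		if err != nil {
			continue
		}
		fmt.Printf("%s %v\n", entry.Name(), info.Mode().Perm())
	}
}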
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index d30c764a6..1e1f2be03 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -9,7 +9,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strconv"
 	"strings"
 	"unsafe"
@@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
 		}
 		defer sigReader.Close()
 
-		sig, err := ioutil.ReadAll(sigReader)
+		sig, err := io.ReadAll(sigReader)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
index b67a83f33..8b22733ac 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -196,7 +195,7 @@ func (s *blobCacheSource) Close() error {
 func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
 	if instanceDigest != nil {
 		filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
-		manifestBytes, err := ioutil.ReadFile(filename)
+		manifestBytes, err := os.ReadFile(filename)
 		if err == nil {
 			s.cacheHits++
 			return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
@@ -280,10 +279,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
 			switch s.reference.compress {
 			case types.Compress:
 				alternate = blobFile + compressedNote
-				replaceDigest, err = ioutil.ReadFile(alternate)
+				replaceDigest, err = os.ReadFile(alternate)
 			case types.Decompress:
 				alternate = blobFile + decompressedNote
-				replaceDigest, err = ioutil.ReadFile(alternate)
+				replaceDigest, err = os.ReadFile(alternate)
 			}
 			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
 				alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
@@ -373,7 +372,7 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
 		_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
 	} else {
 		// Drain the pipe to keep from stalling the PutBlob() thread.
-		if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil {
+		if _, err := io.Copy(io.Discard, decompressReader); err != nil {
 			logrus.Debugf("error draining the pipe: %v", err)
 		}
 	}
@@ -423,7 +422,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
 	compression := archive.Uncompressed
 	if inputInfo.Digest != "" {
 		filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
-		tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+		tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
 		if err == nil {
 			stream = io.TeeReader(stream, tempfile)
 			defer func() {
@@ -457,7 +456,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
 				if compression == archive.Gzip {
 					// The stream is compressed, so create a file which we'll
 					// use to store a decompressed copy.
-					decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+					decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
 					if err2 != nil {
 						logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
 						decompressedTemp.Close()
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index c28e81792..34c90dd77 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -5,7 +5,6 @@ import (
 	"compress/bzip2"
 	"fmt"
 	"io"
-	"io/ioutil"
 
 	"github.com/containers/image/v5/pkg/compression/internal"
 	"github.com/containers/image/v5/pkg/compression/types"
@@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
 
 // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
 func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
-	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+	return io.NopCloser(bzip2.NewReader(r)), nil
 }
 
 // XzDecompressor is a DecompressorFunc for the xz compression algorithm.
@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return ioutil.NopCloser(r), nil
+	return io.NopCloser(r), nil
 }
 
 // gzipCompressor is a CompressorFunc for the gzip compression algorithm.
@@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
 			return nil, false, errors.Wrapf(err, "initializing decompression")
 		}
 	} else {
-		res = ioutil.NopCloser(stream)
+		res = io.NopCloser(stream)
 	}
 	return res, decompressor != nil, nil
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 52734bead..d0bdd08e9 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -4,7 +4,6 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -544,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
 func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 	var auths dockerConfigFile
 
-	raw, err := ioutil.ReadFile(path)
+	raw, err := os.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			auths.AuthConfigs = map[string]dockerAuthConfig{}
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index c5df241b7..c1753c845 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -2,6 +2,7 @@ package sysregistriesv2
 
 import (
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -643,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
 	}
 	for _, dirPath := range dirPaths {
-		err := filepath.Walk(dirPath,
+		err := filepath.WalkDir(dirPath,
 			// WalkFunc to read additional configs
-			func(path string, info os.FileInfo, err error) error {
+			func(path string, d fs.DirEntry, err error) error {
 				switch {
 				case err != nil:
 					// return error (could be a permission problem)
 					return err
-				case info == nil:
+				case d == nil:
 					// this should only happen when err != nil but let's be sure
 					return nil
-				case info.IsDir():
+				case d.IsDir():
 					if path != dirPath {
 						// make sure to not recurse into sub-directories
 						return filepath.SkipDir
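filepath.Walk is replaced by filepath.WalkDir, which passes an fs.DirEntry instead of stat-ing every file. The non-recursive drop-in-directory pattern used above, in isolation (a sketch; the directory path is illustrative):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

func main() {
	dirPath := "/etc/containers/registries.conf.d" // illustrative
	err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
		switch {
		case err != nil:
			return err // e.g. a permission problem
		case d == nil:
			return nil // defensive; should only happen when err != nil
		case d.IsDir():
			if path != dirPath {
				return filepath.SkipDir // do not recurse into sub-directories
			}
			return nil
		case strings.HasSuffix(path, ".conf"):
			fmt.Println("drop-in config:", path)
		}
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}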
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 7e2142b1f..c766417d0 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -2,7 +2,6 @@ package tlsclientconfig
 
 import (
 	"crypto/tls"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"os"
@@ -19,7 +18,7 @@ import (
 // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
 func SetupCertificates(dir string, tlsc *tls.Config) error {
 	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
-	fs, err := ioutil.ReadDir(dir)
+	fs, err := os.ReadDir(dir)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil
@@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 		fullPath := filepath.Join(dir, f.Name())
 		if strings.HasSuffix(f.Name(), ".crt") {
 			logrus.Debugf(" crt: %s", fullPath)
-			data, err := ioutil.ReadFile(fullPath)
+			data, err := os.ReadFile(fullPath)
 			if err != nil {
 				if os.IsNotExist(err) {
 					// Dangling symbolic link?
@@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 	return nil
 }
 
-func hasFile(files []os.FileInfo, name string) bool {
+func hasFile(files []os.DirEntry, name string) bool {
 	for _, f := range files {
 		if f.Name() == name {
 			return true
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
index ba6d875ba..70758ad43 100644
--- a/vendor/github.com/containers/image/v5/sif/load.go
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -103,7 +102,7 @@ func writeInjectedScript(extractedRootPath string, injectedScript []byte) error
 	if err := os.MkdirAll(parentDirPath, 0755); err != nil {
 		return fmt.Errorf("creating %s: %w", parentDirPath, err)
 	}
-	if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil {
+	if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
 		return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
 	}
 	return nil
@@ -121,7 +120,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
 	conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
 		extractedRootPath, squashFSPath, extractedRootPath, tarPath)
 	script := "#!/bin/sh\n" + conversionCommand + "\n"
-	if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+	if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
 		return err
 	}
 	defer os.Remove(scriptPath)
@@ -149,7 +148,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
 // at start, and is exclusively used by the current process (i.e. it is safe
 // to use hard-coded relative paths within it).
 func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
-	// We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive,
+	// We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
 	// so we can just hard-code a set of unique values here.
 	// We create and/or manage cleanup of these two paths.
 	squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
index ba95a469f..ccf125966 100644
--- a/vendor/github.com/containers/image/v5/sif/src.go
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 
 	"github.com/containers/image/v5/internal/tmpdir"
@@ -65,7 +64,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
 		_ = sifImg.UnloadContainer()
 	}()
 
-	workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
+	workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
 	if err != nil {
 		return nil, fmt.Errorf("creating temp directory: %w", err)
 	}
@@ -170,7 +169,7 @@ func (s *sifImageSource) HasThreadSafeGetBlob() bool {
 func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
 	switch info.Digest {
 	case s.configDigest:
-		return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+		return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
 	case s.layerDigest:
 		reader, err := os.Open(s.layerFile)
 		if err != nil {
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 961246147..249b5a1fe 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -6,7 +6,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"strings"
 
 	// This code is used only to parse the data in an explicitly-untrusted
@@ -82,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
 	if !md.IsSigned {
 		return nil, "", errors.New("The input is not a signature")
 	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	content, err := io.ReadAll(md.UnverifiedBody)
 	if err != nil {
 		// Coverage: An error during reading the body can happen only if
 		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index c166fb32d..4c7968417 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -7,7 +7,6 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 
 	"github.com/proglottis/gpgme"
@@ -37,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
 // of these keys.
 // The caller must call .Close() on the returned SigningMechanism.
 func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
-	dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+	dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index ef4e70e7f..63cb7788b 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -7,7 +7,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"path"
 	"strings"
@@ -44,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
 		}
 	}
 
-	pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+	pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return nil, err
@@ -130,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 	if !md.IsSigned {
 		return nil, "", errors.New("not signed")
 	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	content, err := io.ReadAll(md.UnverifiedBody)
 	if err != nil {
 		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
 		// (and possibly also signed, but it _must_ be encrypted) and the signing
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 82fbb68cb..bb91cae8c 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -16,7 +16,6 @@ package signature
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
 
 // NewPolicyFromFile returns a policy configured in the specified file.
 func NewPolicyFromFile(fileName string) (*Policy, error) {
-	contents, err := ioutil.ReadFile(fileName)
+	contents, err := os.ReadFile(fileName)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index 26cca4759..65e825973 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -5,7 +5,7 @@ package signature
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"strings"
 
 	"github.com/containers/image/v5/manifest"
@@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
 	if pr.KeyData != nil {
 		data = pr.KeyData
 	} else {
-		d, err := ioutil.ReadFile(pr.KeyPath)
+		d, err := os.ReadFile(pr.KeyPath)
 		if err != nil {
 			return sarRejected, nil, err
 		}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 08ae042ac..8071e3b32 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -10,7 +10,6 @@ import (
 	stderrors "errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool {
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
 	if info.Digest == image.GzippedEmptyLayerDigest {
-		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+		return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
 	}
 
 	// NOTE: the blob is first written to a temporary file and subsequently
@@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
 	}
 	defer rc.Close()
 
-	tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+	tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
 	if err != nil {
 		return nil, 0, err
 	}
@@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
 		}
 		r := bytes.NewReader(b)
 		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-		return ioutil.NopCloser(r), int64(r.Len()), "", nil
+		return io.NopCloser(r), int64(r.Len()), "", nil
 	}
 	// Step through the list of matching layers.  Tests may want to verify that if we have multiple layers
 	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
@@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *
 // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
 // it's time to Commit() the image
 func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
-	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+	directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
 	if err != nil {
 		return nil, errors.Wrapf(err, "creating a temporary directory")
 	}
@@ -791,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
 	}
 	// Assume it's a file, since we're only calling this from a place that expects to read files.
 	if filename, ok := s.filenames[info.Digest]; ok {
-		contents, err2 := ioutil.ReadFile(filename)
+		contents, err2 := os.ReadFile(filename)
 		if err2 != nil {
 			return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
 		}
@@ -1136,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		delete(dataBlobs, layerBlob.Digest)
 	}
 	for blob := range dataBlobs {
-		v, err := ioutil.ReadFile(s.filenames[blob])
+		v, err := os.ReadFile(s.filenames[blob])
 		if err != nil {
 			return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
 		}
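
The GetBlob changes above sit inside the pattern the surrounding code uses for blob
reads: the incoming stream is staged in a temporary file so the caller gets back a
rewindable ReadCloser together with an exact byte count. A simplified sketch of that
pattern, stdlib only; this is not the vendored implementation:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // stageBlob copies src into a temp file under tmpDir, then rewinds it.
    // The file is unlinked immediately; on Unix-like systems the open
    // handle keeps the data readable until it is closed.
    func stageBlob(tmpDir string, src io.Reader) (io.ReadCloser, int64, error) {
        tmp, err := os.CreateTemp(tmpDir, "")
        if err != nil {
            return nil, 0, err
        }
        os.Remove(tmp.Name())
        n, err := io.Copy(tmp, src)
        if err != nil {
            tmp.Close()
            return nil, 0, err
        }
        if _, err := tmp.Seek(0, io.SeekStart); err != nil {
            tmp.Close()
            return nil, 0, err
        }
        return tmp, n, nil
    }

    func main() {
        rc, n, err := stageBlob(os.TempDir(), strings.NewReader("blob data"))
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        fmt.Println(n, "bytes staged")
    }
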
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 694ad17bd..aedfdf5de 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"runtime"
 	"strings"
@@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 			uncompressed = nil
 		}
 		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-		n, err := io.Copy(ioutil.Discard, reader)
+		n, err := io.Copy(io.Discard, reader)
 		if err != nil {
 			return nil, fmt.Errorf("error reading %q: %v", filename, err)
 		}
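
Copying to io.Discard, as above, measures an uncompressed stream without buffering it.
In the same spirit, copying into a hash yields the size and a digest in a single pass,
which is what transports like this one generally need; a small illustrative sketch:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        layer := strings.NewReader("layer bytes go here")

        // io.Copy reports how many bytes passed through, so writing into a
        // hash gives both the size and the digest in one pass. Copying to
        // io.Discard instead would yield only the size.
        h := sha256.New()
        n, err := io.Copy(h, layer)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%d bytes, sha256:%x\n", n, h.Sum(nil))
    }
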
@@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
 func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
 	// We should only be asked about things in the manifest.  Maybe the configuration blob.
 	if blobinfo.Digest == is.configID {
-		return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
+		return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
 	}
 	// Maybe one of the layer blobs.
 	for i := range is.blobIDs {
 		if blobinfo.Digest == is.blobIDs[i] {
 			// We want to read that layer: open the file or memory block and hand it back.
 			if is.filenames[i] == "-" {
-				return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
+				return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
 			}
 			reader, err := os.Open(is.filenames[i])
 			if err != nil {
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index d407c657f..63d835530 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -3,7 +3,7 @@ package tarball
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"strings"
 
@@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
 	filenames := strings.Split(reference, separator)
 	for _, filename := range filenames {
 		if filename == "-" {
-			stdin, err = ioutil.ReadAll(os.Stdin)
+			stdin, err = io.ReadAll(os.Stdin)
 			if err != nil {
 				return nil, fmt.Errorf("error buffering stdin: %v", err)
 			}
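
A reference spelled "-" means stdin, which can only be consumed once; the transport
therefore buffers it up front and serves the bytes from memory thereafter (see the
stdin branch in GetBlob above). A minimal sketch of that read-once, serve-many idea
(illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
    )

    func main() {
        // Read stdin exactly once...
        stdin, err := io.ReadAll(os.Stdin)
        if err != nil {
            fmt.Fprintf(os.Stderr, "error buffering stdin: %v\n", err)
            os.Exit(1)
        }
        // ...then hand out as many independent readers as needed.
        first := io.NopCloser(bytes.NewReader(stdin))
        second := io.NopCloser(bytes.NewReader(stdin))
        defer first.Close()
        defer second.Close()
        fmt.Println(len(stdin), "bytes buffered, served twice")
    }
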
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index d7ca0c1c4..2c1e4a185 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -69,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
 	done
 
 cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs
-	$(RUNINVM) make local-$@
+	$(RUNINVM) $(MAKE) local-$@
 
 docs: install.tools ## build the docs on the host
 	$(MAKE) -C docs docs
 
 gccgo: ## build using gccgo using VMs
-	$(RUNINVM) make local-$@
+	$(RUNINVM) $(MAKE) local-$@
 
 test: local-binary ## build the binaries and run the tests using VMs
-	$(RUNINVM) make local-binary local-cross local-test-unit local-test-integration
+	$(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration
 
 local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
 	@$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
 
 test-unit: local-binary ## run the unit tests using VMs
-	$(RUNINVM) make local-$@
+	$(RUNINVM) $(MAKE) local-$@
 
 local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
 	@cd tests; ./test_runner.bash
 
 test-integration: local-binary ## run the integration tests using VMs
-	$(RUNINVM) make local-$@
+	$(RUNINVM) $(MAKE) local-$@
 
 local-validate: ## validate DCO and gofmt on the host
 	@./hack/git-validation.sh
 	@./hack/gofmt.sh
 
 validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs
-	$(RUNINVM) make local-$@
+	$(RUNINVM) $(MAKE) local-$@
 
 install.tools:
-	make -C tests/tools
+	$(MAKE) -C tests/tools
 
 $(FFJSON):
-	make -C tests/tools
+	$(MAKE) -C tests/tools
 
 install.docs: docs
-	make -C docs install
+	$(MAKE) -C docs install
 
 install: install.docs
 
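The Makefile hunks above are about correctness, not just style: invoking sub-makes
through $(MAKE) instead of a literal make lets GNU make pass its command-line flags
and jobserver down to the child, so options such as -j and -n keep working across the
recursion, whereas a hard-coded make silently drops them.
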
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
index e1320ee07..79a591288 100644
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -1,16 +1,45 @@
 package graphdriver
 
 import (
+	"fmt"
 	"golang.org/x/sys/unix"
+
+	"github.com/containers/storage/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
 )
 
 var (
 	// Slice of drivers that should be used in an order
 	priority = []string{
 		"zfs",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
 	}
 )
 
+// NewDefaultChecker returns a Checker that consults the system mount
+// table (via the mount package) to determine whether the specified
+// path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
 // Mounted checks if the given path is mounted as the fs type
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {
 	var buf unix.Statfs_t
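
For context, Mounted compares the filesystem id reported by statfs(2) against an
FsMagic constant such as the FsMagicZfs value added above. A hedged, stand-alone
sketch of that check (the type and constant mirror the hunk; Statfs_t field widths
differ between Linux and FreeBSD, hence the explicit conversion):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // FsMagic mirrors the graphdriver type: a filesystem id from statfs(2).
    type FsMagic uint32

    // FsMagicZfs filesystem id for Zfs, as in the hunk above.
    const FsMagicZfs = FsMagic(0x2fc12fc1)

    func isZfs(path string) (bool, error) {
        var buf unix.Statfs_t
        if err := unix.Statfs(path, &buf); err != nil {
            return false, err
        }
        return FsMagic(buf.Type) == FsMagicZfs, nil
    }

    func main() {
        ok, err := isZfs("/")
        fmt.Println(ok, err)
    }
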
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
index c748468e5..4623e7f46 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
 
 package register
 
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index e034bf152..f29dc8f85 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -344,7 +344,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
 				return errors.Wrap(err, "error creating zfs mount")
 			}
 			defer func() {
-				if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+				if err := detachUnmount(mountpoint); err != nil {
 					logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
 				}
 			}()
@@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error {
 
 	logger.Debugf(`unmount("%s")`, mountpoint)
 
-	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+	if err := detachUnmount(mountpoint); err != nil {
 		logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
 	}
 	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index bf6905159..fd98ad305 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -37,3 +37,8 @@ func getMountpoint(id string) string {
 
 	return id[:maxlen]
 }
+
+func detachUnmount(mountpoint string) error {
+	// FreeBSD doesn't have an equivalent to MNT_DETACH
+	return unix.Unmount(mountpoint, 0)
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
index edcb1da36..44c68f394 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -4,6 +4,7 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 func checkRootdirFs(rootDir string) error {
@@ -27,3 +28,7 @@ func checkRootdirFs(rootDir string) error {
 func getMountpoint(id string) string {
 	return id
 }
+
+func detachUnmount(mountpoint string) error {
+	return unix.Unmount(mountpoint, unix.MNT_DETACH)
+}
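
detachUnmount is the usual per-platform split: one file per GOOS, each providing the
same function, so zfs.go itself can stay platform-neutral. A minimal sketch of the
pattern (file names and package are illustrative; the vendored files get their
constraints implicitly from the _linux/_freebsd filename suffixes, while the explicit
build lines below show the same effect):

    // unmount_linux.go
    //go:build linux

    package myapp

    import "golang.org/x/sys/unix"

    // Lazy unmount: detach now, let in-flight I/O finish later.
    func detachUnmount(mountpoint string) error {
        return unix.Unmount(mountpoint, unix.MNT_DETACH)
    }

    // unmount_freebsd.go
    //go:build freebsd

    package myapp

    import "golang.org/x/sys/unix"

    // FreeBSD has no MNT_DETACH equivalent, so unmount synchronously.
    func detachUnmount(mountpoint string) error {
        return unix.Unmount(mountpoint, 0)
    }
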
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 5e9930ea7..34d27ffa3 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -683,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 		r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
 	}
 	if layer.UncompressedDigest != "" {
-		r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
+		r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
 	}
 	if err := r.Save(); err != nil {
 		r.driver.Remove(id)
@@ -866,6 +866,14 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 				return nil, -1, err
 			}
 			delete(layer.Flags, incompleteFlag)
+		} else {
+			// When diff != nil, applyDiffWithOptions records these digests for us;
+			// in the diff == nil case we have to update the maps ourselves.
+			if layer.CompressedDigest != "" {
+				r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+			}
+			if layer.UncompressedDigest != "" {
+				r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+			}
 		}
 		err = r.Save()
 		if err != nil {
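
Both layers.go hunks above concern the same pair of lookup tables: layers are indexed
by compressed and by uncompressed digest, and the first hunk fixes a copy-paste bug
where the uncompressed-digest map was keyed by the compressed digest. A toy
illustration of the bug class (types and names are illustrative):

    package main

    import "fmt"

    type layer struct {
        ID                 string
        CompressedDigest   string
        UncompressedDigest string
    }

    func main() {
        byCompressed := map[string][]string{}
        byUncompressed := map[string][]string{}

        l := layer{ID: "layer1", CompressedDigest: "sha256:aaa", UncompressedDigest: "sha256:bbb"}

        byCompressed[l.CompressedDigest] = append(byCompressed[l.CompressedDigest], l.ID)

        // Pre-fix bug: the uncompressed map was keyed by the *compressed*
        // digest, so lookups by uncompressed digest came back empty:
        //   byUncompressed[l.CompressedDigest] = append(byUncompressed[l.CompressedDigest], l.ID)
        byUncompressed[l.UncompressedDigest] = append(byUncompressed[l.UncompressedDigest], l.ID)

        fmt.Println(byUncompressed["sha256:bbb"]) // [layer1], found only after the fix
    }
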
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 000000000..cc655c62e
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,205 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# containers/storage library do not inherit fields from other storage.conf
+# files.
+#
+#  Note: The storage.conf file overrides other storage.conf files based on this precedence:
+#      /usr/containers/storage.conf
+#      /etc/containers/storage.conf
+#      $HOME/.config/containers/storage.conf
+#      $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default storage driver. Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional read-only image stores.
+# Must be a comma-separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs.  Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file.  Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid files.  These ranges will be partitioned
+# among containers configured to automatically create a user namespace.  Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image, even those with multiple UIDs.  Note that multiple UIDs
+# will be squashed down to the default UID in the container.  These images
+# will have no separation between the users in the container. Only supported
+# for the overlay and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma-separated list of extra mount options.
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+#  "": No value specified.
+#     All files/directories get set with the permissions identified within the
+#     image.
+#  "private": it is equivalent to 0700.
+#     All files/directories get set with 0700 permissions.  The owner has rwx
+#     access to the files. No other users on the system can access the files.
+#     This setting could be used with network-based homedirs.
+#  "shared": it is equivalent to 0755.
+#     The owner has rwx access to the files and everyone else can read, access
+#     and execute them. This setting is useful for sharing containers storage
+#     with other users.  For instance, have a store owned by root but shared
+#     with rootless users as an additional store.
+#     NOTE:  All files within the image are made readable and executable by any
+#     user on the system. Even /etc/shadow within your image is now readable by
+#     any user.
+#
+#   OCTAL: Users can experiment with other OCTAL Permissions.
+#
+#  Note: The force_mask flag is an experimental feature; it could change in the
+#  future.  When "force_mask" is set, the original permission mask is stored in
+#  the "user.containers.override_stat" xattr and the "mount_program" option must
+#  be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+#  extended attribute permissions to processes within containers rather than the
+#  "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which the pool needs to be
+# grown. This is specified in terms of % of the pool size. So a value of 20
+# means that when the threshold is hit, the pool will be grown by 20% of the
+# existing pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of the pool size. For example, a threshold of 60 means the
+# threshold is hit when the pool is 60% full.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes the device even if it already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the minimum free space percentage in a thin pool
+# required for new device creation to succeed. Valid values are from 0% to 99%.
+# A value of 0% disables the check.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` option when
+# creating thin devices. Default is 128k.
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks the devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note that this does not
+# free up the disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks the thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will retry the deletion every 30 seconds until it succeeds.
+# If the program using the driver exits, the driver will continue attempting
+# the cleanup the next time the driver is used. Deferred deletion permanently
+# deletes the device, and all data stored in the device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when an ENOSPC (no space) error is returned by the
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
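
The header of this new file describes a precedence order between several storage.conf
locations. A hedged sketch of how a consumer might resolve it, most specific existing
file first (the path list mirrors the header comment; the authoritative logic lives in
containers/storage and additionally distinguishes rootless users from root):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // resolveStorageConf returns the first existing candidate, if any.
    func resolveStorageConf() (string, bool) {
        var candidates []string
        if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
            candidates = append(candidates, filepath.Join(xdg, "containers/storage.conf"))
        }
        if home, err := os.UserHomeDir(); err == nil {
            candidates = append(candidates, filepath.Join(home, ".config/containers/storage.conf"))
        }
        candidates = append(candidates,
            "/etc/containers/storage.conf",
            "/usr/containers/storage.conf",
        )
        for _, p := range candidates {
            if _, err := os.Stat(p); err == nil {
                return p, true
            }
        }
        return "", false
    }

    func main() {
        if p, ok := resolveStorageConf(); ok {
            fmt.Println("using", p)
        } else {
            fmt.Println("no storage.conf found; built-in defaults apply")
        }
    }
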
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
deleted file mode 100644
index 38a099162..000000000
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ /dev/null
@@ -1,83 +0,0 @@
-## 1.4.3
-
-* Fix cases where `json.Number` didn't decode properly [GH-261]
-
-## 1.4.2
-
-* Custom name matchers to support any sort of casing, formatting, etc. for
-  field names. [GH-250]
-* Fix possible panic in ComposeDecodeHookFunc [GH-251]
-
-## 1.4.1
-
-* Fix regression where `*time.Time` value would be set to empty and not be sent
-  to decode hooks properly [GH-232]
-
-## 1.4.0
-
-* A new decode hook type `DecodeHookFuncValue` has been added that has
-  access to the full values. [GH-183]
-* Squash is now supported with embedded fields that are struct pointers [GH-205]
-* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
-
-## 1.3.3
-
-* Decoding maps from maps creates a settable value for decode hooks [GH-203]
-
-## 1.3.2
-
-* Decode into interface type with a struct value is supported [GH-187]
-
-## 1.3.1
-
-* Squash should only squash embedded structs. [GH-194]
-
-## 1.3.0
-
-* Added `",omitempty"` support. This will ignore zero values in the source
-  structure when encoding. [GH-145]
-
-## 1.2.3
-
-* Fix duplicate entries in Keys list with pointer values. [GH-185]
-
-## 1.2.2
-
-* Do not add unsettable (unexported) values to the unused metadata key
-  or "remain" value. [GH-150]
-
-## 1.2.1
-
-* Go modules checksum mismatch fix
-
-## 1.2.0
-
-* Added support to capture unused values in a field using the `",remain"` value
-  in the mapstructure tag. There is an example to showcase usage.
-* Added `DecoderConfig` option to always squash embedded structs
-* `json.Number` can decode into `uint` types
-* Empty slices are preserved and not replaced with nil slices
-* Fix panic that can occur in when decoding a map into a nil slice of structs
-* Improved package documentation for godoc
-
-## 1.1.2
-
-* Fix error when decode hook decodes interface implementation into interface
-  type. [GH-140]
-
-## 1.1.1
-
-* Fix panic that can happen in `decodePtr`
-
-## 1.1.0
-
-* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
-* Support struct to struct decoding [GH-137]
-* If source map value is nil, then destination map value is nil (instead of empty)
-* If source slice value is nil, then destination slice value is nil (instead of empty)
-* If source pointer is nil, then destination pointer is set to nil (instead of
-  allocated zero value of type)
-
-## 1.0.0
-
-* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
deleted file mode 100644
index f9c841a51..000000000
--- a/vendor/github.com/mitchellh/mapstructure/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
deleted file mode 100644
index 0018dc7d9..000000000
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/mapstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but the problem is if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
-  "type": "person",
-  "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
deleted file mode 100644
index 4d4bbc733..000000000
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package mapstructure
-
-import (
-	"encoding"
-	"errors"
-	"fmt"
-	"net"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
-	// Create variables here so we can reference them with the reflect pkg
-	var f1 DecodeHookFuncType
-	var f2 DecodeHookFuncKind
-	var f3 DecodeHookFuncValue
-
-	// Fill in the variables into this interface and the rest is done
-	// automatically using the reflect package.
-	potential := []interface{}{f1, f2, f3}
-
-	v := reflect.ValueOf(h)
-	vt := v.Type()
-	for _, raw := range potential {
-		pt := reflect.ValueOf(raw).Type()
-		if vt.ConvertibleTo(pt) {
-			return v.Convert(pt).Interface()
-		}
-	}
-
-	return nil
-}
-
-// DecodeHookExec executes the given decode hook. This should be used
-// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
-// that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
-	raw DecodeHookFunc,
-	from reflect.Value, to reflect.Value) (interface{}, error) {
-
-	switch f := typedDecodeHook(raw).(type) {
-	case DecodeHookFuncType:
-		return f(from.Type(), to.Type(), from.Interface())
-	case DecodeHookFuncKind:
-		return f(from.Kind(), to.Kind(), from.Interface())
-	case DecodeHookFuncValue:
-		return f(from, to)
-	default:
-		return nil, errors.New("invalid decode hook signature")
-	}
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
-	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
-		var err error
-		data := f.Interface()
-
-		newFrom := f
-		for _, f1 := range fs {
-			data, err = DecodeHookExec(f1, newFrom, t)
-			if err != nil {
-				return nil, err
-			}
-			newFrom = reflect.ValueOf(data)
-		}
-
-		return data, nil
-	}
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
-	return func(
-		f reflect.Kind,
-		t reflect.Kind,
-		data interface{}) (interface{}, error) {
-		if f != reflect.String || t != reflect.Slice {
-			return data, nil
-		}
-
-		raw := data.(string)
-		if raw == "" {
-			return []string{}, nil
-		}
-
-		return strings.Split(raw, sep), nil
-	}
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(time.Duration(5)) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		return time.ParseDuration(data.(string))
-	}
-}
-
-// StringToIPHookFunc returns a DecodeHookFunc that converts
-// strings to net.IP
-func StringToIPHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(net.IP{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		ip := net.ParseIP(data.(string))
-		if ip == nil {
-			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
-		}
-
-		return ip, nil
-	}
-}
-
-// StringToIPNetHookFunc returns a DecodeHookFunc that converts
-// strings to net.IPNet
-func StringToIPNetHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(net.IPNet{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		_, net, err := net.ParseCIDR(data.(string))
-		return net, err
-	}
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(time.Time{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		return time.Parse(layout, data.(string))
-	}
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
-	f reflect.Kind,
-	t reflect.Kind,
-	data interface{}) (interface{}, error) {
-	dataVal := reflect.ValueOf(data)
-	switch t {
-	case reflect.String:
-		switch f {
-		case reflect.Bool:
-			if dataVal.Bool() {
-				return "1", nil
-			}
-			return "0", nil
-		case reflect.Float32:
-			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
-		case reflect.Int:
-			return strconv.FormatInt(dataVal.Int(), 10), nil
-		case reflect.Slice:
-			dataType := dataVal.Type()
-			elemKind := dataType.Elem().Kind()
-			if elemKind == reflect.Uint8 {
-				return string(dataVal.Interface().([]uint8)), nil
-			}
-		case reflect.Uint:
-			return strconv.FormatUint(dataVal.Uint(), 10), nil
-		}
-	}
-
-	return data, nil
-}
-
-func RecursiveStructToMapHookFunc() DecodeHookFunc {
-	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
-		if f.Kind() != reflect.Struct {
-			return f.Interface(), nil
-		}
-
-		var i interface{} = struct{}{}
-		if t.Type() != reflect.TypeOf(&i).Elem() {
-			return f.Interface(), nil
-		}
-
-		m := make(map[string]interface{})
-		t.Set(reflect.ValueOf(m))
-
-		return f.Interface(), nil
-	}
-}
-
-// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
-// strings to the UnmarshalText function, when the target type
-// implements the encoding.TextUnmarshaler interface
-func TextUnmarshallerHookFunc() DecodeHookFuncType {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		result := reflect.New(t).Interface()
-		unmarshaller, ok := result.(encoding.TextUnmarshaler)
-		if !ok {
-			return data, nil
-		}
-		if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
-			return nil, err
-		}
-		return result, nil
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5af..000000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Error implements the error interface and can represents multiple
-// errors that occur in the course of a single decode.
-type Error struct {
-	Errors []string
-}
-
-func (e *Error) Error() string {
-	points := make([]string, len(e.Errors))
-	for i, err := range e.Errors {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	sort.Strings(points)
-	return fmt.Sprintf(
-		"%d error(s) decoding:\n\n%s",
-		len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
-	if e == nil {
-		return nil
-	}
-
-	result := make([]error, len(e.Errors))
-	for i, e := range e.Errors {
-		result[i] = errors.New(e)
-	}
-
-	return result
-}
-
-func appendErrors(errors []string, err error) []string {
-	switch e := err.(type) {
-	case *Error:
-		return append(errors, e.Errors...)
-	default:
-		return append(errors, e.Error())
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
deleted file mode 100644
index a03ae9730..000000000
--- a/vendor/github.com/mitchellh/mapstructure/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mitchellh/mapstructure
-
-go 1.14
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index 6b81b0067..000000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,1467 +0,0 @@
-// Package mapstructure exposes functionality to convert one arbitrary
-// Go type into another, typically to convert a map[string]interface{}
-// into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-//
-// The simplest function to start with is Decode.
-//
-// Field Tags
-//
-// When decoding to a struct, mapstructure will use the field name by
-// default to perform the mapping. For example, if a struct has a field
-// "Username" then mapstructure will look for a key in the source value
-// of "username" (case insensitive).
-//
-//     type User struct {
-//         Username string
-//     }
-//
-// You can change the behavior of mapstructure by using struct tags.
-// The default struct tag that mapstructure looks for is "mapstructure"
-// but you can customize it using DecoderConfig.
-//
-// Renaming Fields
-//
-// To rename the key that mapstructure looks for, use the "mapstructure"
-// tag and set a value directly. For example, to change the "username" example
-// above to "user":
-//
-//     type User struct {
-//         Username string `mapstructure:"user"`
-//     }
-//
-// Embedded Structs and Squashing
-//
-// Embedded structs are treated as if they're another field with that name.
-// By default, the two structs below are equivalent when decoding with
-// mapstructure:
-//
-//     type Person struct {
-//         Name string
-//     }
-//
-//     type Friend struct {
-//         Person
-//     }
-//
-//     type Friend struct {
-//         Person Person
-//     }
-//
-// This would require an input that looks like below:
-//
-//     map[string]interface{}{
-//         "person": map[string]interface{}{"name": "alice"},
-//     }
-//
-// If your "person" value is NOT nested, then you can append ",squash" to
-// your tag value and mapstructure will treat it as if the embedded struct
-// were part of the struct directly. Example:
-//
-//     type Friend struct {
-//         Person `mapstructure:",squash"`
-//     }
-//
-// Now the following input would be accepted:
-//
-//     map[string]interface{}{
-//         "name": "alice",
-//     }
-//
-// When decoding from a struct to a map, the squash tag squashes the struct
-// fields into a single map. Using the example structs from above:
-//
-//     Friend{Person: Person{Name: "alice"}}
-//
-// Will be decoded into a map:
-//
-//     map[string]interface{}{
-//         "name": "alice",
-//     }
-//
-// DecoderConfig has a field that changes the behavior of mapstructure
-// to always squash embedded structs.
-//
-// Remainder Values
-//
-// If there are any unmapped keys in the source value, mapstructure by
-// default will silently ignore them. You can error by setting ErrorUnused
-// in DecoderConfig. If you're using Metadata you can also maintain a slice
-// of the unused keys.
-//
-// You can also use the ",remain" suffix on your tag to collect all unused
-// values in a map. The field with this tag MUST be a map type and should
-// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
-// See example below:
-//
-//     type Friend struct {
-//         Name  string
-//         Other map[string]interface{} `mapstructure:",remain"`
-//     }
-//
-// Given the input below, Other would be populated with the other
-// values that weren't used (everything but "name"):
-//
-//     map[string]interface{}{
-//         "name":    "bob",
-//         "address": "123 Maple St.",
-//     }
-//
-// Omit Empty Values
-//
-// When decoding from a struct to any other value, you may use the
-// ",omitempty" suffix on your tag to omit that value if it equates to
-// the zero value. The zero value of all types is specified in the Go
-// specification.
-//
-// For example, the zero type of a numeric type is zero ("0"). If the struct
-// field value is zero and a numeric type, the field is empty, and it won't
-// be encoded into the destination type.
-//
-//     type Source {
-//         Age int `mapstructure:",omitempty"`
-//     }
-//
-// Unexported fields
-//
-// Since unexported (private) struct fields cannot be set outside the package
-// where they are defined, the decoder will simply skip them.
-//
-// For this output type definition:
-//
-//     type Exported struct {
-//         private string // this unexported field will be skipped
-//         Public string
-//     }
-//
-// Using this map as input:
-//
-//     map[string]interface{}{
-//         "private": "I will be ignored",
-//         "Public":  "I made it through!",
-//     }
-//
-// The following struct will be decoded:
-//
-//     type Exported struct {
-//         private: "" // field is left with an empty string (zero value)
-//         Public: "I made it through!"
-//     }
-//
-// Other Configuration
-//
-// mapstructure is highly configurable. See the DecoderConfig struct
-// for other features and options that are supported.
-package mapstructure
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
-// DecodeHookFuncValue.
-// Values are a superset of Types (Values can return types), and Types are a
-// superset of Kinds (Types can return Kinds) and are generally a richer thing
-// to use, but Kinds are simpler if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
-// values.
-type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
-	// DecodeHook, if set, will be called before any decoding and any
-	// type conversion (if WeaklyTypedInput is on). This lets you modify
-	// the values before they're set down onto the resulting struct. The
-	// DecodeHook is called for every map and value in the input. This means
-	// that if a struct has embedded fields with squash tags the decode hook
-	// is called only once with all of the input data, not once for each
-	// embedded struct.
-	//
-	// If an error is returned, the entire decode will fail with that error.
-	DecodeHook DecodeHookFunc
-
-	// If ErrorUnused is true, then it is an error for there to exist
-	// keys in the original map that were unused in the decoding process
-	// (extra keys).
-	ErrorUnused bool
-
-	// ZeroFields, if set to true, will zero fields before writing them.
-	// For example, a map will be emptied before decoded values are put in
-	// it. If this is false, a map will be merged.
-	ZeroFields bool
-
-	// If WeaklyTypedInput is true, the decoder will make the following
-	// "weak" conversions:
-	//
-	//   - bools to string (true = "1", false = "0")
-	//   - numbers to string (base 10)
-	//   - bools to int/uint (true = 1, false = 0)
-	//   - strings to int/uint (base implied by prefix)
-	//   - int to bool (true if value != 0)
-	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
-	//     FALSE, false, False. Anything else is an error)
-	//   - empty array = empty map and vice versa
-	//   - negative numbers to overflowed uint values (base 10)
-	//   - slice of maps to a merged map
-	//   - single values are converted to slices if required. Each
-	//     element is weakly decoded. For example: "4" can become []int{4}
-	//     if the target type is an int slice.
-	//
-	WeaklyTypedInput bool
-
-	// Squash will squash embedded structs.  A squash tag may also be
-	// added to an individual struct field using a tag.  For example:
-	//
-	//  type Parent struct {
-	//      Child `mapstructure:",squash"`
-	//  }
-	Squash bool
-
-	// Metadata is the struct that will contain extra metadata about
-	// the decoding. If this is nil, then no metadata will be tracked.
-	Metadata *Metadata
-
-	// Result is a pointer to the struct that will contain the decoded
-	// value.
-	Result interface{}
-
-	// The tag name that mapstructure reads for field names. This
-	// defaults to "mapstructure"
-	TagName string
-
-	// MatchName is the function used to match the map key to the struct
-	// field name or tag. Defaults to `strings.EqualFold`. This can be used
-	// to implement case-sensitive tag values, support snake casing, etc.
-	MatchName func(mapKey, fieldName string) bool
-}
-
-// A Decoder takes a raw interface value and turns it into structured
-// data, keeping track of rich error information along the way in case
-// anything goes wrong. Unlike the basic top-level Decode method, you can
-// more finely control how the Decoder behaves using the DecoderConfig
-// structure. The top-level Decode method is just a convenience that sets
-// up the most basic Decoder.
-type Decoder struct {
-	config *DecoderConfig
-}
-
-// Metadata contains information about decoding a structure that
-// is tedious or difficult to get otherwise.
-type Metadata struct {
-	// Keys are the keys of the structure which were successfully decoded
-	Keys []string
-
-	// Unused is a slice of keys that were found in the raw value but
-	// weren't decoded since there was no matching field in the result interface
-	Unused []string
-}
-
-// Decode takes an input structure and uses reflection to translate it to
-// the output structure. output must be a pointer to a map or struct.
-func Decode(input interface{}, output interface{}) error {
-	config := &DecoderConfig{
-		Metadata: nil,
-		Result:   output,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// WeakDecode is the same as Decode but is shorthand to enable
-// WeaklyTypedInput. See DecoderConfig for more info.
-func WeakDecode(input, output interface{}) error {
-	config := &DecoderConfig{
-		Metadata:         nil,
-		Result:           output,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// DecodeMetadata is the same as Decode, but is shorthand to
-// enable metadata collection. See DecoderConfig for more info.
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
-	config := &DecoderConfig{
-		Metadata: metadata,
-		Result:   output,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// WeakDecodeMetadata is the same as Decode, but is shorthand to
-// enable both WeaklyTypedInput and metadata collection. See
-// DecoderConfig for more info.
-func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
-	config := &DecoderConfig{
-		Metadata:         metadata,
-		Result:           output,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// NewDecoder returns a new decoder for the given configuration. Once
-// a decoder has been returned, the same configuration must not be used
-// again.
-func NewDecoder(config *DecoderConfig) (*Decoder, error) {
-	val := reflect.ValueOf(config.Result)
-	if val.Kind() != reflect.Ptr {
-		return nil, errors.New("result must be a pointer")
-	}
-
-	val = val.Elem()
-	if !val.CanAddr() {
-		return nil, errors.New("result must be addressable (a pointer)")
-	}
-
-	if config.Metadata != nil {
-		if config.Metadata.Keys == nil {
-			config.Metadata.Keys = make([]string, 0)
-		}
-
-		if config.Metadata.Unused == nil {
-			config.Metadata.Unused = make([]string, 0)
-		}
-	}
-
-	if config.TagName == "" {
-		config.TagName = "mapstructure"
-	}
-
-	if config.MatchName == nil {
-		config.MatchName = strings.EqualFold
-	}
-
-	result := &Decoder{
-		config: config,
-	}
-
-	return result, nil
-}
-
-// Decode decodes the given raw interface to the target pointer specified
-// by the configuration.
-func (d *Decoder) Decode(input interface{}) error {
-	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
-}
-
-// Decodes an unknown data type into a specific reflection value.
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
-	var inputVal reflect.Value
-	if input != nil {
-		inputVal = reflect.ValueOf(input)
-
-		// We need to check here if input is a typed nil. Typed nils won't
-		// match the "input == nil" below so we check that here.
-		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
-			input = nil
-		}
-	}
-
-	if input == nil {
-		// If the data is nil, then we don't set anything, unless ZeroFields is set
-		// to true.
-		if d.config.ZeroFields {
-			outVal.Set(reflect.Zero(outVal.Type()))
-
-			if d.config.Metadata != nil && name != "" {
-				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-			}
-		}
-		return nil
-	}
-
-	if !inputVal.IsValid() {
-		// If the input value is invalid, then we just set the value
-		// to be the zero value.
-		outVal.Set(reflect.Zero(outVal.Type()))
-		if d.config.Metadata != nil && name != "" {
-			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-		}
-		return nil
-	}
-
-	if d.config.DecodeHook != nil {
-		// We have a DecodeHook, so let's pre-process the input.
-		var err error
-		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
-		if err != nil {
-			return fmt.Errorf("error decoding '%s': %s", name, err)
-		}
-	}
-
-	var err error
-	outputKind := getKind(outVal)
-	addMetaKey := true
-	switch outputKind {
-	case reflect.Bool:
-		err = d.decodeBool(name, input, outVal)
-	case reflect.Interface:
-		err = d.decodeBasic(name, input, outVal)
-	case reflect.String:
-		err = d.decodeString(name, input, outVal)
-	case reflect.Int:
-		err = d.decodeInt(name, input, outVal)
-	case reflect.Uint:
-		err = d.decodeUint(name, input, outVal)
-	case reflect.Float32:
-		err = d.decodeFloat(name, input, outVal)
-	case reflect.Struct:
-		err = d.decodeStruct(name, input, outVal)
-	case reflect.Map:
-		err = d.decodeMap(name, input, outVal)
-	case reflect.Ptr:
-		addMetaKey, err = d.decodePtr(name, input, outVal)
-	case reflect.Slice:
-		err = d.decodeSlice(name, input, outVal)
-	case reflect.Array:
-		err = d.decodeArray(name, input, outVal)
-	case reflect.Func:
-		err = d.decodeFunc(name, input, outVal)
-	default:
-		// If we reached this point then we weren't able to decode it
-		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
-	}
-
-	// If we reached here, then we successfully decoded SOMETHING, so
-	// mark the key as used if we're tracking metainput.
-	if addMetaKey && d.config.Metadata != nil && name != "" {
-		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-	}
-
-	return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
-	if val.IsValid() && val.Elem().IsValid() {
-		elem := val.Elem()
-
-		// If we can't address this element, then its not writable. Instead,
-		// we make a copy of the value (which is a pointer and therefore
-		// writable), decode into that, and replace the whole value.
-		copied := false
-		if !elem.CanAddr() {
-			copied = true
-
-			// Make *T
-			copy := reflect.New(elem.Type())
-
-			// *T = elem
-			copy.Elem().Set(elem)
-
-			// Set elem so we decode into it
-			elem = copy
-		}
-
-		// Decode. If we have an error then return. We also return right
-		// away if we're not a copy because that means we decoded directly.
-		if err := d.decode(name, data, elem); err != nil || !copied {
-			return err
-		}
-
-		// If we're a copy, we need to set te final result
-		val.Set(elem.Elem())
-		return nil
-	}
-
-	dataVal := reflect.ValueOf(data)
-
-	// If the input data is a pointer, and the assigned type is the dereference
-	// of that exact pointer, then indirect it so that we can assign it.
-	// Example: *string to string
-	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
-		dataVal = reflect.Indirect(dataVal)
-	}
-
-	if !dataVal.IsValid() {
-		dataVal = reflect.Zero(val.Type())
-	}
-
-	dataValType := dataVal.Type()
-	if !dataValType.AssignableTo(val.Type()) {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got '%s'",
-			name, val.Type(), dataValType)
-	}
-
-	val.Set(dataVal)
-	return nil
-}
-
-func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-
-	converted := true
-	switch {
-	case dataKind == reflect.String:
-		val.SetString(dataVal.String())
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetString("1")
-		} else {
-			val.SetString("0")
-		}
-	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
-	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
-	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
-	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
-		dataKind == reflect.Array && d.config.WeaklyTypedInput:
-		dataType := dataVal.Type()
-		elemKind := dataType.Elem().Kind()
-		switch elemKind {
-		case reflect.Uint8:
-			var uints []uint8
-			if dataKind == reflect.Array {
-				uints = make([]uint8, dataVal.Len(), dataVal.Len())
-				for i := range uints {
-					uints[i] = dataVal.Index(i).Interface().(uint8)
-				}
-			} else {
-				uints = dataVal.Interface().([]uint8)
-			}
-			val.SetString(string(uints))
-		default:
-			converted = false
-		}
-	default:
-		converted = false
-	}
-
-	if !converted {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-	dataType := dataVal.Type()
-
-	switch {
-	case dataKind == reflect.Int:
-		val.SetInt(dataVal.Int())
-	case dataKind == reflect.Uint:
-		val.SetInt(int64(dataVal.Uint()))
-	case dataKind == reflect.Float32:
-		val.SetInt(int64(dataVal.Float()))
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetInt(1)
-		} else {
-			val.SetInt(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		str := dataVal.String()
-		if str == "" {
-			str = "0"
-		}
-
-		i, err := strconv.ParseInt(str, 0, val.Type().Bits())
-		if err == nil {
-			val.SetInt(i)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
-		}
-	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
-		jn := data.(json.Number)
-		i, err := jn.Int64()
-		if err != nil {
-			return fmt.Errorf(
-				"error decoding json.Number into %s: %s", name, err)
-		}
-		val.SetInt(i)
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-	dataType := dataVal.Type()
-
-	switch {
-	case dataKind == reflect.Int:
-		i := dataVal.Int()
-		if i < 0 && !d.config.WeaklyTypedInput {
-			return fmt.Errorf("cannot parse '%s', %d overflows uint",
-				name, i)
-		}
-		val.SetUint(uint64(i))
-	case dataKind == reflect.Uint:
-		val.SetUint(dataVal.Uint())
-	case dataKind == reflect.Float32:
-		f := dataVal.Float()
-		if f < 0 && !d.config.WeaklyTypedInput {
-			return fmt.Errorf("cannot parse '%s', %f overflows uint",
-				name, f)
-		}
-		val.SetUint(uint64(f))
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetUint(1)
-		} else {
-			val.SetUint(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		str := dataVal.String()
-		if str == "" {
-			str = "0"
-		}
-
-		i, err := strconv.ParseUint(str, 0, val.Type().Bits())
-		if err == nil {
-			val.SetUint(i)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
-		}
-	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
-		jn := data.(json.Number)
-		i, err := strconv.ParseUint(string(jn), 0, 64)
-		if err != nil {
-			return fmt.Errorf(
-				"error decoding json.Number into %s: %s", name, err)
-		}
-		val.SetUint(i)
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-
-	switch {
-	case dataKind == reflect.Bool:
-		val.SetBool(dataVal.Bool())
-	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Int() != 0)
-	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Uint() != 0)
-	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Float() != 0)
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		b, err := strconv.ParseBool(dataVal.String())
-		if err == nil {
-			val.SetBool(b)
-		} else if dataVal.String() == "" {
-			val.SetBool(false)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
-		}
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-	dataType := dataVal.Type()
-
-	switch {
-	case dataKind == reflect.Int:
-		val.SetFloat(float64(dataVal.Int()))
-	case dataKind == reflect.Uint:
-		val.SetFloat(float64(dataVal.Uint()))
-	case dataKind == reflect.Float32:
-		val.SetFloat(dataVal.Float())
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetFloat(1)
-		} else {
-			val.SetFloat(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		str := dataVal.String()
-		if str == "" {
-			str = "0"
-		}
-
-		f, err := strconv.ParseFloat(str, val.Type().Bits())
-		if err == nil {
-			val.SetFloat(f)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
-		}
-	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
-		jn := data.(json.Number)
-		i, err := jn.Float64()
-		if err != nil {
-			return fmt.Errorf(
-				"error decoding json.Number into %s: %s", name, err)
-		}
-		val.SetFloat(i)
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
-	valType := val.Type()
-	valKeyType := valType.Key()
-	valElemType := valType.Elem()
-
-	// By default we overwrite keys in the current map
-	valMap := val
-
-	// If the map is nil or we're purposely zeroing fields, make a new map
-	if valMap.IsNil() || d.config.ZeroFields {
-		// Make a new map to hold our result
-		mapType := reflect.MapOf(valKeyType, valElemType)
-		valMap = reflect.MakeMap(mapType)
-	}
-
-	// Check input type and based on the input type jump to the proper func
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	switch dataVal.Kind() {
-	case reflect.Map:
-		return d.decodeMapFromMap(name, dataVal, val, valMap)
-
-	case reflect.Struct:
-		return d.decodeMapFromStruct(name, dataVal, val, valMap)
-
-	case reflect.Array, reflect.Slice:
-		if d.config.WeaklyTypedInput {
-			return d.decodeMapFromSlice(name, dataVal, val, valMap)
-		}
-
-		fallthrough
-
-	default:
-		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
-	}
-}
-
-func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	// Special case for backward-compatibility reasons (covered by tests)
-	if dataVal.Len() == 0 {
-		val.Set(valMap)
-		return nil
-	}
-
-	for i := 0; i < dataVal.Len(); i++ {
-		err := d.decode(
-			name+"["+strconv.Itoa(i)+"]",
-			dataVal.Index(i).Interface(), val)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	valType := val.Type()
-	valKeyType := valType.Key()
-	valElemType := valType.Elem()
-
-	// Accumulate errors
-	errors := make([]string, 0)
-
-	// If the input is empty, match it: a nil input yields a nil map,
-	// a non-nil empty input yields an empty allocated map.
-	if dataVal.Len() == 0 {
-		if dataVal.IsNil() {
-			if !val.IsNil() {
-				val.Set(dataVal)
-			}
-		} else {
-			// Set to empty allocated value
-			val.Set(valMap)
-		}
-
-		return nil
-	}
-
-	for _, k := range dataVal.MapKeys() {
-		fieldName := name + "[" + k.String() + "]"
-
-		// First decode the key into the proper type
-		currentKey := reflect.Indirect(reflect.New(valKeyType))
-		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
-			errors = appendErrors(errors, err)
-			continue
-		}
-
-		// Next decode the data into the proper type
-		v := dataVal.MapIndex(k).Interface()
-		currentVal := reflect.Indirect(reflect.New(valElemType))
-		if err := d.decode(fieldName, v, currentVal); err != nil {
-			errors = appendErrors(errors, err)
-			continue
-		}
-
-		valMap.SetMapIndex(currentKey, currentVal)
-	}
-
-	// Set the built up map to the value
-	val.Set(valMap)
-
-	// If we had errors, return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	typ := dataVal.Type()
-	for i := 0; i < typ.NumField(); i++ {
-		// Get the StructField first since this is a cheap operation. If the
-		// field is unexported, then ignore it.
-		f := typ.Field(i)
-		if f.PkgPath != "" {
-			continue
-		}
-
-		// Next get the actual value of this field and verify it is assignable
-		// to the map value.
-		v := dataVal.Field(i)
-		if !v.Type().AssignableTo(valMap.Type().Elem()) {
-			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
-		}
-
-		tagValue := f.Tag.Get(d.config.TagName)
-		keyName := f.Name
-
-		// If Squash is set in the config, we squash the field down.
-		squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
-
-		// Determine the name of the key in the map
-		if index := strings.Index(tagValue, ","); index != -1 {
-			if tagValue[:index] == "-" {
-				continue
-			}
-			// If "omitempty" is specified in the tag, it ignores empty values.
-			if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
-				continue
-			}
-
-			// If "squash" is specified in the tag, we squash the field down.
-			squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
-			if squash {
-				// When squashing, the embedded type can be a pointer to a struct.
-				if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
-					v = v.Elem()
-				}
-
-				// The final type must be a struct
-				if v.Kind() != reflect.Struct {
-					return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
-				}
-			}
-			keyName = tagValue[:index]
-		} else if len(tagValue) > 0 {
-			if tagValue == "-" {
-				continue
-			}
-			keyName = tagValue
-		}
-
-		switch v.Kind() {
-		// this is an embedded struct, so handle it differently
-		case reflect.Struct:
-			x := reflect.New(v.Type())
-			x.Elem().Set(v)
-
-			vType := valMap.Type()
-			vKeyType := vType.Key()
-			vElemType := vType.Elem()
-			mType := reflect.MapOf(vKeyType, vElemType)
-			vMap := reflect.MakeMap(mType)
-
-			// Creating a pointer to a map so that other methods can completely
-			// overwrite the map if need be (looking at you decodeMapFromMap). The
-			// indirection allows the underlying map to be settable (CanSet() == true)
-	// whereas reflect.MakeMap returns an unsettable map.
-			addrVal := reflect.New(vMap.Type())
-			reflect.Indirect(addrVal).Set(vMap)
-
-			err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
-			if err != nil {
-				return err
-			}
-
-			// the underlying map may have been completely overwritten so pull
-			// it indirectly out of the enclosing value.
-			vMap = reflect.Indirect(addrVal)
-
-			if squash {
-				for _, k := range vMap.MapKeys() {
-					valMap.SetMapIndex(k, vMap.MapIndex(k))
-				}
-			} else {
-				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
-			}
-
-		default:
-			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
-		}
-	}
-
-	if val.CanAddr() {
-		val.Set(valMap)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
-	// If the input data is nil, then we want to just set the output
-	// pointer to be nil as well.
-	isNil := data == nil
-	if !isNil {
-		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
-		case reflect.Chan,
-			reflect.Func,
-			reflect.Interface,
-			reflect.Map,
-			reflect.Ptr,
-			reflect.Slice:
-			isNil = v.IsNil()
-		}
-	}
-	if isNil {
-		if !val.IsNil() && val.CanSet() {
-			nilValue := reflect.New(val.Type()).Elem()
-			val.Set(nilValue)
-		}
-
-		return true, nil
-	}
-
-	// Create an element of the concrete (non-pointer) type and decode
-	// into that. Then set the value of the pointer to this type.
-	valType := val.Type()
-	valElemType := valType.Elem()
-	if val.CanSet() {
-		realVal := val
-		if realVal.IsNil() || d.config.ZeroFields {
-			realVal = reflect.New(valElemType)
-		}
-
-		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
-			return false, err
-		}
-
-		val.Set(realVal)
-	} else {
-		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
-			return false, err
-		}
-	}
-	return false, nil
-}
-
-func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
-	// A function value cannot be converted: the source and destination
-	// types must match exactly, and the value is copied as-is.
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	if val.Type() != dataVal.Type() {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
-			name, val.Type(), dataVal.Type(), data)
-	}
-	val.Set(dataVal)
-	return nil
-}
-
-func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataValKind := dataVal.Kind()
-	valType := val.Type()
-	valElemType := valType.Elem()
-	sliceType := reflect.SliceOf(valElemType)
-
-	// If we have a non array/slice type then we first attempt to convert.
-	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
-		if d.config.WeaklyTypedInput {
-			switch {
-			// Slices and arrays use the normal logic
-			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
-				break
-
-			// Empty maps turn into empty slices
-			case dataValKind == reflect.Map:
-				if dataVal.Len() == 0 {
-					val.Set(reflect.MakeSlice(sliceType, 0, 0))
-					return nil
-				}
-				// A non-empty map is wrapped in a one-element slice.
-				return d.decodeSlice(name, []interface{}{data}, val)
-
-			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
-				return d.decodeSlice(name, []byte(dataVal.String()), val)
-
-			// All other types are "lifted" into the slice type,
-			// e.g. a string becomes a one-element string slice.
-			default:
-				// Just re-try this function with data as a slice.
-				return d.decodeSlice(name, []interface{}{data}, val)
-			}
-		}
-
-		return fmt.Errorf(
-			"'%s': source data must be an array or slice, got %s", name, dataValKind)
-	}
-
-	// If the input value is nil, then don't allocate since empty != nil
-	if dataVal.IsNil() {
-		return nil
-	}
-
-	valSlice := val
-	if valSlice.IsNil() || d.config.ZeroFields {
-		// Make a new slice to hold our result, same size as the original data.
-		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
-	}
-
-	// Accumulate any errors
-	errors := make([]string, 0)
-
-	for i := 0; i < dataVal.Len(); i++ {
-		currentData := dataVal.Index(i).Interface()
-		for valSlice.Len() <= i {
-			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
-		}
-		currentField := valSlice.Index(i)
-
-		fieldName := name + "[" + strconv.Itoa(i) + "]"
-		if err := d.decode(fieldName, currentData, currentField); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	// Finally, set the value to the slice we built up
-	val.Set(valSlice)
-
-	// If there were errors, we return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataValKind := dataVal.Kind()
-	valType := val.Type()
-	valElemType := valType.Elem()
-	arrayType := reflect.ArrayOf(valType.Len(), valElemType)
-
-	valArray := val
-
-	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
-		// Check input type
-		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
-			if d.config.WeaklyTypedInput {
-				switch {
-				// Empty maps turn into empty arrays
-				case dataValKind == reflect.Map:
-					if dataVal.Len() == 0 {
-						val.Set(reflect.Zero(arrayType))
-						return nil
-					}
-
-				// All other types are "lifted" into the array type,
-				// e.g. a string becomes a one-element string array.
-				default:
-					// Just re-try this function with data as a slice.
-					return d.decodeArray(name, []interface{}{data}, val)
-				}
-			}
-
-			return fmt.Errorf(
-				"'%s': source data must be an array or slice, got %s", name, dataValKind)
-
-		}
-		if dataVal.Len() > arrayType.Len() {
-			return fmt.Errorf(
-				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
-
-		}
-
-		// Make a new array of the target array type to hold our result.
-		valArray = reflect.New(arrayType).Elem()
-	}
-
-	// Accumulate any errors
-	errors := make([]string, 0)
-
-	for i := 0; i < dataVal.Len(); i++ {
-		currentData := dataVal.Index(i).Interface()
-		currentField := valArray.Index(i)
-
-		fieldName := name + "[" + strconv.Itoa(i) + "]"
-		if err := d.decode(fieldName, currentData, currentField); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	// Finally, set the value to the array we built up
-	val.Set(valArray)
-
-	// If there were errors, we return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-
-	// If the type of the value to write to and the data match directly,
-	// then we just set it directly instead of recursing into the structure.
-	if dataVal.Type() == val.Type() {
-		val.Set(dataVal)
-		return nil
-	}
-
-	dataValKind := dataVal.Kind()
-	switch dataValKind {
-	case reflect.Map:
-		return d.decodeStructFromMap(name, dataVal, val)
-
-	case reflect.Struct:
-		// Not the most efficient way to do this but we can optimize later if
-		// we want to. To convert from struct to struct we go to map first
-		// as an intermediary.
-
-		// Make a new map to hold our result
-		mapType := reflect.TypeOf((map[string]interface{})(nil))
-		mval := reflect.MakeMap(mapType)
-
-		// Creating a pointer to a map so that other methods can completely
-		// overwrite the map if need be (looking at you decodeMapFromMap). The
-		// indirection allows the underlying map to be settable (CanSet() == true)
-		// whereas reflect.MakeMap returns an unsettable map.
-		addrVal := reflect.New(mval.Type())
-
-		reflect.Indirect(addrVal).Set(mval)
-		if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
-			return err
-		}
-
-		result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
-		return result
-
-	default:
-		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
-	}
-}
-
-func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
-	dataValType := dataVal.Type()
-	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
-		return fmt.Errorf(
-			"'%s' needs a map with string keys, has '%s' keys",
-			name, dataValType.Key().Kind())
-	}
-
-	dataValKeys := make(map[reflect.Value]struct{})
-	dataValKeysUnused := make(map[interface{}]struct{})
-	for _, dataValKey := range dataVal.MapKeys() {
-		dataValKeys[dataValKey] = struct{}{}
-		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
-	}
-
-	errors := make([]string, 0)
-
-	// This slice will keep track of all the structs we'll be decoding.
-	// There can be more than one struct if there are embedded structs
-	// that are squashed.
-	structs := make([]reflect.Value, 1, 5)
-	structs[0] = val
-
-	// Compile the list of all the fields that we're going to be decoding
-	// from all the structs.
-	type field struct {
-		field reflect.StructField
-		val   reflect.Value
-	}
-
-	// remainField is set to the field tagged "remain", if we are
-	// keeping track of remaining values.
-	var remainField *field
-
-	fields := []field{}
-	for len(structs) > 0 {
-		structVal := structs[0]
-		structs = structs[1:]
-
-		structType := structVal.Type()
-
-		for i := 0; i < structType.NumField(); i++ {
-			fieldType := structType.Field(i)
-			fieldVal := structVal.Field(i)
-			if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
-				// Handle embedded struct pointers as embedded structs.
-				fieldVal = fieldVal.Elem()
-			}
-
-			// If "squash" is specified in the tag, we squash the field down.
-			squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
-			remain := false
-
-			// We always parse the tags because we're looking for other tags too
-			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
-			for _, tag := range tagParts[1:] {
-				if tag == "squash" {
-					squash = true
-					break
-				}
-
-				if tag == "remain" {
-					remain = true
-					break
-				}
-			}
-
-			if squash {
-				if fieldVal.Kind() != reflect.Struct {
-					errors = appendErrors(errors,
-						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
-				} else {
-					structs = append(structs, fieldVal)
-				}
-				continue
-			}
-
-			// Build our field
-			if remain {
-				remainField = &field{fieldType, fieldVal}
-			} else {
-				// Normal struct field, store it away
-				fields = append(fields, field{fieldType, fieldVal})
-			}
-		}
-	}
-
-	for _, f := range fields {
-		field, fieldValue := f.field, f.val
-		fieldName := field.Name
-
-		tagValue := field.Tag.Get(d.config.TagName)
-		tagValue = strings.SplitN(tagValue, ",", 2)[0]
-		if tagValue != "" {
-			fieldName = tagValue
-		}
-
-		rawMapKey := reflect.ValueOf(fieldName)
-		rawMapVal := dataVal.MapIndex(rawMapKey)
-		if !rawMapVal.IsValid() {
-			// Do a slower search by iterating over each key and
-			// doing case-insensitive search.
-			for dataValKey := range dataValKeys {
-				mK, ok := dataValKey.Interface().(string)
-				if !ok {
-					// Not a string key
-					continue
-				}
-
-				if d.config.MatchName(mK, fieldName) {
-					rawMapKey = dataValKey
-					rawMapVal = dataVal.MapIndex(dataValKey)
-					break
-				}
-			}
-
-			if !rawMapVal.IsValid() {
-				// There was no matching key in the map for the value in
-				// the struct. Just ignore.
-				continue
-			}
-		}
-
-		if !fieldValue.IsValid() {
-			// This should never happen
-			panic("field is not valid")
-		}
-
-		// If we can't set the field, then it is unexported or something,
-		// and we just continue onwards.
-		if !fieldValue.CanSet() {
-			continue
-		}
-
-		// Delete the key we're using from the unused map so we stop tracking
-		delete(dataValKeysUnused, rawMapKey.Interface())
-
-		// If the name is the empty string, then we're at the root,
-		// and we don't dot-join the fields.
-		if name != "" {
-			fieldName = name + "." + fieldName
-		}
-
-		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	// If we have a "remain"-tagged field and we have unused keys then
-	// we put the unused keys directly into the remain field.
-	if remainField != nil && len(dataValKeysUnused) > 0 {
-		// Build a map of only the unused values
-		remain := map[interface{}]interface{}{}
-		for key := range dataValKeysUnused {
-			remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
-		}
-
-		// Decode it as-if we were just decoding this map onto our map.
-		if err := d.decodeMap(name, remain, remainField.val); err != nil {
-			errors = appendErrors(errors, err)
-		}
-
-		// Clear the unused-keys map so that the ErrorUnused check
-		// below does not fire (the keys were consumed by "remain").
-		dataValKeysUnused = nil
-	}
-
-	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
-		keys := make([]string, 0, len(dataValKeysUnused))
-		for rawKey := range dataValKeysUnused {
-			keys = append(keys, rawKey.(string))
-		}
-		sort.Strings(keys)
-
-		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
-		errors = appendErrors(errors, err)
-	}
-
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	// Add the unused keys to the list of unused keys if we're tracking metadata
-	if d.config.Metadata != nil {
-		for rawKey := range dataValKeysUnused {
-			key := rawKey.(string)
-			if name != "" {
-				key = name + "." + key
-			}
-
-			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
-		}
-	}
-
-	return nil
-}
-
-func isEmptyValue(v reflect.Value) bool {
-	switch getKind(v) {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	}
-	return false
-}
-
-func getKind(val reflect.Value) reflect.Kind {
-	kind := val.Kind()
-
-	switch {
-	case kind >= reflect.Int && kind <= reflect.Int64:
-		return reflect.Int
-	case kind >= reflect.Uint && kind <= reflect.Uint64:
-		return reflect.Uint
-	case kind >= reflect.Float32 && kind <= reflect.Float64:
-		return reflect.Float32
-	default:
-		return kind
-	}
-}
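
For reviewers skimming the deletion above: the decode* helpers implement
mapstructure's weakly-typed coercion. With WeaklyTypedInput set, bools,
ints, uints, floats, and strings convert across kinds, and an empty
string decodes to the zero value. Below is a minimal standalone sketch
of that coercion pattern for bools, using only the standard library; it
illustrates the deleted logic and is not the removed API.

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// weakDecodeBool mirrors the deleted decodeBool case analysis: with
// weakly typed input, ints, uints, floats, and strings are coerced to
// bool, and the empty string decodes to false.
func weakDecodeBool(data interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(data))
	switch v.Kind() {
	case reflect.Bool:
		return v.Bool(), nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() != 0, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return v.Uint() != 0, nil
	case reflect.Float32, reflect.Float64:
		return v.Float() != 0, nil
	case reflect.String:
		if v.String() == "" {
			return false, nil // empty input decodes to the zero value
		}
		return strconv.ParseBool(v.String())
	default:
		return false, fmt.Errorf("cannot decode %s as bool", v.Kind())
	}
}

func main() {
	for _, in := range []interface{}{true, 1, 0.0, "true", ""} {
		b, err := weakDecodeBool(in)
		fmt.Printf("%-6v -> %v (err: %v)\n", in, b, err)
	}
}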
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8150ab970..dac0c8fca 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.47.5-0.20220421072908-49f1a40067b2
+# github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb
 ## explicit
 github.com/containers/common/libimage
 github.com/containers/common/libimage/manifests
@@ -153,7 +153,7 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0
+# github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c
 ## explicit
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -233,7 +233,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.39.1-0.20220414183333-eea4e0f5f1f9
+# github.com/containers/storage v1.39.1-0.20220421071128-4899f8265d63
 ## explicit
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -475,8 +475,6 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/miekg/pkcs11
 # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 github.com/mistifyio/go-zfs
-# github.com/mitchellh/mapstructure v1.4.3
-github.com/mitchellh/mapstructure
 # github.com/moby/sys/mount v0.2.0
 github.com/moby/sys/mount
 # github.com/moby/sys/mountinfo v0.6.1
@@ -645,7 +643,7 @@ github.com/stefanberger/go-pkcs11uri
 ## explicit
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.4.2
+# github.com/sylabs/sif/v2 v2.6.0
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
-- 
cgit v1.2.3-54-g00ecf
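
With the vendored copy deleted and the mitchellh/mapstructure entry
dropped from vendor/modules.txt, nothing in this tree imports the
package any longer; vendor/modules.txt is generated by `go mod vendor`,
so the hunk above is the mechanical result of dropping the requirement.
For downstream consumers who still need the library from upstream, a
short sketch of typical usage follows, exercising the WeaklyTypedInput
coercion and the ",remain" tag handled by the deleted decoder; the
Config type and its fields here are hypothetical.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Config is a hypothetical target type; "mapstructure" is the
// library's default struct-tag name.
type Config struct {
	Name string                 `mapstructure:"name"`
	Port int                    `mapstructure:"port"`
	Rest map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"name":  "registry",
		"port":  "5000", // string coerced to int by WeaklyTypedInput
		"debug": true,   // no matching field: collected via ",remain"
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &cfg,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:registry Port:5000 Rest:map[debug:true]}
}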