270 files changed, 14414 insertions, 2698 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 223fc5b53..f94ee2f3b 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -34,13 +34,13 @@ env: # Image identifiers IMAGE_SUFFIX: "c5495735033528320" - FEDORA_AMI_ID: "ami-0df5df528071f1052" # matches c5495735033528320 - FEDORA_AARCH64_AMI_ID: "ami-02ee8b3a782a78791" # matches c5495735033528320 - # Complete image names + # EC2 images + FEDORA_AMI: "fedora-aws-${IMAGE_SUFFIX}" + FEDORA_AARCH64_AMI: "fedora-podman-aws-arm64-${IMAGE_SUFFIX}" + # GCP Images FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" #PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" - # Container FQIN's FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" #PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}" @@ -210,7 +210,7 @@ build_aarch64_task: env: &stdenvars_aarch64 EC2_INST_TYPE: "t4g.xlarge" DISTRO_NV: ${FEDORA_AARCH64_NAME} - VM_IMAGE_NAME: ${FEDORA_AARCH64_AMI_ID} + VM_IMAGE_NAME: ${FEDORA_AARCH64_AMI} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} CI_DESIRED_RUNTIME: crun TEST_FLAVOR: build @@ -308,6 +308,7 @@ bindings_task: only_if: >- $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:COPR.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' depends_on: - build @@ -415,9 +416,7 @@ alt_build_task: alias: alt_build # Don't create task for [CI:DOCS] or multiarch builds # Docs: ./contrib/cirrus/CIModes.md - only_if: &not_docs_multiarch >- - $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && - $CIRRUS_CRON != 'multiarch' + only_if: $CIRRUS_CRON != 'multiarch' depends_on: - build env: @@ -450,7 +449,7 @@ osx_alt_build_task: name: "OSX Cross" alias: osx_alt_build # Docs: ./contrib/cirrus/CIModes.md - only_if: *not_docs_multiarch + only_if: $CIRRUS_CRON != 'multiarch' depends_on: - build env: @@ -485,6 +484,7 @@ docker-py_test_task: only_if: &not_tag_branch_build_docs >- $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:COPR.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' depends_on: @@ -636,13 +636,11 @@ container_integration_test_task: matrix: &fedora_vm_axis - env: DISTRO_NV: ${FEDORA_NAME} - _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID} VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} CI_DESIRED_RUNTIME: crun #- env: #DISTRO_NV: ${PRIOR_FEDORA_NAME} - #_BUILD_CACHE_HANDLE: ${PRIOR_FEDORA_NAME}-build-${CIRRUS_BUILD_ID} #VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME} #CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN} gce_instance: *standardvm @@ -703,7 +701,7 @@ podman_machine_task: TEST_FLAVOR: "machine" PRIV_NAME: "rootless" # intended use-case DISTRO_NV: "${FEDORA_NAME}" - VM_IMAGE_NAME: "${FEDORA_AMI_ID}" + VM_IMAGE_NAME: "${FEDORA_AMI}" clone_script: *get_gosrc setup_script: *setup main_script: *main @@ -722,6 +720,7 @@ local_system_test_task: &local_system_test_task only_if: &not_tag_build_docs_multiarch >- $CIRRUS_TAG == '' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:COPR.*' && $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && $CIRRUS_CRON != 'multiarch' depends_on: @@ -787,8 +786,6 @@ rootless_remote_system_test_task: # Not used here, is used in other tasks VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - # ID for re-use of build output - _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID} CI_DESIRED_RUNTIME: crun <<: *local_system_test_task alias: rootless_remote_system_test @@ -835,8 +832,6 @@
buildah_bud_test_task: # Not used here, is used in other tasks VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} CTR_FQIN: ${FEDORA_CONTAINER_FQIN} - # ID for re-use of build output - _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID} matrix: - env: PODBIN_NAME: podman @@ -898,8 +893,6 @@ upgrade_test_task: TEST_FLAVOR: upgrade_test DISTRO_NV: ${FEDORA_NAME} VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME} - # ID for re-use of build output - _BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID} clone_script: *get_gosrc setup_script: *setup main_script: *main @@ -1036,7 +1029,10 @@ artifacts_task: name: "Artifacts" alias: artifacts # Docs: ./contrib/cirrus/CIModes.md - only_if: *not_docs_multiarch + only_if: >- + $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:COPR.*' && + $CIRRUS_CRON != 'multiarch' depends_on: - success # This task is a secondary/convenience for downstream consumers, don't diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eddd35cba..a5ee0c1df 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ - + # Contributing to Podman We'd love to have you join the community! Below summarizes the processes @@ -1,4 +1,4 @@ - + # Podman: A tool for managing OCI containers and pods diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go index 02369c74a..71c62a7af 100644 --- a/cmd/podman/common/completion.go +++ b/cmd/podman/common/completion.go @@ -13,6 +13,7 @@ import ( libimageDefine "github.com/containers/common/libimage/define" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/podman/v4/cmd/podman/registry" "github.com/containers/podman/v4/libpod/define" @@ -1628,3 +1629,11 @@ func AutocompleteClone(cmd *cobra.Command, args []string, toComplete string) ([] } return nil, cobra.ShellCompDirectiveNoFileComp } + +// AutocompleteSSH - Autocomplete ssh modes +func AutocompleteSSH(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if !validCurrentCmdLine(cmd, args, toComplete) { + return nil, cobra.ShellCompDirectiveNoFileComp + } + return []string{string(ssh.GolangMode), string(ssh.NativeMode)}, cobra.ShellCompDirectiveNoFileComp +} diff --git a/cmd/podman/common/sign.go b/cmd/podman/common/sign.go index e8a90ed57..dc0d3ff5d 100644 --- a/cmd/podman/common/sign.go +++ b/cmd/podman/common/sign.go @@ -3,9 +3,9 @@ package common import ( "fmt" + "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/pkg/cli" "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/terminal" ) // PrepareSigningPassphrase updates pushOpts.SignPassphrase and SignSigstorePrivateKeyPassphrase based on a --sign-passphrase-file value signPassphraseFile, @@ -27,7 +27,7 @@ func PrepareSigningPassphrase(pushOpts *entities.ImagePushOptions, signPassphras } passphrase = p } else if pushOpts.SignBySigstorePrivateKeyFile != "" { - p := terminal.ReadPassphrase() + p := ssh.ReadPassphrase() passphrase = string(p) } // pushOpts.SignBy triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided. 
pushOpts.SignPassphrase = passphrase diff --git a/cmd/podman/containers/checkpoint.go b/cmd/podman/containers/checkpoint.go index 0eb0db394..4350e5586 100644 --- a/cmd/podman/containers/checkpoint.go +++ b/cmd/podman/containers/checkpoint.go @@ -130,14 +130,15 @@ func checkpoint(cmd *cobra.Command, args []string) error { var statistics checkpointStatistics for _, r := range responses { - if r.Err == nil { - if checkpointOptions.PrintStats { - statistics.ContainerStatistics = append(statistics.ContainerStatistics, r) - } else { - fmt.Println(r.Id) - } - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case checkpointOptions.PrintStats: + statistics.ContainerStatistics = append(statistics.ContainerStatistics, r) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } diff --git a/cmd/podman/containers/cleanup.go b/cmd/podman/containers/cleanup.go index c9a5cb28b..6abc5df19 100644 --- a/cmd/podman/containers/cleanup.go +++ b/cmd/podman/containers/cleanup.go @@ -84,21 +84,20 @@ func cleanup(cmd *cobra.Command, args []string) error { return err } for _, r := range responses { - if r.CleanErr == nil && r.RmErr == nil && r.RmiErr == nil { - fmt.Println(r.Id) - continue - } - if r.RmErr != nil { + switch { + case r.RmErr != nil: logrus.Errorf("Removing container: %v", r.RmErr) errs = append(errs, r.RmErr) - } - if r.RmiErr != nil { + case r.RmiErr != nil: logrus.Errorf("Removing image: %v", r.RmiErr) errs = append(errs, r.RmiErr) - } - if r.CleanErr != nil { + case r.CleanErr != nil: logrus.Errorf("Cleaning up container: %v", r.CleanErr) errs = append(errs, r.CleanErr) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } return errs.PrintErrors() diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go index 7d0f4d9ae..455127fd7 100644 --- a/cmd/podman/containers/create.go +++ b/cmd/podman/containers/create.go @@ -192,16 +192,14 @@ func replaceContainer(name string) error { } func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra bool) (entities.ContainerCreateOptions, error) { - vals.UserNS = c.Flag("userns").Value.String() - // if user did not modify --userns flag and did turn on - // uid/gid mappings, set userns flag to "private" - if !c.Flag("userns").Changed && vals.UserNS == "host" { - if len(vals.UIDMap) > 0 || - len(vals.GIDMap) > 0 || - vals.SubUIDName != "" || - vals.SubGIDName != "" { - vals.UserNS = "private" + if len(vals.UIDMap) > 0 || len(vals.GIDMap) > 0 || vals.SubUIDName != "" || vals.SubGIDName != "" { + if c.Flag("userns").Changed { + return vals, errors.New("--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive") } + // force userns flag to "private" + vals.UserNS = "private" + } else { + vals.UserNS = c.Flag("userns").Value.String() } if c.Flag("kernel-memory") != nil && c.Flag("kernel-memory").Changed { logrus.Warnf("The --kernel-memory flag is no longer supported. 
This flag is a noop.") diff --git a/cmd/podman/containers/init.go b/cmd/podman/containers/init.go index 649cdf1c9..8f53dca45 100644 --- a/cmd/podman/containers/init.go +++ b/cmd/podman/containers/init.go @@ -74,10 +74,13 @@ func initContainer(cmd *cobra.Command, args []string) error { return err } for _, r := range report { - if r.Err == nil { - fmt.Println(r.Id) - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } return errs.PrintErrors() diff --git a/cmd/podman/containers/pause.go b/cmd/podman/containers/pause.go index 53aa423ac..38c4f45aa 100644 --- a/cmd/podman/containers/pause.go +++ b/cmd/podman/containers/pause.go @@ -113,10 +113,13 @@ func pause(cmd *cobra.Command, args []string) error { return err } for _, r := range responses { - if r.Err == nil { - fmt.Println(r.RawInput) - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } return errs.PrintErrors() diff --git a/cmd/podman/containers/restore.go b/cmd/podman/containers/restore.go index 6106f2bed..ee01e19b8 100644 --- a/cmd/podman/containers/restore.go +++ b/cmd/podman/containers/restore.go @@ -193,14 +193,15 @@ func restore(cmd *cobra.Command, args []string) error { var statistics restoreStatistics for _, r := range responses { - if r.Err == nil { - if restoreOptions.PrintStats { - statistics.ContainerStatistics = append(statistics.ContainerStatistics, r) - } else { - fmt.Println(r.Id) - } - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case restoreOptions.PrintStats: + statistics.ContainerStatistics = append(statistics.ContainerStatistics, r) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go index cd4fa17b8..061f0953d 100644 --- a/cmd/podman/containers/start.go +++ b/cmd/podman/containers/start.go @@ -59,8 +59,10 @@ func startFlags(cmd *cobra.Command) { flags.BoolVarP(&startOptions.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached") flags.BoolVar(&startOptions.SigProxy, "sig-proxy", false, "Proxy received signals to the process (default true if attaching, false otherwise)") - flags.StringSliceVarP(&filters, "filter", "f", []string{}, "Filter output based on conditions given") - _ = cmd.RegisterFlagCompletionFunc("filter", common.AutocompletePsFilters) + + filterFlagName := "filter" + flags.StringSliceVarP(&filters, filterFlagName, "f", []string{}, "Filter output based on conditions given") + _ = cmd.RegisterFlagCompletionFunc(filterFlagName, common.AutocompletePsFilters) flags.BoolVar(&startOptions.All, "all", false, "Start all containers regardless of their state or configuration") @@ -84,7 +86,7 @@ func init() { } func validateStart(cmd *cobra.Command, args []string) error { - if len(args) == 0 && !startOptions.Latest && !startOptions.All { + if len(args) == 0 && !startOptions.Latest && !startOptions.All && len(filters) < 1 { return errors.New("start requires at least one argument") } if startOptions.All && startOptions.Latest { @@ -123,33 +125,30 @@ func start(cmd *cobra.Command, args []string) error { } containers := args - if len(filters) > 0 { - for _, f := range filters { - split := strings.SplitN(f, "=", 2) - if len(split) == 1 { - return fmt.Errorf("invalid filter %q", f) - } - startOptions.Filters[split[0]] = append(startOptions.Filters[split[0]], 
split[1]) + for _, f := range filters { + split := strings.SplitN(f, "=", 2) + if len(split) < 2 { + return fmt.Errorf("invalid filter %q", f) } + startOptions.Filters[split[0]] = append(startOptions.Filters[split[0]], split[1]) } responses, err := registry.ContainerEngine().ContainerStart(registry.GetContext(), containers, startOptions) if err != nil { return err } - for _, r := range responses { - if r.Err == nil { - if startOptions.Attach { - // Implement the exitcode when the only one container is enabled attach - registry.SetExitCode(r.ExitCode) - } else { - fmt.Println(r.RawInput) - } - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case startOptions.Attach: + // Implement the exitcode when the only one container is enabled attach + registry.SetExitCode(r.ExitCode) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } - return errs.PrintErrors() } diff --git a/cmd/podman/containers/stop.go b/cmd/podman/containers/stop.go index b0f449266..7e31aa7d5 100644 --- a/cmd/podman/containers/stop.go +++ b/cmd/podman/containers/stop.go @@ -126,10 +126,13 @@ func stop(cmd *cobra.Command, args []string) error { return err } for _, r := range responses { - if r.Err == nil { - fmt.Println(r.RawInput) - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } return errs.PrintErrors() diff --git a/cmd/podman/containers/unpause.go b/cmd/podman/containers/unpause.go index 4282e490e..617c0e3a4 100644 --- a/cmd/podman/containers/unpause.go +++ b/cmd/podman/containers/unpause.go @@ -121,10 +121,13 @@ func unpause(cmd *cobra.Command, args []string) error { } for _, r := range responses { - if r.Err == nil { - fmt.Println(r.RawInput) - } else { + switch { + case r.Err != nil: errs = append(errs, r.Err) + case r.RawInput != "": + fmt.Println(r.RawInput) + default: + fmt.Println(r.Id) } } return errs.PrintErrors() diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go index 9f1b86eb4..837b233f4 100644 --- a/cmd/podman/images/build.go +++ b/cmd/podman/images/build.go @@ -18,6 +18,7 @@ import ( "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker/reference" encconfig "github.com/containers/ocicrypt/config" enchelpers "github.com/containers/ocicrypt/helpers" "github.com/containers/podman/v4/cmd/podman/common" @@ -184,7 +185,6 @@ func buildFlags(cmd *cobra.Command) { flags.SetNormalizeFunc(buildahCLI.AliasFlags) if registry.IsRemote() { _ = flags.MarkHidden("disable-content-trust") - _ = flags.MarkHidden("cache-from") _ = flags.MarkHidden("sign-by") _ = flags.MarkHidden("signature-policy") _ = flags.MarkHidden("tls-verify") @@ -519,6 +519,27 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil } } } + var cacheTo reference.Named + var cacheFrom reference.Named + if c.Flag("cache-to").Changed { + cacheTo, err = parse.RepoNameToNamedReference(flags.CacheTo) + if err != nil { + return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", flags.CacheTo, err) + } + } + if c.Flag("cache-from").Changed { + cacheFrom, err = parse.RepoNameToNamedReference(flags.CacheFrom) + if err != nil { + return nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", flags.CacheFrom, err) + } + } + var cacheTTL time.Duration + if c.Flag("cache-ttl").Changed { + cacheTTL, err =
time.ParseDuration(flags.CacheTTL) + if err != nil { + return nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", flags.CacheTTL, err) + } + } opts := buildahDefine.BuildOptions{ AddCapabilities: flags.CapAdd, @@ -529,6 +550,9 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil Args: args, BlobDirectory: flags.BlobCache, BuildOutput: flags.BuildOutput, + CacheFrom: cacheFrom, + CacheTo: cacheTo, + CacheTTL: cacheTTL, CommonBuildOpts: commonOpts, Compression: compression, ConfigureNetwork: networkPolicy, diff --git a/cmd/podman/images/rm.go b/cmd/podman/images/rm.go index d3fd17440..4e4b001ad 100644 --- a/cmd/podman/images/rm.go +++ b/cmd/podman/images/rm.go @@ -61,6 +61,7 @@ func imageRemoveFlagSet(flags *pflag.FlagSet) { flags.BoolVarP(&imageOpts.All, "all", "a", false, "Remove all images") flags.BoolVarP(&imageOpts.Ignore, "ignore", "i", false, "Ignore errors if a specified image does not exist") flags.BoolVarP(&imageOpts.Force, "force", "f", false, "Force Removal of the image") + flags.BoolVar(&imageOpts.NoPrune, "no-prune", false, "Do not remove dangling images") } func rm(cmd *cobra.Command, args []string) error { diff --git a/cmd/podman/images/scp.go b/cmd/podman/images/scp.go index a7aa43e61..18899d251 100644 --- a/cmd/podman/images/scp.go +++ b/cmd/podman/images/scp.go @@ -4,6 +4,7 @@ import ( "os" "strings" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/cmd/podman/common" "github.com/containers/podman/v4/cmd/podman/registry" "github.com/spf13/cobra" @@ -48,6 +49,11 @@ func scp(cmd *cobra.Command, args []string) (finalErr error) { var ( err error ) + + containerConfig := registry.PodmanConfig() + + sshType := containerConfig.SSHMode + for i, val := range os.Args { if val == "image" { break @@ -67,7 +73,8 @@ func scp(cmd *cobra.Command, args []string) (finalErr error) { dst = args[1] } - err = registry.ImageEngine().Scp(registry.Context(), src, dst, parentFlags, quiet) + sshEngine := ssh.DefineMode(sshType) + err = registry.ImageEngine().Scp(registry.Context(), src, dst, parentFlags, quiet, sshEngine) if err != nil { return err } diff --git a/cmd/podman/machine/platform.go b/cmd/podman/machine/platform.go index 5ba649a48..3a7e56ac7 100644 --- a/cmd/podman/machine/platform.go +++ b/cmd/podman/machine/platform.go @@ -9,5 +9,5 @@ import ( ) func GetSystemDefaultProvider() machine.Provider { - return qemu.GetQemuProvider() + return qemu.GetVirtualizationProvider() } diff --git a/cmd/podman/root.go b/cmd/podman/root.go index 48f8470be..3637b2674 100644 --- a/cmd/podman/root.go +++ b/cmd/podman/root.go @@ -12,6 +12,7 @@ import ( "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/cmd/podman/common" "github.com/containers/podman/v4/cmd/podman/registry" "github.com/containers/podman/v4/cmd/podman/validate" @@ -338,6 +339,10 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) { lFlags := cmd.Flags() + sshFlagName := "ssh" + lFlags.StringVar(&opts.SSHMode, sshFlagName, string(ssh.GolangMode), "define the ssh mode") + _ = cmd.RegisterFlagCompletionFunc(sshFlagName, common.AutocompleteSSH) + connectionFlagName := "connection" lFlags.StringVarP(&opts.Engine.ActiveService, connectionFlagName, "c", srv, "Connection to use for remote Podman service") _ = cmd.RegisterFlagCompletionFunc(connectionFlagName, common.AutocompleteSystemConnections) diff --git a/cmd/podman/system/connection/add.go 
b/cmd/podman/system/connection/add.go index 191603718..f3b61b254 100644 --- a/cmd/podman/system/connection/add.go +++ b/cmd/podman/system/connection/add.go @@ -1,23 +1,19 @@ package connection import ( - "encoding/json" "errors" "fmt" - "net" "net/url" "os" "regexp" "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/cmd/podman/registry" "github.com/containers/podman/v4/cmd/podman/system" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "golang.org/x/crypto/ssh" ) var ( @@ -74,6 +70,15 @@ func init() { func add(cmd *cobra.Command, args []string) error { // Default to ssh schema if none given + + entities := &ssh.ConnectionCreateOptions{ + Port: cOpts.Port, + Path: args[1], + Identity: cOpts.Identity, + Name: args[0], + Socket: cOpts.UDSPath, + Default: cOpts.Default, + } dest := args[1] if match, err := regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(dest)); err != nil { return fmt.Errorf("invalid destination: %w", err) @@ -89,30 +94,20 @@ func add(cmd *cobra.Command, args []string) error { uri.Path = cmd.Flag("socket-path").Value.String() } - switch uri.Scheme { - case "ssh": - if uri.User.Username() == "" { - if uri.User, err = utils.GetUserInfo(uri); err != nil { - return err - } - } + var sshMode ssh.EngineMode + containerConfig := registry.PodmanConfig() - if cmd.Flags().Changed("port") { - uri.Host = net.JoinHostPort(uri.Hostname(), cmd.Flag("port").Value.String()) - } + flag := containerConfig.SSHMode - if uri.Port() == "" { - uri.Host = net.JoinHostPort(uri.Hostname(), cmd.Flag("port").DefValue) - } - iden := "" - if cmd.Flags().Changed("identity") { - iden = cOpts.Identity - } - if uri.Path == "" || uri.Path == "/" { - if uri.Path, err = getUDS(uri, iden); err != nil { - return err - } - } + sshMode = ssh.DefineMode(flag) + + if sshMode == ssh.InvalidMode { + return fmt.Errorf("invalid ssh mode") + } + + switch uri.Scheme { + case "ssh": + return ssh.Create(entities, sshMode) case "unix": if cmd.Flags().Changed("identity") { return errors.New("--identity option not supported for unix scheme") @@ -176,41 +171,3 @@ func add(cmd *cobra.Command, args []string) error { } return cfg.Write() } - -func getUDS(uri *url.URL, iden string) (string, error) { - cfg, err := utils.ValidateAndConfigure(uri, iden) - if err != nil { - return "", fmt.Errorf("failed to validate: %w", err) - } - dial, err := ssh.Dial("tcp", uri.Host, cfg) - if err != nil { - return "", fmt.Errorf("failed to connect: %w", err) - } - defer dial.Close() - - session, err := dial.NewSession() - if err != nil { - return "", fmt.Errorf("failed to create new ssh session on %q: %w", uri.Host, err) - } - defer session.Close() - - // Override podman binary for testing etc - podman := "podman" - if v, found := os.LookupEnv("PODMAN_BINARY"); found { - podman = v - } - infoJSON, err := utils.ExecRemoteCommand(dial, podman+" info --format=json") - if err != nil { - return "", err - } - - var info define.Info - if err := json.Unmarshal(infoJSON, &info); err != nil { - return "", fmt.Errorf("failed to parse 'podman info' results: %w", err) - } - - if info.Host.RemoteSocket == nil || len(info.Host.RemoteSocket.Path) == 0 { - return "", fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) - } - return info.Host.RemoteSocket.Path, nil -} diff --git a/commands-demo.md b/commands-demo.md index 
dac279192..f3a1216a1 100644 --- a/commands-demo.md +++ b/commands-demo.md @@ -1,4 +1,4 @@ - + # libpod - library for running OCI-based containers in Pods diff --git a/commands.md b/commands.md index 97b4c69a9..3970d0657 100644 --- a/commands.md +++ b/commands.md @@ -1,4 +1,4 @@ - + # libpod - library for running OCI-based containers in Pods diff --git a/contrib/cirrus/CIModes.md b/contrib/cirrus/CIModes.md index c782ca64b..0b5a189a6 100644 --- a/contrib/cirrus/CIModes.md +++ b/contrib/cirrus/CIModes.md @@ -85,6 +85,16 @@ of this document, it's not possible to override the behavior of `$CIRRUS_PR`. + meta + success +### Intended `[CI:COPR]` PR Tasks: ++ ext_svc_check ++ automation ++ *build* ++ validate ++ swagger ++ consistency ++ meta ++ success + ### Intend `[CI:BUILD]` PR Tasks: + ext_svc_check + automation diff --git a/contrib/hello/README.md b/contrib/hello/README.md index 528466f7b..0fb430721 100644 --- a/contrib/hello/README.md +++ b/contrib/hello/README.md @@ -1,4 +1,4 @@ - + # Podman Hello World image diff --git a/contrib/pkginstaller/package.sh b/contrib/pkginstaller/package.sh index bb91fe01f..f6f7cef16 100755 --- a/contrib/pkginstaller/package.sh +++ b/contrib/pkginstaller/package.sh @@ -17,8 +17,12 @@ arch=$(cat "${BASEDIR}/ARCH") function build_podman() { pushd "$1" - make GOARCH="${arch}" podman-remote HELPER_BINARIES_DIR="${HELPER_BINARIES_DIR}" - make GOARCH="${arch}" podman-mac-helper + local goArch="${arch}" + if [ "${goArch}" = aarch64 ]; then + goArch=arm64 + fi + make GOARCH="${goArch}" podman-remote HELPER_BINARIES_DIR="${HELPER_BINARIES_DIR}" + make GOARCH="${goArch}" podman-mac-helper cp bin/darwin/podman "contrib/pkginstaller/out/packaging/${binDir}/podman" cp bin/darwin/podman-mac-helper "contrib/pkginstaller/out/packaging/${binDir}/podman-mac-helper" popd diff --git a/contrib/podmanimage/README.md b/contrib/podmanimage/README.md index 0f4f715ad..ab406a56a 100644 --- a/contrib/podmanimage/README.md +++ b/contrib/podmanimage/README.md @@ -1,4 +1,4 @@ - + # podmanimage diff --git a/docs/source/markdown/.gitignore b/docs/source/markdown/.gitignore index c441d74c5..6689b5b71 100644 --- a/docs/source/markdown/.gitignore +++ b/docs/source/markdown/.gitignore @@ -1,2 +1,8 @@ +podman-build.1.md +podman-container-clone.1.md podman-create.1.md +podman-kube-play.1.md +podman-pod-clone.1.md +podman-pod-create.1.md +podman-pull.1.md podman-run.1.md diff --git a/docs/source/markdown/options/README.md b/docs/source/markdown/options/README.md new file mode 100644 index 000000000..b737fabf7 --- /dev/null +++ b/docs/source/markdown/options/README.md @@ -0,0 +1,44 @@ +Common Man Page Options +======================= + +This subdirectory contains option (flag) names and descriptions +common to multiple podman man pages. Each file is one option. The +filename does not necessarily need to be identical to the option +name: for instance, `hostname.container.md` and `hostname.pod.md` +exist because the **--hostname** option is sufficiently different +between `podman-{create,run}` and `podman-pod-{create,run}` to +warrant living separately. + +How +=== + +The files here are included in `podman-*.md.in` files using the `@@option` +mechanism: + +``` +@@option foo ! will include options/foo.md +``` + +The tool that does this is `hack/markdown-preprocess`. It is a python +script because it needs to run on `readthedocs.io`. From a given `.md.in` +file, this script will create a `.md` file that can then be read by +`go-md2man`, `sphinx`, anything that groks markdown. 
This runs as +part of `make docs`. + +Special Substitutions +===================== + +Some options are almost identical except for 'pod' vs 'container' +differences. For those, use `<<text for pods|text for containers>>`. +Order is immaterial: the important thing is the presence of the +string "`pod`" in one half but not the other. The correct string +will be chosen based on the filename: if the file contains `-pod`, +such as `podman-pod-create`, the string with `pod` (case-insensitive) +in it will be chosen. + +The string `<<subcommand>>` will be replaced with the podman subcommand +as determined from the filename, e.g., `create` for `podman-create.1.md.in`. +This allows the shared use of examples in the option file: +``` + Example: podman <<subcommand>> --foo --bar +``` diff --git a/docs/source/markdown/options/add-host.md b/docs/source/markdown/options/add-host.md new file mode 100644 index 000000000..a6021cd3b --- /dev/null +++ b/docs/source/markdown/options/add-host.md @@ -0,0 +1,6 @@ +#### **--add-host**=*host:ip* + +Add a custom host-to-IP mapping (host:ip) + +Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. Conflicts with the **--no-hosts** option. diff --git a/docs/source/markdown/options/blkio-weight-device.md b/docs/source/markdown/options/blkio-weight-device.md new file mode 100644 index 000000000..55c5fd4fd --- /dev/null +++ b/docs/source/markdown/options/blkio-weight-device.md @@ -0,0 +1,3 @@ +#### **--blkio-weight-device**=*device:weight* + +Block IO relative device weight. diff --git a/docs/source/markdown/options/blkio-weight.md b/docs/source/markdown/options/blkio-weight.md new file mode 100644 index 000000000..eb8e94144 --- /dev/null +++ b/docs/source/markdown/options/blkio-weight.md @@ -0,0 +1,3 @@ +#### **--blkio-weight**=*weight* + +Block IO relative weight. The _weight_ is a value between **10** and **1000**. diff --git a/docs/source/markdown/options/cap-add.md b/docs/source/markdown/options/cap-add.md new file mode 100644 index 000000000..4476afebb --- /dev/null +++ b/docs/source/markdown/options/cap-add.md @@ -0,0 +1,3 @@ +#### **--cap-add**=*capability* + +Add Linux capabilities. diff --git a/docs/source/markdown/options/cap-drop.md b/docs/source/markdown/options/cap-drop.md new file mode 100644 index 000000000..8d7371ea2 --- /dev/null +++ b/docs/source/markdown/options/cap-drop.md @@ -0,0 +1,3 @@ +#### **--cap-drop**=*capability* + +Drop Linux capabilities. diff --git a/docs/source/markdown/options/cgroup-parent.md b/docs/source/markdown/options/cgroup-parent.md new file mode 100644 index 000000000..f376e4860 --- /dev/null +++ b/docs/source/markdown/options/cgroup-parent.md @@ -0,0 +1,5 @@ +#### **--cgroup-parent**=*path* + +Path to cgroups under which the cgroup for the <<container|pod>> will be created. If the +path is not absolute, the path is considered to be relative to the cgroups path +of the init process. Cgroups will be created if they do not already exist. diff --git a/docs/source/markdown/options/cgroupns.md b/docs/source/markdown/options/cgroupns.md new file mode 100644 index 000000000..6117b3fe1 --- /dev/null +++ b/docs/source/markdown/options/cgroupns.md @@ -0,0 +1,10 @@ +#### **--cgroupns**=*mode* + +Set the cgroup namespace mode for the container. + +- **host**: use the host's cgroup namespace inside the container. +- **container:**_id_: join the namespace of the specified container. +- **private**: create a new cgroup namespace. +- **ns:**_path_: join the namespace at the specified path. 
+ +If the host uses cgroups v1, the default is set to **host**. On cgroups v2, the default is **private**. diff --git a/docs/source/markdown/options/cgroups.md b/docs/source/markdown/options/cgroups.md new file mode 100644 index 000000000..0dbbb15f3 --- /dev/null +++ b/docs/source/markdown/options/cgroups.md @@ -0,0 +1,10 @@ +#### **--cgroups**=*how* + +Determines whether the container will create CGroups. + +Default is **enabled**. + +The **enabled** option will create a new cgroup under the cgroup-parent. +The **disabled** option will force the container to not create CGroups, and thus conflicts with CGroup options (**--cgroupns** and **--cgroup-parent**). +The **no-conmon** option disables a new CGroup only for the **conmon** process. +The **split** option splits the current CGroup in two sub-cgroups: one for conmon and one for the container payload. It is not possible to set **--cgroup-parent** with **split**. diff --git a/docs/source/markdown/options/conmon-pidfile.md b/docs/source/markdown/options/conmon-pidfile.md new file mode 100644 index 000000000..e0e337cc8 --- /dev/null +++ b/docs/source/markdown/options/conmon-pidfile.md @@ -0,0 +1,4 @@ +#### **--conmon-pidfile**=*file* + +Write the pid of the **conmon** process to a file. As **conmon** runs in a separate process than Podman, this is necessary when using systemd to restart Podman containers. +(This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) diff --git a/docs/source/markdown/options/cpu-period.md b/docs/source/markdown/options/cpu-period.md new file mode 100644 index 000000000..8df6445e9 --- /dev/null +++ b/docs/source/markdown/options/cpu-period.md @@ -0,0 +1,10 @@ +#### **--cpu-period**=*limit* + +Set the CPU period for the Completely Fair Scheduler (CFS), which is a +duration in microseconds. Once the container's CPU quota is used up, it will +not be scheduled to run until the current period ends. Defaults to 100000 +microseconds. + +On some systems, changing the CPU limits may not be allowed for non-root +users. For more details, see +https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error diff --git a/docs/source/markdown/options/cpu-quota.md b/docs/source/markdown/options/cpu-quota.md new file mode 100644 index 000000000..67b9dee8c --- /dev/null +++ b/docs/source/markdown/options/cpu-quota.md @@ -0,0 +1,12 @@ +#### **--cpu-quota**=*limit* + +Limit the CPU Completely Fair Scheduler (CFS) quota. + +Limit the container's CPU usage. By default, containers run with the full +CPU resource. The limit is a number in microseconds. If a number is provided, +the container will be allowed to use that much CPU time until the CPU period +ends (controllable via **--cpu-period**). + +On some systems, changing the CPU limits may not be allowed for non-root +users. For more details, see +https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error diff --git a/docs/source/markdown/options/cpu-rt-period.md b/docs/source/markdown/options/cpu-rt-period.md new file mode 100644 index 000000000..9014beb33 --- /dev/null +++ b/docs/source/markdown/options/cpu-rt-period.md @@ -0,0 +1,7 @@ +#### **--cpu-rt-period**=*microseconds* + +Limit the CPU real-time period in microseconds. + +Limit the container's Real Time CPU usage. This option tells the kernel to restrict the container's Real Time CPU usage to the period specified. 
+ +This option is not supported on cgroups V2 systems. diff --git a/docs/source/markdown/options/cpu-rt-runtime.md b/docs/source/markdown/options/cpu-rt-runtime.md new file mode 100644 index 000000000..05b1d3b96 --- /dev/null +++ b/docs/source/markdown/options/cpu-rt-runtime.md @@ -0,0 +1,10 @@ +#### **--cpu-rt-runtime**=*microseconds* + +Limit the CPU real-time runtime in microseconds. + +Limit the containers Real Time CPU usage. This option tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: +Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + +The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +This option is not supported on cgroups V2 systems. diff --git a/docs/source/markdown/options/cpu-shares.md b/docs/source/markdown/options/cpu-shares.md new file mode 100644 index 000000000..a5aacd2ca --- /dev/null +++ b/docs/source/markdown/options/cpu-shares.md @@ -0,0 +1,35 @@ +#### **--cpu-shares**, **-c**=*shares* + +CPU shares (relative weight). + +By default, all containers get the same proportion of CPU cycles. This +proportion can be modified by changing the container's CPU share weighting +relative to the combined weight of all the running containers. +Default weight is **1024**. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If a fourth container is added with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. +If the container _C0_ is started with **--cpu-shares=512** running one process, +and another container _C1_ with **--cpu-shares=1024** running two processes, +this can result in the following division of CPU shares: + +| PID | container | CPU | CPU share | +| ---- | ----------- | ------- | ------------ | +| 100 | C0 | 0 | 100% of CPU0 | +| 101 | C1 | 1 | 100% of CPU1 | +| 102 | C1 | 2 | 100% of CPU2 | diff --git a/docs/source/markdown/options/cpuset-cpus.md b/docs/source/markdown/options/cpuset-cpus.md new file mode 100644 index 000000000..d717516a0 --- /dev/null +++ b/docs/source/markdown/options/cpuset-cpus.md @@ -0,0 +1,5 @@ +#### **--cpuset-cpus**=*number* + +CPUs in which to allow execution. Can be specified as a comma-separated list +(e.g. **0,1**), as a range (e.g. **0-3**), or any combination thereof +(e.g. **0-3,7,11-15**). diff --git a/docs/source/markdown/options/cpuset-mems.md b/docs/source/markdown/options/cpuset-mems.md new file mode 100644 index 000000000..d2d13eb54 --- /dev/null +++ b/docs/source/markdown/options/cpuset-mems.md @@ -0,0 +1,8 @@ +#### **--cpuset-mems**=*nodes* + +Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on +NUMA systems. + +If there are four memory nodes on the system (0-3), use **--cpuset-mems=0,1** +then processes in the container will only use memory from the first +two memory nodes. diff --git a/docs/source/markdown/options/destroy.md b/docs/source/markdown/options/destroy.md new file mode 100644 index 000000000..885c96201 --- /dev/null +++ b/docs/source/markdown/options/destroy.md @@ -0,0 +1,3 @@ +#### **--destroy** + +Remove the original <<container|pod>> that we are cloning once used to mimic the configuration. diff --git a/docs/source/markdown/options/entrypoint.md b/docs/source/markdown/options/entrypoint.md new file mode 100644 index 000000000..0da5e42af --- /dev/null +++ b/docs/source/markdown/options/entrypoint.md @@ -0,0 +1,17 @@ +#### **--entrypoint**=*"command"* | *'["command", "arg1", ...]'* + +Overwrite the default ENTRYPOINT of the image. + +This option allows you to overwrite the default entrypoint of the image. + +The ENTRYPOINT of an image is similar to a COMMAND +because it specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The ENTRYPOINT gives a container its +default nature or behavior, so that when you set an ENTRYPOINT you can run the +container as if it were that binary, complete with default options, and you can +pass in more options via the COMMAND. But, sometimes an operator may want to run +something else inside the container, so you can override the default ENTRYPOINT +at runtime by using a **--entrypoint** and a string to specify the new +ENTRYPOINT. + +You need to specify multi option commands in the form of a json string. diff --git a/docs/source/markdown/options/expose.md b/docs/source/markdown/options/expose.md new file mode 100644 index 000000000..04b5aad66 --- /dev/null +++ b/docs/source/markdown/options/expose.md @@ -0,0 +1,4 @@ +#### **--expose**=*port* + +Expose a port, or a range of ports (e.g. **--expose=3300-3310**) to set up port redirection +on the host system. diff --git a/docs/source/markdown/options/health-cmd.md b/docs/source/markdown/options/health-cmd.md new file mode 100644 index 000000000..df0474393 --- /dev/null +++ b/docs/source/markdown/options/health-cmd.md @@ -0,0 +1,8 @@ +#### **--health-cmd**=*"command"* | *'["command", "arg1", ...]'* + +Set or alter a healthcheck command for a container. The command is a command to be executed inside your +container that determines your container health. The command is required for other healthcheck options +to be applied. A value of **none** disables existing healthchecks. + +Multiple options can be passed in the form of a JSON array; otherwise, the command will be interpreted +as an argument to **/bin/sh -c**. diff --git a/docs/source/markdown/options/health-interval.md b/docs/source/markdown/options/health-interval.md new file mode 100644 index 000000000..581e35f65 --- /dev/null +++ b/docs/source/markdown/options/health-interval.md @@ -0,0 +1,3 @@ +#### **--health-interval**=*interval* + +Set an interval for the healthchecks. An _interval_ of **disable** results in no automatic timer setup. The default is **30s**. diff --git a/docs/source/markdown/options/health-retries.md b/docs/source/markdown/options/health-retries.md new file mode 100644 index 000000000..e96dd3033 --- /dev/null +++ b/docs/source/markdown/options/health-retries.md @@ -0,0 +1,3 @@ +#### **--health-retries**=*retries* + +The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is **3**. 
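The healthcheck options documented above (**--health-cmd**, **--health-interval**, **--health-retries**) compose into a single invocation. A minimal sketch, assuming an image that ships `curl` and serves HTTP on port 8080; the container name, image, and probe URL are illustrative:

```bash
# Probe the container every 30s; after 3 consecutive failures it is marked unhealthy.
# `curl -f` exits non-zero on HTTP errors, which is what the healthcheck keys on.
podman run -d --name web \
  --health-cmd 'curl -f http://localhost:8080/ || exit 1' \
  --health-interval 30s \
  --health-retries 3 \
  registry.example.com/mywebapp:latest
```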
diff --git a/docs/source/markdown/options/health-start-period.md b/docs/source/markdown/options/health-start-period.md new file mode 100644 index 000000000..e88e270c6 --- /dev/null +++ b/docs/source/markdown/options/health-start-period.md @@ -0,0 +1,4 @@ +#### **--health-start-period**=*period* + +The initialization time needed for a container to bootstrap. The value can be expressed in time format like +**2m3s**. The default value is **0s**. diff --git a/docs/source/markdown/options/health-timeout.md b/docs/source/markdown/options/health-timeout.md new file mode 100644 index 000000000..5102ea81b --- /dev/null +++ b/docs/source/markdown/options/health-timeout.md @@ -0,0 +1,4 @@ +#### **--health-timeout**=*timeout* + +The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the +value can be expressed in a time format such as **1m22s**. The default value is **30s**. diff --git a/docs/source/markdown/options/hostname.container.md b/docs/source/markdown/options/hostname.container.md new file mode 100644 index 000000000..6c525ac5b --- /dev/null +++ b/docs/source/markdown/options/hostname.container.md @@ -0,0 +1,5 @@ +#### **--hostname**, **-h**=*name* + +Container host name + +Sets the container host name that is available inside the container. Can only be used with a private UTS namespace `--uts=private` (default). If `--pod` is specified and the pod shares the UTS namespace (default) the pod's hostname will be used. diff --git a/docs/source/markdown/options/hostname.pod.md b/docs/source/markdown/options/hostname.pod.md new file mode 100644 index 000000000..418e1e8a7 --- /dev/null +++ b/docs/source/markdown/options/hostname.pod.md @@ -0,0 +1,3 @@ +#### **--hostname**=*name* + +Set a hostname to the pod. diff --git a/docs/source/markdown/options/infra-command.md b/docs/source/markdown/options/infra-command.md new file mode 100644 index 000000000..24edcddfe --- /dev/null +++ b/docs/source/markdown/options/infra-command.md @@ -0,0 +1,3 @@ +#### **--infra-command**=*command* + +The command that will be run to start the infra container. Default: "/pause". diff --git a/docs/source/markdown/options/infra-conmon-pidfile.md b/docs/source/markdown/options/infra-conmon-pidfile.md new file mode 100644 index 000000000..f1c518682 --- /dev/null +++ b/docs/source/markdown/options/infra-conmon-pidfile.md @@ -0,0 +1,3 @@ +#### **--infra-conmon-pidfile**=*file* + +Write the pid of the infra container's **conmon** process to a file. As **conmon** runs in a separate process than Podman, this is necessary when using systemd to manage Podman containers and pods. diff --git a/docs/source/markdown/options/infra-name.md b/docs/source/markdown/options/infra-name.md new file mode 100644 index 000000000..d8cd3989d --- /dev/null +++ b/docs/source/markdown/options/infra-name.md @@ -0,0 +1,3 @@ +#### **--infra-name**=*name* + +The name that will be used for the pod's infra container. diff --git a/docs/source/markdown/options/label-file.md b/docs/source/markdown/options/label-file.md new file mode 100644 index 000000000..fe4bc9176 --- /dev/null +++ b/docs/source/markdown/options/label-file.md @@ -0,0 +1,3 @@ +#### **--label-file**=*file* + +Read in a line-delimited file of labels. diff --git a/docs/source/markdown/options/link-local-ip.md b/docs/source/markdown/options/link-local-ip.md new file mode 100644 index 000000000..5bedff01e --- /dev/null +++ b/docs/source/markdown/options/link-local-ip.md @@ -0,0 +1,3 @@ +#### **--link-local-ip**=*ip* + +Not implemented. 
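The pod-level options above combine naturally in one `podman pod create` call. A short sketch under assumed values; the pod name, hostname, and pidfile path are illustrative:

```bash
# Name the infra container and write its conmon pid to a known path,
# e.g. so a systemd unit can track the pod's infra process.
podman pod create --name mypod \
  --hostname mypod.example.test \
  --infra-name mypod-infra \
  --infra-conmon-pidfile /tmp/mypod-infra-conmon.pid
```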
diff --git a/docs/source/markdown/options/log-driver.md b/docs/source/markdown/options/log-driver.md new file mode 100644 index 000000000..195e1f284 --- /dev/null +++ b/docs/source/markdown/options/log-driver.md @@ -0,0 +1,12 @@ +#### **--log-driver**=*driver* + +Logging driver for the container. Currently available options are **k8s-file**, **journald**, **none** and **passthrough**, with **json-file** aliased to **k8s-file** for scripting compatibility. (Default **journald**). + +The podman info command below will display the default log-driver for the system. +``` +$ podman info --format '{{ .Host.LogDriver }}' +journald +``` +The **passthrough** driver passes down the standard streams (stdin, stdout, stderr) to the +container. It is not allowed with the remote Podman client, including Mac and Windows (excluding WSL2) machines, and on a tty, since it is +vulnerable to attacks via TIOCSTI. diff --git a/docs/source/markdown/options/mac-address.md b/docs/source/markdown/options/mac-address.md new file mode 100644 index 000000000..0939ceaad --- /dev/null +++ b/docs/source/markdown/options/mac-address.md @@ -0,0 +1,11 @@ +#### **--mac-address**=*address* + +<<Container|Pod>> network interface MAC address (e.g. 92:d0:c6:0a:29:33) +This option can only be used if the <<container|pod>> is joined to only a single network - i.e., **--network=_network-name_** is used at most once - +and if the <<container|pod>> is not joining another container's network namespace via **--network=container:_id_**. + +Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +To specify multiple static MAC addresses per <<container|pod>>, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option. diff --git a/docs/source/markdown/options/memory-swappiness.md b/docs/source/markdown/options/memory-swappiness.md new file mode 100644 index 000000000..65f0ef310 --- /dev/null +++ b/docs/source/markdown/options/memory-swappiness.md @@ -0,0 +1,5 @@ +#### **--memory-swappiness**=*number* + +Tune a container's memory swappiness behavior. Accepts an integer between *0* and *100*. + +This flag is not supported on cgroups V2 systems. diff --git a/docs/source/markdown/options/network-alias.md b/docs/source/markdown/options/network-alias.md new file mode 100644 index 000000000..f48a1bb95 --- /dev/null +++ b/docs/source/markdown/options/network-alias.md @@ -0,0 +1,8 @@ +#### **--network-alias**=*alias* + +Add a network-scoped alias for the <<container|pod>>, setting the alias for all networks that the container joins. To set a +name only for a specific network, use the alias option as described under the **--network** option. +If the network has DNS enabled (`podman network inspect -f {{.DNSEnabled}} <name>`), +these aliases can be used for name resolution on the given network. This option can be specified multiple times. +NOTE: When using CNI a <<container|pod>> will only have access to aliases on the first network that it joins. This limitation does +not exist with netavark/aardvark-dns. diff --git a/docs/source/markdown/options/no-hosts.md b/docs/source/markdown/options/no-hosts.md new file mode 100644 index 000000000..5b1e95b86 --- /dev/null +++ b/docs/source/markdown/options/no-hosts.md @@ -0,0 +1,5 @@ +#### **--no-hosts** + +Do not create _/etc/hosts_ for the <<container|pod>>. 
+By default, Podman will manage _/etc/hosts_, adding the container's own IP address and any hosts from **--add-host**. +**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. diff --git a/docs/source/markdown/options/oom-score-adj.md b/docs/source/markdown/options/oom-score-adj.md new file mode 100644 index 000000000..123e8e243 --- /dev/null +++ b/docs/source/markdown/options/oom-score-adj.md @@ -0,0 +1,3 @@ +#### **--oom-score-adj**=*num* + +Tune the host's OOM preferences for containers (accepts values from **-1000** to **1000**). diff --git a/docs/source/markdown/options/pid.pod.md b/docs/source/markdown/options/pid.pod.md new file mode 100644 index 000000000..4086736b6 --- /dev/null +++ b/docs/source/markdown/options/pid.pod.md @@ -0,0 +1,7 @@ +#### **--pid**=*pid* + +Set the PID mode for the pod. The default is to create a private PID namespace for the pod. Requires the PID namespace to be shared via --share. + + host: use the host’s PID namespace for the pod + ns: join the specified PID namespace + private: create a new namespace for the pod (default) diff --git a/docs/source/markdown/options/pids-limit.md b/docs/source/markdown/options/pids-limit.md new file mode 100644 index 000000000..efcaef2e2 --- /dev/null +++ b/docs/source/markdown/options/pids-limit.md @@ -0,0 +1,3 @@ +#### **--pids-limit**=*limit* + +Tune the container's pids limit. Set to **-1** to have unlimited pids for the container. The default is **4096** on systems that support "pids" cgroup controller. diff --git a/docs/source/markdown/options/platform.md b/docs/source/markdown/options/platform.md new file mode 100644 index 000000000..edfa428ff --- /dev/null +++ b/docs/source/markdown/options/platform.md @@ -0,0 +1,4 @@ +#### **--platform**=*OS/ARCH* + +Specify the platform for selecting the image. (Conflicts with --arch and --os) +The `--platform` option can be used to override the current architecture and operating system. diff --git a/docs/source/markdown/options/pull.md b/docs/source/markdown/options/pull.md new file mode 100644 index 000000000..ae0911507 --- /dev/null +++ b/docs/source/markdown/options/pull.md @@ -0,0 +1,8 @@ +#### **--pull**=*policy* + +Pull image policy. The default is **missing**. + +- **always**: Always pull the image and throw an error if the pull fails. +- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails. +- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found. +- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found. diff --git a/docs/source/markdown/options/read-only-tmpfs.md b/docs/source/markdown/options/read-only-tmpfs.md new file mode 100644 index 000000000..7e14f7fef --- /dev/null +++ b/docs/source/markdown/options/read-only-tmpfs.md @@ -0,0 +1,3 @@ +#### **--read-only-tmpfs** + +If container is running in **--read-only** mode, then mount a read-write tmpfs on _/run_, _/tmp_, and _/var/tmp_. The default is **true**. 
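The **--pull** policies listed above can be exercised directly; a small sketch (the image name is illustrative):

```bash
# Contact the registry only when the local store lacks the image (the default).
podman run --pull=missing alpine echo hello
# Re-pull only when the registry digest differs from the local image's.
podman run --pull=newer alpine echo hello
```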
diff --git a/docs/source/markdown/options/read-only.md b/docs/source/markdown/options/read-only.md new file mode 100644 index 000000000..ae5244a22 --- /dev/null +++ b/docs/source/markdown/options/read-only.md @@ -0,0 +1,7 @@ +#### **--read-only** + +Mount the container's root filesystem as read-only. + +By default a container will have its root filesystem writable allowing processes +to write files anywhere. By specifying the **--read-only** flag, the container will have +its root filesystem mounted as read-only prohibiting any writes. diff --git a/docs/source/markdown/options/replace.md b/docs/source/markdown/options/replace.md new file mode 100644 index 000000000..6cd849fb4 --- /dev/null +++ b/docs/source/markdown/options/replace.md @@ -0,0 +1,3 @@ +#### **--replace** + +If another <<container|pod>> with the same name already exists, replace and remove it. The default is **false**. diff --git a/docs/source/markdown/options/requires.md b/docs/source/markdown/options/requires.md new file mode 100644 index 000000000..fa2e9a8f3 --- /dev/null +++ b/docs/source/markdown/options/requires.md @@ -0,0 +1,5 @@ +#### **--requires**=*container* + +Specify one or more requirements. +A requirement is a dependency container that will be started before this container. +Containers can be specified by name or ID, with multiple containers being separated by commas. diff --git a/docs/source/markdown/options/secret.md b/docs/source/markdown/options/secret.md new file mode 100644 index 000000000..ff2c0ab83 --- /dev/null +++ b/docs/source/markdown/options/secret.md @@ -0,0 +1,22 @@ +#### **--secret**=*secret[,opt=opt ...]* + +Give the container access to a secret. Can be specified multiple times. + +A secret is a blob of sensitive data which a container needs at runtime but +should not be stored in the image or in source control, such as usernames and passwords, +TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). + +When secrets are specified as type `mount`, the secrets are copied and mounted into the container when a container is created. +When secrets are specified as type `env`, the secret will be set as an environment variable within the container. +Secrets are written in the container at the time of container creation, and modifying the secret using `podman secret` commands +after the container is created will not affect the secret inside the container. + +Secrets and its storage are managed using the `podman secret` command. + +Secret Options + +- `type=mount|env` : How the secret will be exposed to the container. Default mount. +- `target=target` : Target of secret. Defaults to secret name. +- `uid=0` : UID of secret. Defaults to 0. Mount secret type only. +- `gid=0` : GID of secret. Defaults to 0. Mount secret type only. +- `mode=0` : Mode of secret. Defaults to 0444. Mount secret type only. diff --git a/docs/source/markdown/options/stop-signal.md b/docs/source/markdown/options/stop-signal.md new file mode 100644 index 000000000..e70d715b8 --- /dev/null +++ b/docs/source/markdown/options/stop-signal.md @@ -0,0 +1,3 @@ +#### **--stop-signal**=*signal* + +Signal to stop a container. Default is **SIGTERM**. diff --git a/docs/source/markdown/options/stop-timeout.md b/docs/source/markdown/options/stop-timeout.md new file mode 100644 index 000000000..2845e70ae --- /dev/null +++ b/docs/source/markdown/options/stop-timeout.md @@ -0,0 +1,4 @@ +#### **--stop-timeout**=*seconds* + +Timeout to stop a container. Default is **10**. 
+Remote connections use local containers.conf for defaults diff --git a/docs/source/markdown/options/tmpfs.md b/docs/source/markdown/options/tmpfs.md new file mode 100644 index 000000000..0d6652dcd --- /dev/null +++ b/docs/source/markdown/options/tmpfs.md @@ -0,0 +1,14 @@ +#### **--tmpfs**=*fs* + +Create a tmpfs mount. + +Mount a temporary filesystem (**tmpfs**) mount into a container, for example: + +``` +$ podman <<subcommand>> -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image +``` + +This command mounts a **tmpfs** at _/tmp_ within the container. The supported mount +options are the same as the Linux default mount flags. If you do not specify +any options, the system uses the following options: +**rw,noexec,nosuid,nodev**. diff --git a/docs/source/markdown/options/uidmap.container.md b/docs/source/markdown/options/uidmap.container.md new file mode 100644 index 000000000..9e21494dd --- /dev/null +++ b/docs/source/markdown/options/uidmap.container.md @@ -0,0 +1,79 @@ +#### **--uidmap**=*container_uid:from_uid:amount* + +Run the container in a new user namespace using the supplied UID mapping. This +option conflicts with the **--userns** and **--subuidname** options. This +option provides a way to map host UIDs to container UIDs. It can be passed +several times to map different ranges. + +The _from_uid_ value is based upon the user running the command, either rootful or rootless users. +* rootful user: *container_uid*:*host_uid*:*amount* +* rootless user: *container_uid*:*intermediate_uid*:*amount* + +When **podman <<subcommand>>** is called by a privileged user, the option **--uidmap** +works as a direct mapping between host UIDs and container UIDs. + +host UID -> container UID + +The _amount_ specifies the number of consecutive UIDs that will be mapped. +If for example _amount_ is **4** the mapping would look like: + +| host UID | container UID | +| - | - | +| _from_uid_ | _container_uid_ | +| _from_uid_ + 1 | _container_uid_ + 1 | +| _from_uid_ + 2 | _container_uid_ + 2 | +| _from_uid_ + 3 | _container_uid_ + 3 | + +When **podman <<subcommand>>** is called by an unprivileged user (i.e. running rootless), +the value _from_uid_ is interpreted as an "intermediate UID". In the rootless +case, host UIDs are not mapped directly to container UIDs. Instead the mapping +happens over two mapping steps: + +host UID -> intermediate UID -> container UID + +The **--uidmap** option only influences the second mapping step. + +The first mapping step is derived by Podman from the contents of the file +_/etc/subuid_ and the UID of the user calling Podman. + +First mapping step: + +| host UID | intermediate UID | +| - | - | +| UID for the user starting Podman | 0 | +| 1st subordinate UID for the user starting Podman | 1 | +| 2nd subordinate UID for the user starting Podman | 2 | +| 3rd subordinate UID for the user starting Podman | 3 | +| nth subordinate UID for the user starting Podman | n | + +To be able to use intermediate UIDs greater than zero, the user needs to have +subordinate UIDs configured in _/etc/subuid_. See **subuid**(5). + +The second mapping step is configured with **--uidmap**. 
+ +If, for example, _amount_ is **5**, the second mapping step would look like: + +| intermediate UID | container UID | +| - | - | +| _from_uid_ | _container_uid_ | +| _from_uid_ + 1 | _container_uid_ + 1 | +| _from_uid_ + 2 | _container_uid_ + 2 | +| _from_uid_ + 3 | _container_uid_ + 3 | +| _from_uid_ + 4 | _container_uid_ + 4 | + +When running as rootless, Podman will use all the ranges configured in the _/etc/subuid_ file. + +The current user ID is mapped to UID=0 in the rootless user namespace. +Every additional range is added sequentially afterward: + +| host | rootless user namespace | length | +| - | - | - | +| $UID | 0 | 1 | +| 1 | $FIRST_RANGE_ID | $FIRST_RANGE_LENGTH | +| 1+$FIRST_RANGE_LENGTH | $SECOND_RANGE_ID | $SECOND_RANGE_LENGTH | + +Even if a user does not have any subordinate UIDs in _/etc/subuid_, +**--uidmap** could still be used to map the normal UID of the user to a +container UID by running `podman <<subcommand>> --uidmap $container_uid:0:1 --user $container_uid ...`. + +Note: the **--uidmap** flag cannot be called in conjunction with the **--pod** flag, as a uidmap cannot be set on the container level when in a pod. diff --git a/docs/source/markdown/options/uidmap.pod.md b/docs/source/markdown/options/uidmap.pod.md new file mode 100644 index 000000000..ad1f1658f --- /dev/null +++ b/docs/source/markdown/options/uidmap.pod.md @@ -0,0 +1,6 @@ +#### **--uidmap**=*container_uid:from_uid:amount* + +Run all containers in the pod in a new user namespace using the supplied mapping. This +option conflicts with the **--userns** and **--subuidname** options. This +option provides a way to map host UIDs to container UIDs. It can be passed +several times to map different ranges. diff --git a/docs/source/markdown/options/ulimit.md b/docs/source/markdown/options/ulimit.md new file mode 100644 index 000000000..c23163acc --- /dev/null +++ b/docs/source/markdown/options/ulimit.md @@ -0,0 +1,3 @@ +#### **--ulimit**=*option* + +Ulimit options. You can use **host** to copy the current configuration from the host. diff --git a/docs/source/markdown/options/unsetenv.md b/docs/source/markdown/options/unsetenv.md new file mode 100644 index 000000000..3f4d31d48 --- /dev/null +++ b/docs/source/markdown/options/unsetenv.md @@ -0,0 +1,5 @@ +#### **--unsetenv**=*env* + +Unset default environment variables for the container. Default environment +variables include variables provided natively by Podman, environment variables +configured by the image, and environment variables from containers.conf. diff --git a/docs/source/markdown/options/uts.container.md b/docs/source/markdown/options/uts.container.md new file mode 100644 index 000000000..74a5f33d1 --- /dev/null +++ b/docs/source/markdown/options/uts.container.md @@ -0,0 +1,8 @@ +#### **--uts**=*mode* + +Set the UTS namespace mode for the container. The following values are supported: + +- **host**: use the host's UTS namespace inside the container. +- **private**: create a new namespace for the container (default). +- **ns:[path]**: run the container in the given existing UTS namespace. +- **container:[container]**: join the UTS namespace of the specified container. diff --git a/docs/source/markdown/options/uts.pod.md b/docs/source/markdown/options/uts.pod.md new file mode 100644 index 000000000..6144e3f70 --- /dev/null +++ b/docs/source/markdown/options/uts.pod.md @@ -0,0 +1,7 @@ +#### **--uts**=*mode* + +Set the UTS namespace mode for the pod. The following values are supported: + +- **host**: use the host's UTS namespace inside the pod.
+- **private**: create a new namespace for the pod (default). +- **ns:[path]**: run the pod in the given existing UTS namespace. diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md.in index ba7081ff5..34282e2d2 100644 --- a/docs/source/markdown/podman-build.1.md +++ b/docs/source/markdown/podman-build.1.md.in @@ -47,12 +47,7 @@ command to see these containers. External containers can be removed with the ## OPTIONS -#### **--add-host**=*host* - -Add a custom host-to-IP mapping (host:ip) - -Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option -can be set multiple times. Conflicts with the --no-hosts option. +@@option add-host #### **--all-platforms** @@ -120,8 +115,43 @@ The value of [name] is matched with the following priority order: #### **--cache-from** -Images to utilize as potential cache sources. Podman does not currently support -caching so this is a NOOP. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) +Repository to utilize as a potential cache source. When specified, Buildah will look for +cache images in the specified repository and will attempt to pull cache images instead of actually +executing the build steps locally. Buildah will only attempt to pull previously cached images if they +are considered valid cache hits. + +Use the `--cache-to` option to populate a remote repository with cache content. + +Example + +```bash +# populate a cache and also consult it +buildah build -t test --layers --cache-to registry/myrepo/cache --cache-from registry/myrepo/cache . +``` + +Note: the `--cache-from` option is ignored unless `--layers` is specified. + +#### **--cache-to** + +Set this flag to specify a remote repository that will be used to store cache images. Buildah will attempt to +push newly built cache images to the remote repository. + +Note: use the `--cache-from` option to make use of cache content in a remote repository. + +Example + +```bash +# populate a cache and also consult it +buildah build -t test --layers --cache-to registry/myrepo/cache --cache-from registry/myrepo/cache . +``` + +Note: the `--cache-to` option is ignored unless `--layers` is specified. + +#### **--cache-ttl** + +Limit the use of cached images to those with creation timestamps less than *duration* ago. +For example, if `--cache-ttl=1h` is specified, Buildah will only consider intermediate cache images created +within the last hour; intermediate cache images older than that will be ignored. #### **--cap-add**=*CAP\_xxx* @@ -148,11 +178,7 @@ given. Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry. (Default: /etc/containers/certs.d) Please refer to containers-certs.d(5) for details. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) -#### **--cgroup-parent**=*path* - -Path to cgroups under which the cgroup for the container will be created. If the -path is not absolute, the path is considered to be relative to the cgroups path -of the init process. Cgroups will be created if they do not already exist. +@@option cgroup-parent #### **--cgroupns**=*how* @@ -171,80 +197,15 @@ Thus, compressing the data before sending it is irrelevant to Podman. (This opti Set additional flags to pass to the C Preprocessor cpp(1). Containerfiles ending with a ".in" suffix will be preprocessed via cpp(1).
This option can be used to pass additional flags to cpp. Note: You can also set default CPPFLAGS by setting the BUILDAH_CPPFLAGS environment variable (e.g., export BUILDAH_CPPFLAGS="-DDEBUG"). -#### **--cpu-period**=*limit* - -Set the CPU period for the Completely Fair Scheduler (CFS), which is a -duration in microseconds. Once the container's CPU quota is used up, it will -not be scheduled to run until the current period ends. Defaults to 100000 -microseconds. - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-quota**=*limit* - -Limit the CPU Completely Fair Scheduler (CFS) quota. - -Limit the container's CPU usage. By default, containers run with the full -CPU resource. The limit is a number in microseconds. If you provide a number, -the container will be allowed to use that much CPU time until the CPU period -ends (controllable via **--cpu-period**). - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight) - -By default, all containers get the same proportion of CPU cycles. This -proportion can be modified by changing the container's CPU share weighting -relative to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -option to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container **{C0}** with **-c=512** running one process, and another container -**{C1}** with **-c=1024** running two processes, this can result in the -following -division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -#### **--cpuset-cpus**=*num* +@@option cpu-period - CPUs in which to allow execution (0-3, 0,1) +@@option cpu-quota -#### **--cpuset-mems**=*nodes* +@@option cpu-shares -Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on -NUMA systems. +@@option cpuset-cpus -If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your container will only use memory from the first -two memory nodes. +@@option cpuset-mems #### **--creds**=*creds* @@ -485,11 +446,8 @@ considered insecure.
Do not use existing cached images for the container build. Build from the start with a new set of cached layers. -#### **--no-hosts** +@@option no-hosts -Do not create _/etc/hosts_ for the container. -By default, Podman will manage _/etc/hosts_, adding the container's own IP address and any hosts from **--add-host**. -**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. This option conflicts with **--add-host**. #### **--omit-history** diff --git a/docs/source/markdown/podman-container-clone.1.md b/docs/source/markdown/podman-container-clone.1.md.in index 9baedfd36..cf760d7a2 100644 --- a/docs/source/markdown/podman-container-clone.1.md +++ b/docs/source/markdown/podman-container-clone.1.md.in @@ -11,101 +11,25 @@ podman\-container\-clone - Creates a copy of an existing container ## OPTIONS -#### **--blkio-weight**=*weight* +@@option blkio-weight -Block IO weight (relative weight) accepts a weight value between 10 and 1000. +@@option blkio-weight-device -#### **--blkio-weight-device**=*weight* - -Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -#### **--cpu-period**=*limit* - -Set the CPU period for the Completely Fair Scheduler (CFS), which is a -duration in microseconds. Once the container's CPU quota is used up, it will -not be scheduled to run until the current period ends. Defaults to 100000 -microseconds. - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/master/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error +@@option cpu-period If none is specified, the original container's CPU period is used. -#### **--cpu-quota**=*limit* - -Limit the CPU Completely Fair Scheduler (CFS) quota. - -Limit the container's CPU usage. By default, containers run with the full -CPU resource. The limit is a number in microseconds. If a number is provided, -the container will be allowed to use that much CPU time until the CPU period -ends (controllable via **--cpu-period**). - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/master/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error +@@option cpu-quota If none is specified, the original container's CPU quota is used. -#### **--cpu-rt-period**=*microseconds* - -Limit the CPU real-time period in microseconds - -Limit the container's Real Time CPU usage. This option tells the kernel to restrict the container's Real Time CPU usage to the period specified. - -This option is not supported on cgroups V2 systems. +@@option cpu-rt-period If none is specified, the original container's CPU runtime period is used. +@@option cpu-rt-runtime -#### **--cpu-rt-runtime**=*microseconds* - -Limit the CPU real-time runtime in microseconds. - -Limit the containers Real Time CPU usage. This option tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: -Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. - -The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -This option is not supported on cgroups V2 systems. - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight) - -By default, all containers get the same proportion of CPU cycles.
This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -option to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If a fourth container is added with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. -If the container _C0_ is started with **--cpu-shares=512** running one process, -and another container _C1_ with **--cpu-shares=1024** running two processes, -this can result in the following division of CPU shares: - -| PID | container | CPU | CPU share | -| ---- | ----------- | ------- | ------------ | -| 100 | C0 | 0 | 100% of CPU0 | -| 101 | C1 | 1 | 100% of CPU1 | -| 102 | C1 | 2 | 100% of CPU2 | +@@option cpu-shares If none are specified, the original container's CPU shares are used. @@ -116,23 +40,15 @@ Set a number of CPUs for the container that overrides the original containers CP This is shorthand for **--cpu-period** and **--cpu-quota**, so only **--cpus** or either both the **--cpu-period** and **--cpu-quota** options can be set. -#### **--cpuset-cpus** - -CPUs in which to allow execution (0-3, 0,1). If none are specified, the original container's CPUset is used. - -#### **--cpuset-mems**=*nodes* +@@option cpuset-cpus -Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. +If none are specified, the original container's CPUset is used. -If there are four memory nodes on the system (0-3), use `--cpuset-mems=0,1` -then processes in the container will only use memory from the first -two memory nodes. +@@option cpuset-mems If none are specified, the original container's CPU memory nodes are used. -#### **--destroy** - -Remove the original container that we are cloning once used to mimic the configuration. +@@option destroy #### **--device-read-bps**=*path* @@ -179,11 +95,7 @@ The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes), `k` (kibibytes), `m` (mebibytes), or `g` (gibibytes). If you don't specify a unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. -#### **--memory-swappiness**=*number* - -Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -This flag is not supported on cgroups V2 systems. +@@option memory-swappiness #### **--name** diff --git a/docs/source/markdown/podman-create.1.md.in b/docs/source/markdown/podman-create.1.md.in index 008c3c18f..5bb1dceca 100644 --- a/docs/source/markdown/podman-create.1.md.in +++ b/docs/source/markdown/podman-create.1.md.in @@ -66,12 +66,7 @@ and specified with a _tag_. 
## OPTIONS -#### **--add-host**=*host* - -Add a custom host-to-IP mapping (host:ip) - -Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** -option can be set multiple times. +@@option add-host #### **--annotation**=*key=value* @@ -99,47 +94,23 @@ Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE environment variable. `export REGISTRY_AUTH_FILE=path` -#### **--blkio-weight**=*weight* - -Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -#### **--blkio-weight-device**=*weight* +@@option blkio-weight -Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). +#### **--blkio-weight-device**=*device:weight* -#### **--cap-add**=*capability* +Block IO relative device weight. -Add Linux capabilities +@@option cap-add -#### **--cap-drop**=*capability* - -Drop Linux capabilities +@@option cap-drop @@option cgroup-conf -#### **--cgroup-parent**=*path* - -Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -#### **--cgroupns**=*mode* +@@option cgroup-parent -Set the cgroup namespace mode for the container. - **`host`**: use the host's cgroup namespace inside the container. - **`container:<NAME|ID>`**: join the namespace of the specified container. - **`ns:<PATH>`**: join the namespace at the specified path. - **`private`**: create a new cgroup namespace. +@@option cgroupns -If the host uses cgroups v1, the default is set to **host**. On cgroups v2 the default is **private**. - -#### **--cgroups**=*mode* - -Determines whether the container will create CGroups. -Valid values are *enabled*, *disabled*, *no-conmon*, *split*, with the default being *enabled*. - -The *enabled* option will create a new cgroup under the cgroup-parent. -The *disabled* option will force the container to not create CGroups, and thus conflicts with CGroup options (**--cgroupns** and **--cgroup-parent**). -The *no-conmon* option disables a new CGroup only for the conmon process. -The *split* option splits the current cgroup in two sub-cgroups: one for conmon and one for the container payload. It is not possible to set *--cgroup-parent* with *split*. +@@option cgroups @@option chrootdirs @@ -147,91 +118,17 @@ The *split* option splits the current cgroup in two sub-cgroups: one for conmon Write the container ID to the file -#### **--conmon-pidfile**=*path* - -Write the pid of the `conmon` process to a file. `conmon` runs in a separate process than Podman, so this is necessary when using systemd to restart Podman containers. -(This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) - -#### **--cpu-period**=*limit* - -Set the CPU period for the Completely Fair Scheduler (CFS), which is a -duration in microseconds. Once the container's CPU quota is used up, it will -not be scheduled to run until the current period ends. Defaults to 100000 -microseconds. - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-quota**=*limit* - -Limit the CPU Completely Fair Scheduler (CFS) quota. 
- -Limit the container's CPU usage. By default, containers run with the full -CPU resource. The limit is a number in microseconds. If you provide a number, -the container will be allowed to use that much CPU time until the CPU period -ends (controllable via **--cpu-period**). - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-rt-period**=*microseconds* - -Limit the CPU real-time period in microseconds - -Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. - -This flag is not supported on cgroups V2 systems. +@@option conmon-pidfile -#### **--cpu-rt-runtime**=*microseconds* +@@option cpu-period -Limit the CPU real-time runtime in microseconds +@@option cpu-quota -Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: -Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. +@@option cpu-rt-period -The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. +@@option cpu-rt-runtime -This flag is not supported on cgroups V2 systems. - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight) - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -flag to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. -If the container _C0_ is started with **--cpu-shares=512** running one process, -and another container _C1_ with **--cpu-shares=1024** running two processes, -this can result in the following division of CPU shares: - -| PID | container | CPU | CPU share | -| ---- | ----------- | ------- | ------------ | -| 100 | C0 | 0 | 100% of CPU0 | -| 101 | C1 | 1 | 100% of CPU1 | -| 102 | C1 | 2 | 100% of CPU2 | +@@option cpu-shares #### **--cpus**=*number* @@ -243,17 +140,9 @@ On some systems, changing the CPU limits may not be allowed for non-root users. 
For more details, see https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error -#### **--cpuset-cpus**=*cpus* - -CPUs in which to allow execution (0-3, 0,1) - -#### **--cpuset-mems**=*nodes* +@@option cpuset-cpus -Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - -If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your container will only use memory from the first -two memory nodes. +@@option cpuset-mems #### **--device**=*host-device[:container-device][:permissions]* @@ -323,22 +212,7 @@ Set custom DNS options. Invalid if using **--dns-opt** and **--network** that is Set custom DNS search domains. Invalid if using **--dns-search** and **--network** that is set to 'none' or `container:<name|id>`. (Use --dns-search=. if you don't wish to set the search domain) -#### **--entrypoint**=*"command"* | *'["command", "arg1", ...]'* - -Overwrite the default ENTRYPOINT of the image - -This option allows you to overwrite the default entrypoint of the image. -The ENTRYPOINT of an image is similar to a COMMAND -because it specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The ENTRYPOINT gives a container its -default nature or behavior, so that when you set an ENTRYPOINT you can run the -container as if it were that binary, complete with default options, and you can -pass in more options via the COMMAND. But, sometimes an operator may want to run -something else inside the container, so you can override the default ENTRYPOINT -at runtime by using a **--entrypoint** and a string to specify the new -ENTRYPOINT. - -You need to specify multi option commands in the form of a json string. +@@option entrypoint #### **--env**, **-e**=*env* @@ -354,10 +228,7 @@ Read in a line delimited file of environment variables. See **Environment** note @@option env-host -#### **--expose**=*port* - -Expose a port, or a range of ports (e.g. --expose=3300-3310) to set up port redirection -on the host system. +@@option expose #### **--gidmap**=*container_gid:host_gid:amount* @@ -370,42 +241,21 @@ Note: the **--gidmap** flag cannot be called in conjunction with the **--pod** f @@option group-add -#### **--health-cmd**=*"command"* | *'["command", "arg1", ...]'* - -Set or alter a healthcheck command for a container. The command is a command to be executed inside your -container that determines your container health. The command is required for other healthcheck options -to be applied. A value of `none` disables existing healthchecks. - -Multiple options can be passed in the form of a JSON array; otherwise, the command will be interpreted -as an argument to `/bin/sh -c`. - -#### **--health-interval**=*interval* - -Set an interval for the healthchecks (a value of `disable` results in no automatic timer setup) (default "30s") - -#### **--health-retries**=*retries* +@@option health-cmd -The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is `3`. +@@option health-interval -#### **--health-start-period**=*period* +@@option health-retries -The initialization time needed for a container to bootstrap. The value can be expressed in time format like -`2m3s`. The default value is `0s` +@@option health-start-period -#### **--health-timeout**=*timeout* - -The maximum time allowed to complete the healthcheck before an interval is considered failed. 
Like start-period, the -value can be expressed in a time format such as `1m22s`. The default value is `30s`. +@@option health-timeout #### **--help** Print usage statement -#### **--hostname**, **-h**=*name* - -Container host name - -Sets the container host name that is available inside the container. Can only be used with a private UTS namespace `--uts=private` (default). If `--pod` is specified and the pod shares the UTS namespace (default) the pod's hostname will be used. +@@option hostname.container @@option hostuser @@ -490,26 +340,11 @@ a private IPC namespace. Add metadata to a container (e.g., --label com.example.key=value) -#### **--label-file**=*file* - -Read in a line delimited file of labels - -#### **--link-local-ip**=*ip* - -Not implemented - -#### **--log-driver**=*driver* +@@option label-file -Logging driver for the container. Currently available options are *k8s-file*, *journald*, *none* and *passthrough*, with *json-file* aliased to *k8s-file* for scripting compatibility. +@@option link-local-ip -The podman info command below will display the default log-driver for the system. -``` -$ podman info --format '{{ .Host.LogDriver }}' -journald -``` -The *passthrough* driver passes down the standard streams (stdin, stdout, stderr) to the -container. It is not allowed with the remote Podman client, including Mac and Windows (excluding WSL2) machines, and on a tty, since it is -vulnerable to attacks via TIOCSTI. +@@option log-driver #### **--log-opt**=*name=value* @@ -528,17 +363,7 @@ It supports the same keys as **podman inspect --format**. This option is currently supported only by the **journald** log driver. -#### **--mac-address**=*address* - -Container network interface MAC address (e.g. 92:d0:c6:0a:29:33) -This option can only be used if the container is joined to only a single network - i.e., **--network=_network-name_** is used at most once - -and if the container is not joining another container's network namespace via **--network=container:_id_**. - -Remember that the MAC address in an Ethernet network must be unique. -The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. - -To specify multiple static MAC addresses per container, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option. +@@option mac-address #### **--memory**, **-m**=*limit* @@ -571,11 +396,7 @@ The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes), `k` (kibibytes), `m` (mebibytes), or `g` (gibibytes). If you don't specify a unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. -#### **--memory-swappiness**=*number* - -Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -This flag is not supported on cgroups V2 systems. +@@option memory-swappiness @@option mount @@ -626,29 +447,17 @@ Valid _mode_ values are: Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container network namespace, usually `10.0.2.100`. If your application requires the real source IP address, e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for rootless containers when connected to user-defined networks. - **port_handler=slirp4netns**: Use the slirp4netns port forwarding, it is slower than rootlesskit but preserves the correct source IP address. This port handler cannot be used for user-defined networks. 
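For example, to publish a container port while preserving the real client source IP by selecting the slirp4netns port handler (an illustrative sketch; `my_image` is a placeholder image name):

```
$ podman create --network slirp4netns:port_handler=slirp4netns -p 8080:80 my_image
```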
-#### **--network-alias**=*alias* - -Add a network-scoped alias for the container, setting the alias for all networks that the container joins. To set a -name only for a specific network, use the alias option as described under the **--network** option. -If the network has DNS enabled (`podman network inspect -f {{.DNSEnabled}} <name>`), -these aliases can be used for name resolution on the given network. This option can be specified multiple times. -NOTE: When using CNI a container will only have access to aliases on the first network that it joins. This limitation does -not exist with netavark/aardvark-dns. +@@option network-alias @@option no-healthcheck -#### **--no-hosts** +@@option no-hosts -Do not create _/etc/hosts_ for the container. -By default, Podman will manage _/etc/hosts_, adding the container's own IP address and any hosts from **--add-host**. -**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. This option conflicts with **--add-host**. @@option oom-kill-disable -#### **--oom-score-adj**=*num* - -Tune the host's OOM preferences for containers (accepts -1000 to 1000) +@@option oom-score-adj #### **--os**=*OS* Override the OS, defaults to hosts, of the image to be pulled. For example, `windows`. @@ -668,14 +477,9 @@ Default is to create a private PID namespace for the container @@option pidfile -#### **--pids-limit**=*limit* - -Tune the container's pids limit. Set `-1` to have unlimited pids for the container. (default "4096" on systems that support PIDS cgroups). +@@option pids-limit -#### **--platform**=*OS/ARCH* - -Specify the platform for selecting the image. (Conflicts with --arch and --os) -The `--platform` option can be used to override the current architecture and operating system. +@@option platform #### **--pod**=*name* @@ -742,40 +546,19 @@ port to a random port on the host within an *ephemeral port range* defined by `/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host ports and the exposed ports, use `podman port`. -#### **--pull**=*policy* - -Pull image policy. The default is **missing**. - -- **always**: Always pull the image and throw an error if the pull fails. -- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails. -- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found. -- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found. +@@option pull #### **--quiet**, **-q** Suppress output information when pulling images -#### **--read-only** +@@option read-only -Mount the container's root filesystem as read-only. +@@option read-only-tmpfs -By default a container will have its root filesystem writable allowing processes -to write files anywhere. By specifying the `--read-only` flag the container will have -its root filesystem mounted as read-only prohibiting any writes. +@@option replace -#### **--read-only-tmpfs** - -If container is running in --read-only mode, then mount a read-write tmpfs on /run, /tmp, and /var/tmp. The default is *true* - -#### **--replace** - -If another container with the same name already exists, replace and remove it. The default is **false**. 
- -#### **--requires**=*container* - -Specify one or more requirements. -A requirement is a dependency container that will be started before this container. -Containers can be specified by name or ID, with multiple containers being separated by commas. +@@option requires #### **--restart**=*policy* @@ -818,28 +601,7 @@ finishes executing, similar to a tmpfs mount point being unmounted. @@option seccomp-policy -#### **--secret**=*secret[,opt=opt ...]* - -Give the container access to a secret. Can be specified multiple times. - -A secret is a blob of sensitive data which a container needs at runtime but -should not be stored in the image or in source control, such as usernames and passwords, -TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). - -When secrets are specified as type `mount`, the secrets are copied and mounted into the container when a container is created. -When secrets are specified as type `env`, the secret will be set as an environment variable within the container. -Secrets are written in the container at the time of container creation, and modifying the secret using `podman secret` commands -after the container is created will not affect the secret inside the container. - -Secrets and its storage are managed using the `podman secret` command. - -Secret Options - -- `type=mount|env` : How the secret will be exposed to the container. Default mount. -- `target=target` : Target of secret. Defaults to secret name. -- `uid=0` : UID of secret. Defaults to 0. Mount secret type only. -- `gid=0` : GID of secret. Defaults to 0. Mount secret type only. -- `mode=0` : Mode of secret. Defaults to 0444. Mount secret type only. +@@option secret #### **--security-opt**=*option* @@ -880,14 +642,9 @@ Size of `/dev/shm` (format: `<number>[<unit>]`, where unit = b (bytes), k (kibib If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. When size is `0`, there is no limit on the amount of memory used for IPC by the container. -#### **--stop-signal**=*SIGTERM* +@@option stop-signal -Signal to stop a container. Default is SIGTERM. - -#### **--stop-timeout**=*seconds* - -Timeout (in seconds) to stop a container. Default is 10. -Remote connections use local containers.conf for defaults +@@option stop-timeout #### **--subgidname**=*name* @@ -948,18 +705,7 @@ The `container_manage_cgroup` boolean must be enabled for this to be allowed on Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true, then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified, TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf. -#### **--tmpfs**=*fs* - -Create a tmpfs mount - -Mount a temporary filesystem (`tmpfs`) mount into a container, for example: - -$ podman create -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - -This command mounts a `tmpfs` at `/tmp` within the container. The supported mount -options are the same as the Linux default `mount` flags. If you do not specify -any options, the systems uses the following options: -`rw,noexec,nosuid,nodev`. +@@option tmpfs #### **--tty**, **-t** @@ -974,97 +720,13 @@ standard input. @@option tz -#### **--uidmap**=*container_uid:from_uid:amount* - -Run the container in a new user namespace using the supplied UID mapping. This -option conflicts with the **--userns** and **--subuidname** options. 
This -option provides a way to map host UIDs to container UIDs. It can be passed -several times to map different ranges. - -The _from_uid_ value is based upon the user running the command, either rootful or rootless users. -* rootful user: *container_uid*:*host_uid*:*amount* -* rootless user: *container_uid*:*intermediate_uid*:*amount* - -When **podman create** is called by a privileged user, the option **--uidmap** -works as a direct mapping between host UIDs and container UIDs. - -host UID -> container UID - -The _amount_ specifies the number of consecutive UIDs that will be mapped. -If for example _amount_ is **4** the mapping would look like: - -| host UID | container UID | -| - | - | -| _from_uid_ | _container_uid_ | -| _from_uid_ + 1 | _container_uid_ + 1 | -| _from_uid_ + 2 | _container_uid_ + 2 | -| _from_uid_ + 3 | _container_uid_ + 3 | - -When **podman create** is called by an unprivileged user (i.e. running rootless), -the value _from_uid_ is interpreted as an "intermediate UID". In the rootless -case, host UIDs are not mapped directly to container UIDs. Instead the mapping -happens over two mapping steps: - -host UID -> intermediate UID -> container UID - -The **--uidmap** option only influences the second mapping step. - -The first mapping step is derived by Podman from the contents of the file -_/etc/subuid_ and the UID of the user calling Podman. - -First mapping step: +@@option uidmap.container -| host UID | intermediate UID | -| - | - | -| UID for the user starting Podman | 0 | -| 1st subordinate UID for the user starting Podman | 1 | -| 2nd subordinate UID for the user starting Podman | 2 | -| 3rd subordinate UID for the user starting Podman | 3 | -| nth subordinate UID for the user starting Podman | n | - -To be able to use intermediate UIDs greater than zero, the user needs to have -subordinate UIDs configured in _/etc/subuid_. See **subuid**(5). - -The second mapping step is configured with **--uidmap**. - -If for example _amount_ is **5** the second mapping step would look like: - -| intermediate UID | container UID | -| - | - | -| _from_uid_ | _container_uid_ | -| _from_uid_ + 1 | _container_uid_ + 1 | -| _from_uid_ + 2 | _container_uid_ + 2 | -| _from_uid_ + 3 | _container_uid_ + 3 | -| _from_uid_ + 4 | _container_uid_ + 4 | - -The current user ID is mapped to UID=0 in the rootless user namespace. -Every additional range is added sequentially afterward: - -| host |rootless user namespace | length | -| - | - | - | -| $UID | 0 | 1 | -| 1 | $FIRST_RANGE_ID | $FIRST_RANGE_LENGTH | -| 1+$FIRST_RANGE_LENGTH | $SECOND_RANGE_ID | $SECOND_RANGE_LENGTH| - -Even if a user does not have any subordinate UIDs in _/etc/subuid_, -**--uidmap** could still be used to map the normal UID of the user to a -container UID by running `podman create --uidmap $container_uid:0:1 --user $container_uid ...`. - -Note: the **--uidmap** flag cannot be called in conjunction with the **--pod** flag as a uidmap cannot be set on the container level when in a pod. - -#### **--ulimit**=*option* - -Ulimit options - -You can pass `host` to copy the current configuration from the host. +@@option ulimit @@option umask -#### **--unsetenv**=*env* - -Unset default environment variables for the container. Default environment -variables include variables provided natively by Podman, environment variables -configured by the image, and environment variables from containers.conf. 
+@@option unsetenv @@option unsetenv-all @@ -1120,14 +782,7 @@ Podman allocates unique ranges of UIDs and GIDs from the `containers` subordinat This option is incompatible with **--gidmap**, **--uidmap**, **--subuidname** and **--subgidname**. -#### **--uts**=*mode* - -Set the UTS namespace mode for the container. The following values are supported: - -- **host**: use the host's UTS namespace inside the container. -- **private**: create a new namespace for the container (default). -- **ns:[path]**: run the container in the given existing UTS namespace. -- **container:[container]**: join the UTS namespace of the specified container. +@@option uts.container #### **--variant**=*VARIANT* Use _VARIANT_ instead of the default architecture variant of the container image. Some images can use multiple variants of the arm architectures, such as arm/v5 and arm/v7. diff --git a/docs/source/markdown/podman-kube-play.1.md b/docs/source/markdown/podman-kube-play.1.md.in index b3c385fe9..9e9fc7f38 100644 --- a/docs/source/markdown/podman-kube-play.1.md +++ b/docs/source/markdown/podman-kube-play.1.md.in @@ -214,11 +214,8 @@ Valid _mode_ values are: Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container network namespace, usually `10.0.2.100`. If your application requires the real source IP address, e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for rootless containers when connected to user-defined networks. - **port_handler=slirp4netns**: Use the slirp4netns port forwarding, it is slower than rootlesskit but preserves the correct source IP address. This port handler cannot be used for user-defined networks. -#### **--no-hosts** +@@option no-hosts -Do not create /etc/hosts for the pod. -By default, Podman will manage /etc/hosts, adding the container's own IP address and any hosts from **--add-host**. -**--no-hosts** disables this, and the image's **/etc/host** will be preserved unmodified. This option conflicts with host added in the Kubernetes YAML. #### **--quiet**, **-q** diff --git a/docs/source/markdown/podman-pod-clone.1.md b/docs/source/markdown/podman-pod-clone.1.md.in index 5473407b0..a5746fd84 100644 --- a/docs/source/markdown/podman-pod-clone.1.md +++ b/docs/source/markdown/podman-pod-clone.1.md.in @@ -11,75 +11,25 @@ podman\-pod\-clone - Creates a copy of an existing pod ## OPTIONS -#### **--blkio-weight**=*weight* +@@option blkio-weight -Block IO weight (relative weight) accepts a weight value between 10 and 1000. +@@option blkio-weight-device -#### **--blkio-weight-device**=*weight* +@@option cgroup-parent -Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -#### **--cgroup-parent**=*path* - -Path to cgroups under which the cgroup for the pod will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight) - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -flag to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. 
-When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container **{C0}** with **-c=512** running one process, and another container -**{C1}** with **-c=1024** running two processes, this can result in the following -division of CPU shares: - -PID container CPU CPU share -100 {C0} 0 100% of CPU0 -101 {C1} 1 100% of CPU1 -102 {C1} 2 100% of CPU2 +@@option cpu-shares #### **--cpus** Set a number of CPUs for the pod that overrides the original pod's CPU limits. If none are specified, the original pod's Nano CPUs are used. -#### **--cpuset-cpus** - -CPUs in which to allow execution (0-3, 0,1). If none are specified, the original pod's CPUset is used. - +@@option cpuset-cpus -#### **--cpuset-mems**=*nodes* +If none are specified, the original pod's CPUset is used. -Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. +@@option cpuset-mems -If there are four memory nodes on the system (0-3), use `--cpuset-mems=0,1` -then processes in the container will only use memory from the first -two memory nodes. - -#### **--destroy** - -Remove the original pod that we are cloning once used to mimic the configuration. +@@option destroy #### **--device**=*host-device[:container-device][:permissions]* @@ -114,29 +64,19 @@ GID map for the user namespace. Using this flag will run all containers in the p Print usage statement. -#### **--hostname**=*name* - -Set a hostname to the pod. - -#### **--infra-command**=*command* +@@option hostname.pod -The command that will be run to start the infra container. Default: "/pause". +@@option infra-command -#### **--infra-conmon-pidfile**=*file* +@@option infra-conmon-pidfile -Write the pid of the infra container's **conmon** process to a file. As **conmon** runs in a separate process than Podman, this is necessary when using systemd to manage Podman containers and pods. - -#### **--infra-name**=*name* - -The name that will be used for the pod's infra container. +@@option infra-name #### **--label**, **-l**=*label* Add metadata to a pod (e.g., --label com.example.key=value). -#### **--label-file**=*label* - -Read in a line delimited file of labels. +@@option label-file #### **--memory**, **-m**=*limit* @@ -163,13 +103,7 @@ unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. Set a custom name for the cloned pod. The default if not specified is of the syntax: **<ORIGINAL_NAME>-clone** -#### **--pid**=*pid* - -Set the PID mode for the pod. The default is to create a private PID namespace for the pod. Requires the PID namespace to be shared via --share.
- - host: use the host’s PID namespace for the pod - ns: join the specified PID namespace - private: create a new namespace for the pod (default) +@@option pid.pod #### **--security-opt**=*option* @@ -244,12 +178,7 @@ For the network namespace, only sysctls beginning with net.\* are allowed. Note: if the network namespace is not shared within the pod, these sysctls are not allowed. -#### **--uidmap**=*container_uid:from_uid:amount* - -Run all containers in the pod in a new user namespace using the supplied mapping. This -option conflicts with the **--userns** and **--subuidname** options. This -option provides a way to map host UIDs to container UIDs. It can be passed -several times to map different ranges. +@@option uidmap.pod #### **--userns**=*mode* @@ -280,14 +209,7 @@ Valid _mode_ values are: - *nomap*: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is ignored for containers created by the root user. -#### **--uts**=*mode* - -Set the UTS namespace mode for the pod. The following values are supported: - -- **host**: use the host's UTS namespace inside the pod. -- **private**: create a new namespace for the pod (default). -- **ns:[path]**: run the pod in the given existing UTS namespace. - +@@option uts.pod #### **--volume**, **-v**=*[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]* diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md.in index 7b63ac51d..73b634548 100644 --- a/docs/source/markdown/podman-pod-create.1.md +++ b/docs/source/markdown/podman-pod-create.1.md.in @@ -28,85 +28,25 @@ which by default, is the cgroup parent for all containers joining the pod. Conta ## OPTIONS -#### **--add-host**=*host:ip* +@@option add-host -Add a custom host-to-IP mapping (host:ip) - -Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** -option can be set multiple times. The /etc/hosts file is shared between all containers in the pod. -#### **--blkio-weight**=*weight* - -Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -#### **--blkio-weight-device**=*weight* - -Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -#### **--cgroup-parent**=*path* - -Path to cgroups under which the cgroup for the pod will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight) - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. +@@option blkio-weight -To modify the proportion from the default of 1024, use the **--cpu-shares** -flag to set the weighting to 2 or higher. +@@option blkio-weight-device -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. +@@option cgroup-parent -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. 
If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container **{C0}** with **-c=512** running one process, and another container -**{C1}** with **-c=1024** running two processes, this can result in the following -division of CPU shares: - -PID container CPU CPU share -100 {C0} 0 100% of CPU0 -101 {C1} 1 100% of CPU1 -102 {C1} 2 100% of CPU2 +@@option cpu-shares #### **--cpus**=*amount* Set the total number of CPUs delegated to the pod. Default is 0.000 which indicates that there is no limit on computation power. -#### **--cpuset-cpus**=*amount* - -Limit the CPUs to support execution. First CPU is numbered 0. Unlike --cpus this is of type string and parsed as a list of numbers - -Format is 0-3,0,1 - -Examples of the List Format: - -0-4,9 # bits 0, 1, 2, 3, 4, and 9 set -0-2,7,12-14 # bits 0, 1, 2, 7, 12, 13, and 14 set - -#### **--cpuset-mems**=*nodes* +@@option cpuset-cpus -Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - -If there are four memory nodes on the system (0-3), use `--cpuset-mems=0,1` -then processes in the container will only use memory from the first -two memory nodes. +@@option cpuset-mems #### **--device**=_host-device_[**:**_container-device_][**:**_permissions_] @@ -162,29 +102,21 @@ GID map for the user namespace. Using this flag will run the container with user Print usage statement. -#### **--hostname**=*name* - -Set a hostname to the pod +@@option hostname.pod #### **--infra** Create an infra container and associate it with the pod. An infra container is a lightweight container used to coordinate the shared kernel namespace of a pod. Default: true. -#### **--infra-command**=*command* - -The command that will be run to start the infra container. Default: "/pause". +@@option infra-command -#### **--infra-conmon-pidfile**=*file* - -Write the pid of the infra container's **conmon** process to a file. As **conmon** runs in a separate process than Podman, this is necessary when using systemd to manage Podman containers and pods. +@@option infra-conmon-pidfile #### **--infra-image**=*image* The custom image that will be used for the infra container. Unless specified, Podman builds a custom local image which does not require pulling down an image. -#### **--infra-name**=*name* - -The name that will be used for the pod's infra container. +@@option infra-name #### **--ip**=*ip* @@ -208,21 +140,9 @@ To specify multiple static IPv6 addresses per pod, set multiple networks using t Add metadata to a pod (e.g., --label com.example.key=value). -#### **--label-file**=*label* - -Read in a line delimited file of labels. - -#### **--mac-address**=*address* - -Pod network interface MAC address (e.g. 92:d0:c6:0a:29:33) -This option can only be used if the pod is joined to only a single network - i.e., **--network=_network-name_** is used at most once - -and if the pod is not joining another container's network namespace via **--network=container:_id_**. +@@option label-file -Remember that the MAC address in an Ethernet network must be unique. -The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. 
- -To specify multiple static MAC addresses per pod, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option. +@@option mac-address #### **--memory**, **-m**=*limit* @@ -282,29 +202,13 @@ Valid _mode_ values are: Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container network namespace, usually `10.0.2.100`. If your application requires the real source IP address, e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for rootless containers when connected to user-defined networks. - **port_handler=slirp4netns**: Use the slirp4netns port forwarding, it is slower than rootlesskit but preserves the correct source IP address. This port handler cannot be used for user-defined networks. -#### **--network-alias**=*alias* - -Add a network-scoped alias for the pod, setting the alias for all networks that the container joins. To set a -name only for a specific network, use the alias option as described under the **--network** option. -If the network has DNS enabled (`podman network inspect -f {{.DNSEnabled}} <name>`), -these aliases can be used for name resolution on the given network. This option can be specified multiple times. -NOTE: When using CNI a pod will only have access to aliases on the first network that it joins. This limitation does -not exist with netavark/aardvark-dns. +@@option network-alias -#### **--no-hosts** +@@option no-hosts -Do not create _/etc/hosts_ for the pod. -By default, Podman will manage _/etc/hosts_, adding the container's own IP address and any hosts from **--add-host**. -**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. This option conflicts with **--add-host**. -#### **--pid**=*pid* - -Set the PID mode for the pod. The default is to create a private PID namespace for the pod. Requires the PID namespace to be shared via --share. - - host: use the host’s PID namespace for the pod - ns: join the specified PID namespace - private: create a new namespace for the pod (default) +@@option pid.pod #### **--pod-id-file**=*path* @@ -335,9 +239,7 @@ but only by the pod itself. **Note:** This cannot be modified once the pod is created. -#### **--replace** - -If another pod with the same name already exists, replace and remove it. The default is **false**. +@@option replace #### **--security-opt**=*option* @@ -418,12 +320,7 @@ For the network namespace, only sysctls beginning with net.\* are allowed. Note: if the network namespace is not shared within the pod, these sysctls are not allowed. -#### **--uidmap**=*container_uid:from_uid:amount* - -Run the container in a new user namespace using the supplied mapping. This -option conflicts with the **--userns** and **--subuidname** options. This -option provides a way to map host UIDs to container UIDs. It can be passed -several times to map different ranges. +@@option uidmap.pod #### **--userns**=*mode* @@ -454,13 +351,7 @@ Valid _mode_ values are: - *nomap*: creates a user namespace where the current rootless user's UID:GID are not mapped into the container. This option is not allowed for containers created by the root user. -#### **--uts**=*mode* - -Set the UTS namespace mode for the pod. The following values are supported: - -- **host**: use the host's UTS namespace inside the pod. -- **private**: create a new namespace for the pod (default). -- **ns:[path]**: run the pod in the given existing UTS namespace. 
+@@option uts.pod

#### **--volume**, **-v**=*[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*

diff --git a/docs/source/markdown/podman-pull.1.md b/docs/source/markdown/podman-pull.1.md.in
index 99e227226..4321cb364 100644
--- a/docs/source/markdown/podman-pull.1.md
+++ b/docs/source/markdown/podman-pull.1.md.in
@@ -85,11 +85,7 @@ Print the usage statement.

Override the OS, defaults to hosts, of the image to be pulled. For example, `windows`.

-#### **--platform**=*OS/ARCH*
-
-Specify the platform for selecting the image. The `--platform` option can be used to override the current architecture and operating system.
-
-*IMPORTANT: Conflicts with --arch and --os*
+@@option platform

#### **--quiet**, **-q**

diff --git a/docs/source/markdown/podman-rmi.1.md b/docs/source/markdown/podman-rmi.1.md
index 8d0e5e500..93658daaf 100644
--- a/docs/source/markdown/podman-rmi.1.md
+++ b/docs/source/markdown/podman-rmi.1.md
@@ -28,6 +28,9 @@ This option will cause podman to remove all containers that are using the image

If a specified image does not exist in the local storage, ignore it and do not throw an error.

+#### **--no-prune**
+
+This option will not remove dangling parents of the specified image

Remove an image by its short ID
```
diff --git a/docs/source/markdown/podman-run.1.md.in b/docs/source/markdown/podman-run.1.md.in
index df4c43c41..21ce566ce 100644
--- a/docs/source/markdown/podman-run.1.md.in
+++ b/docs/source/markdown/podman-run.1.md.in
@@ -83,12 +83,7 @@ and specified with a _tag_.

$ podman run oci-archive:/tmp/fedora echo hello

## OPTIONS
-#### **--add-host**=*host:ip*
-
-Add a custom host-to-IP mapping (host:ip)
-
-Add a line to /etc/hosts. The format is hostname:ip. The **--add-host**
-option can be set multiple times.
+@@option add-host

#### **--annotation**=*key=value*

@@ -116,49 +111,21 @@ Path to the authentication file. Default is *${XDG_RUNTIME_DIR}/containers/auth.

Note: You can also override the default path of the authentication file by setting the **REGISTRY_AUTH_FILE**
environment variable.

-#### **--blkio-weight**=*weight*
-
-Block IO relative weight. The _weight_ is a value between **10** and **1000**.
-
-#### **--blkio-weight-device**=*device:weight*
+@@option blkio-weight

-Block IO relative device weight.
+@@option blkio-weight-device

-#### **--cap-add**=*capability*
+@@option cap-add

-Add Linux capabilities.
-
-#### **--cap-drop**=*capability*
-
-Drop Linux capabilities.
+@@option cap-drop

@@option cgroup-conf

-#### **--cgroup-parent**=*path*
-
-Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
-
-#### **--cgroupns**=*mode*
+@@option cgroup-parent

-Set the cgroup namespace mode for the container.
+@@option cgroupns

-- **host**: use the host's cgroup namespace inside the container.
-- **container:**_id_: join the namespace of the specified container.
-- **private**: create a new cgroup namespace.
-- **ns:**_path_: join the namespace at the specified path.
-
-If the host uses cgroups v1, the default is set to **host**. On cgroups v2, the default is **private**.
-
-#### **--cgroups**=*how*
-
-Determines whether the container will create CGroups.
-
-Default is **enabled**.
-
-The **enabled** option will create a new cgroup under the cgroup-parent.
-The **disabled** option will force the container to not create CGroups, and thus conflicts with CGroup options (**--cgroupns** and **--cgroup-parent**). -The **no-conmon** option disables a new CGroup only for the **conmon** process. -The **split** option splits the current CGroup in two sub-cgroups: one for conmon and one for the container payload. It is not possible to set **--cgroup-parent** with **split**. +@@option cgroups @@option chrootdirs @@ -166,88 +133,17 @@ The **split** option splits the current CGroup in two sub-cgroups: one for conmo Write the container ID to *file*. -#### **--conmon-pidfile**=*file* - -Write the pid of the **conmon** process to a file. As **conmon** runs in a separate process than Podman, this is necessary when using systemd to restart Podman containers. -(This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines) - -#### **--cpu-period**=*limit* - -Set the CPU period for the Completely Fair Scheduler (CFS), which is a -duration in microseconds. Once the container's CPU quota is used up, it will -not be scheduled to run until the current period ends. Defaults to 100000 -microseconds. - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-quota**=*limit* - -Limit the CPU Completely Fair Scheduler (CFS) quota. - -Limit the container's CPU usage. By default, containers run with the full -CPU resource. The limit is a number in microseconds. If you provide a number, -the container will be allowed to use that much CPU time until the CPU period -ends (controllable via **--cpu-period**). - -On some systems, changing the CPU limits may not be allowed for non-root -users. For more details, see -https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error - -#### **--cpu-rt-period**=*microseconds* - -Limit the CPU real-time period in microseconds. - -Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. +@@option conmon-pidfile -This flag is not supported on cgroups V2 systems. +@@option cpu-period -#### **--cpu-rt-runtime**=*microseconds* +@@option cpu-quota -Limit the CPU real-time runtime in microseconds. +@@option cpu-rt-period -Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: -Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. +@@option cpu-rt-runtime -The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -This flag is not supported on cgroups V2 systems. - -#### **--cpu-shares**, **-c**=*shares* - -CPU shares (relative weight). - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the combined weight of all the running containers. Default weight is **1024**. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. 
The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. -If the container _C0_ is started with **--cpu-shares=512** running one process, -and another container _C1_ with **--cpu-shares=1024** running two processes, -this can result in the following division of CPU shares: - -| PID | container | CPU | CPU share | -| ---- | ----------- | ------- | ------------ | -| 100 | C0 | 0 | 100% of CPU0 | -| 101 | C1 | 1 | 100% of CPU1 | -| 102 | C1 | 2 | 100% of CPU2 | +@@option cpu-shares #### **--cpus**=*number* @@ -259,18 +155,9 @@ On some systems, changing the CPU limits may not be allowed for non-root users. For more details, see https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error -#### **--cpuset-cpus**=*number* - -CPUs in which to allow execution. Can be specified as a comma-separated list -(e.g. **0,1**), as a range (e.g. **0-3**), or any combination thereof -(e.g. **0-3,7,11-15**). - -#### **--cpuset-mems**=*nodes* - -Memory nodes (MEMs) in which to allow execution. Only effective on NUMA systems. +@@option cpuset-cpus -For example, if you have four memory nodes (0-3) on your system, use **--cpuset-mems=0,1** -to only use memory from the first two memory nodes. +@@option cpuset-mems #### **--detach**, **-d** @@ -357,23 +244,7 @@ Set custom DNS options. Invalid if using **--dns-opt** with **--network** that i Set custom DNS search domains. Invalid if using **--dns-search** and **--network** that is set to **none** or **container:**_id_. Use **--dns-search=.** if you don't wish to set the search domain. -#### **--entrypoint**=*"command"* | *'["command", "arg1", ...]'* - -Overwrite the default ENTRYPOINT of the image. - -This option allows you to overwrite the default entrypoint of the image. - -The ENTRYPOINT of an image is similar to a COMMAND -because it specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The ENTRYPOINT gives a container its -default nature or behavior, so that when you set an ENTRYPOINT you can run the -container as if it were that binary, complete with default options, and you can -pass in more options via the COMMAND. But, sometimes an operator may want to run -something else inside the container, so you can override the default ENTRYPOINT -at runtime by using a **--entrypoint** and a string to specify the new -ENTRYPOINT. - -You need to specify multi option commands in the form of a json string. +@@option entrypoint #### **--env**, **-e**=*env* @@ -389,10 +260,7 @@ Read in a line delimited file of environment variables. See **Environment** note @@option env-host -#### **--expose**=*port* - -Expose a port, or a range of ports (e.g. **--expose=3300-3310**) to set up port redirection -on the host system. 
+@@option expose #### **--gidmap**=*container_gid:host_gid:amount* @@ -405,42 +273,21 @@ Note: the **--gidmap** flag cannot be called in conjunction with the **--pod** f @@option group-add -#### **--health-cmd**=*"command"* | *'["command", "arg1", ...]'* - -Set or alter a healthcheck command for a container. The command is a command to be executed inside your -container that determines your container health. The command is required for other healthcheck options -to be applied. A value of **none** disables existing healthchecks. - -Multiple options can be passed in the form of a JSON array; otherwise, the command will be interpreted -as an argument to **/bin/sh -c**. - -#### **--health-interval**=*interval* - -Set an interval for the healthchecks. An _interval_ of **disable** results in no automatic timer setup. The default is **30s**. - -#### **--health-retries**=*retries* +@@option health-cmd -The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is **3**. +@@option health-interval -#### **--health-start-period**=*period* +@@option health-retries -The initialization time needed for a container to bootstrap. The value can be expressed in time format like -**2m3s**. The default value is **0s**. +@@option health-start-period -#### **--health-timeout**=*timeout* - -The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the -value can be expressed in a time format such as **1m22s**. The default value is **30s**. +@@option health-timeout #### **--help** Print usage statement -#### **--hostname**, **-h**=*name* - -Container host name - -Sets the container host name that is available inside the container. Can only be used with a private UTS namespace `--uts=private` (default). If `--pod` is specified and the pod shares the UTS namespace (default) the pod's hostname will be used. +@@option hostname.container @@option hostuser @@ -504,27 +351,11 @@ a private IPC namespace. Add metadata to a container. -#### **--label-file**=*file* - -Read in a line-delimited file of labels. - -#### **--link-local-ip**=*ip* - -Not implemented. - -#### **--log-driver**=*driver* +@@option label-file -Logging driver for the container. Currently available options are **k8s-file**, **journald**, **none** and **passthrough**, with **json-file** aliased to **k8s-file** for scripting compatibility. (Default **journald**) - -The podman info command below will display the default log-driver for the system. -``` -$ podman info --format '{{ .Host.LogDriver }}' -journald -``` -The **passthrough** driver passes down the standard streams (stdin, stdout, stderr) to the -container. It is not allowed with the remote Podman client, including Mac and Windows (excluding WSL2) machines, and on a tty, since it is -vulnerable to attacks via TIOCSTI. +@@option link-local-ip +@@option log-driver #### **--log-opt**=*name=value* @@ -543,17 +374,7 @@ Set custom logging configuration. The following *name*s are supported: This option is currently supported only by the **journald** log driver. -#### **--mac-address**=*address* - -Container network interface MAC address (e.g. 92:d0:c6:0a:29:33) -This option can only be used if the container is joined to only a single network - i.e., **--network=_network-name_** is used at most once - -and if the container is not joining another container's network namespace via **--network=container:_id_**. - -Remember that the MAC address in an Ethernet network must be unique. 
-The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. - -To specify multiple static MAC addresses per container, set multiple networks using the **--network** option with a static MAC address specified for each using the `mac` mode for that option. +@@option mac-address #### **--memory**, **-m**=*number[unit]* @@ -587,11 +408,7 @@ the value of **--memory**. Set _number_ to **-1** to enable unlimited swap. -#### **--memory-swappiness**=*number* - -Tune a container's memory swappiness behavior. Accepts an integer between *0* and *100*. - -This flag is not supported on cgroups V2 systems. +@@option memory-swappiness @@option mount @@ -643,29 +460,17 @@ Valid _mode_ values are: Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container network namespace, usually `10.0.2.100`. If your application requires the real source IP address, e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for rootless containers when connected to user-defined networks. - **port_handler=slirp4netns**: Use the slirp4netns port forwarding, it is slower than rootlesskit but preserves the correct source IP address. This port handler cannot be used for user-defined networks. -#### **--network-alias**=*alias* - -Add a network-scoped alias for the container, setting the alias for all networks that the container joins. To set a -name only for a specific network, use the alias option as described under the **--network** option. -If the network has DNS enabled (`podman network inspect -f {{.DNSEnabled}} <name>`), -these aliases can be used for name resolution on the given network. This option can be specified multiple times. -NOTE: When using CNI a container will only have access to aliases on the first network that it joins. This limitation does -not exist with netavark/aardvark-dns. +@@option network-alias @@option no-healthcheck -#### **--no-hosts** +@@option no-hosts -Do not create _/etc/hosts_ for the container. -By default, Podman will manage _/etc/hosts_, adding the container's own IP address and any hosts from **--add-host**. -**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. This option conflicts with **--add-host**. @@option oom-kill-disable -#### **--oom-score-adj**=*num* - -Tune the host's OOM preferences for containers (accepts values from **-1000** to **1000**). +@@option oom-score-adj #### **--os**=*OS* Override the OS, defaults to hosts, of the image to be pulled. For example, `windows`. @@ -691,14 +496,9 @@ The default is to create a private PID namespace for the container. @@option pidfile -#### **--pids-limit**=*limit* - -Tune the container's pids limit. Set to **-1** to have unlimited pids for the container. The default is **4096** on systems that support "pids" cgroup controller. +@@option pids-limit -#### **--platform**=*OS/ARCH* - -Specify the platform for selecting the image. (Conflicts with --arch and --os) -The `--platform` option can be used to override the current architecture and operating system. +@@option platform #### **--pod**=*name* @@ -772,40 +572,19 @@ When using this option, Podman will bind any exposed port to a random port on th within an ephemeral port range defined by */proc/sys/net/ipv4/ip_local_port_range*. To find the mapping between the host ports and the exposed ports, use **podman port**. -#### **--pull**=*policy* - -Pull image policy. The default is **missing**. 
- -- **always**: Always pull the image and throw an error if the pull fails. -- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails. -- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found. -- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found. +@@option pull #### **--quiet**, **-q** Suppress output information when pulling images -#### **--read-only** +@@option read-only -Mount the container's root filesystem as read-only. +@@option read-only-tmpfs -By default a container will have its root filesystem writable allowing processes -to write files anywhere. By specifying the **--read-only** flag, the container will have -its root filesystem mounted as read-only prohibiting any writes. +@@option replace -#### **--read-only-tmpfs** - -If container is running in **--read-only** mode, then mount a read-write tmpfs on _/run_, _/tmp_, and _/var/tmp_. The default is **true**. - -#### **--replace** - -If another container with the same name already exists, replace and remove it. The default is **false**. - -#### **--requires**=*container* - -Specify one or more requirements. -A requirement is a dependency container that will be started before this container. -Containers can be specified by name or ID, with multiple containers being separated by commas. +@@option requires #### **--restart**=*policy* @@ -856,28 +635,7 @@ Note: On **SELinux** systems, the rootfs needs the correct label, which is by de @@option seccomp-policy -#### **--secret**=*secret[,opt=opt ...]* - -Give the container access to a secret. Can be specified multiple times. - -A secret is a blob of sensitive data which a container needs at runtime but -should not be stored in the image or in source control, such as usernames and passwords, -TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). - -When secrets are specified as type `mount`, the secrets are copied and mounted into the container when a container is created. -When secrets are specified as type `env`, the secret will be set as an environment variable within the container. -Secrets are written in the container at the time of container creation, and modifying the secret using `podman secret` commands -after the container is created will not affect the secret inside the container. - -Secrets and its storage are managed using the `podman secret` command. - -Secret Options - -- `type=mount|env` : How the secret will be exposed to the container. Default mount. -- `target=target` : Target of secret. Defaults to secret name. -- `uid=0` : UID of secret. Defaults to 0. Mount secret type only. -- `gid=0` : GID of secret. Defaults to 0. Mount secret type only. -- `mode=0` : Mode of secret. Defaults to 0444. Mount secret type only. +@@option secret #### **--security-opt**=*option* @@ -921,14 +679,9 @@ When _size_ is **0**, there is no limit on the amount of memory used for IPC by Sets whether the signals sent to the **podman run** command are proxied to the container process. SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is **true**. -#### **--stop-signal**=*signal* - -Signal to stop a container. Default is **SIGTERM**. 
+@@option stop-signal -#### **--stop-timeout**=*seconds* - -Timeout to stop a container. Default is **10**. -Remote connections use local containers.conf for defaults +@@option stop-timeout #### **--subgidname**=*name* @@ -1002,20 +755,7 @@ setsebool -P container_manage_cgroup true Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true, then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified, TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf. -#### **--tmpfs**=*fs* - -Create a tmpfs mount. - -Mount a temporary filesystem (**tmpfs**) mount into a container, for example: - -``` -$ podman run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image -``` - -This command mounts a **tmpfs** at _/tmp_ within the container. The supported mount -options are the same as the Linux default mount flags. If you do not specify -any options, the system uses the following options: -**rw,noexec,nosuid,nodev**. +@@option tmpfs #### **--tty**, **-t** @@ -1033,97 +773,13 @@ echo "asdf" | podman run --rm -i someimage /bin/cat @@option tz -#### **--uidmap**=*container_uid:from_uid:amount* - -Run the container in a new user namespace using the supplied UID mapping. This -option conflicts with the **--userns** and **--subuidname** options. This -option provides a way to map host UIDs to container UIDs. It can be passed -several times to map different ranges. - -The _from_uid_ value is based upon the user running the command, either rootful or rootless users. -* rootful user: *container_uid*:*host_uid*:*amount* -* rootless user: *container_uid*:*intermediate_uid*:*amount* - -When **podman run** is called by a privileged user, the option **--uidmap** -works as a direct mapping between host UIDs and container UIDs. - -host UID -> container UID - -The _amount_ specifies the number of consecutive UIDs that will be mapped. -If for example _amount_ is **4** the mapping would look like: - -| host UID | container UID | -| - | - | -| _from_uid_ | _container_uid_ | -| _from_uid_ + 1 | _container_uid_ + 1 | -| _from_uid_ + 2 | _container_uid_ + 2 | -| _from_uid_ + 3 | _container_uid_ + 3 | - -When **podman run** is called by an unprivileged user (i.e. running rootless), -the value _from_uid_ is interpreted as an "intermediate UID". In the rootless -case, host UIDs are not mapped directly to container UIDs. Instead the mapping -happens over two mapping steps: - -host UID -> intermediate UID -> container UID - -The **--uidmap** option only influences the second mapping step. - -The first mapping step is derived by Podman from the contents of the file -_/etc/subuid_ and the UID of the user calling Podman. - -First mapping step: +@@option uidmap.container -| host UID | intermediate UID | -| - | - | -| UID for the user starting Podman | 0 | -| 1st subordinate UID for the user starting Podman | 1 | -| 2nd subordinate UID for the user starting Podman | 2 | -| 3rd subordinate UID for the user starting Podman | 3 | -| nth subordinate UID for the user starting Podman | n | - -To be able to use intermediate UIDs greater than zero, the user needs to have -subordinate UIDs configured in _/etc/subuid_. See **subuid**(5). - -The second mapping step is configured with **--uidmap**. 
- -If for example _amount_ is **5** the second mapping step would look like: - -| intermediate UID | container UID | -| - | - | -| _from_uid_ | _container_uid_ | -| _from_uid_ + 1 | _container_uid_ + 1 | -| _from_uid_ + 2 | _container_uid_ + 2 | -| _from_uid_ + 3 | _container_uid_ + 3 | -| _from_uid_ + 4 | _container_uid_ + 4 | - -When running as rootless, Podman will use all the ranges configured in the _/etc/subuid_ file. - -The current user ID is mapped to UID=0 in the rootless user namespace. -Every additional range is added sequentially afterward: - -| host |rootless user namespace | length | -| - | - | - | -| $UID | 0 | 1 | -| 1 | $FIRST_RANGE_ID | $FIRST_RANGE_LENGTH | -| 1+$FIRST_RANGE_LENGTH | $SECOND_RANGE_ID | $SECOND_RANGE_LENGTH| - -Even if a user does not have any subordinate UIDs in _/etc/subuid_, -**--uidmap** could still be used to map the normal UID of the user to a -container UID by running `podman run --uidmap $container_uid:0:1 --user $container_uid ...`. - -Note: the **--uidmap** flag cannot be called in conjunction with the **--pod** flag as a uidmap cannot be set on the container level when in a pod. - -#### **--ulimit**=*option* - -Ulimit options. You can use **host** to copy the current configuration from the host. +@@option ulimit @@option umask -#### **--unsetenv**=*env* - -Unset default environment variables for the container. Default environment -variables include variables provided natively by Podman, environment variables -configured by the image, and environment variables from containers.conf. +@@option unsetenv @@option unsetenv-all @@ -1179,14 +835,7 @@ The rootless option `--userns=keep-id` uses all the subuids and subgids of the u **private**: create a new namespace for the container. This option is incompatible with **--gidmap**, **--uidmap**, **--subuidname** and **--subgidname**. -#### **--uts**=*mode* - -Set the UTS namespace mode for the container. The following values are supported: - -- **host**: use the host's UTS namespace inside the container. -- **private**: create a new namespace for the container (default). -- **ns:[path]**: run the container in the given existing UTS namespace. -- **container:[container]**: join the UTS namespace of the specified container. +@@option uts.container #### **--variant**=*VARIANT* Use _VARIANT_ instead of the default architecture variant of the container image. Some images can use multiple variants of the arm architectures, such as arm/v5 and arm/v7. diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md index 4c019ae97..d1192b6d2 100644 --- a/docs/source/markdown/podman.1.md +++ b/docs/source/markdown/podman.1.md @@ -133,6 +133,12 @@ for cgroup V2, the default runtime is `crun`, the manpage to consult is `crun(8) Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json` to podman build, the option given would be `--runtime-flag log-format=json`. + +#### **--ssh**=*value* + +This option allows the user to change the ssh mode, meaning that rather than using the default **golang** mode, one can instead use **--ssh=native** +to use the installed ssh binary and config file declared in containers.conf. + #### **--storage-driver**=*value* Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for non-root users when *fuse-overlayfs* is not available. The `STORAGE_DRIVER` environment variable overrides the default. 
The --storage-driver specified driver overrides all. diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md index 2cdb86fa0..69f55673d 100644 --- a/docs/tutorials/README.md +++ b/docs/tutorials/README.md @@ -1,4 +1,4 @@ - + # Podman Tutorials diff --git a/docs/tutorials/basic_networking.md b/docs/tutorials/basic_networking.md index 05c3a731e..7ffc8bcd3 100644 --- a/docs/tutorials/basic_networking.md +++ b/docs/tutorials/basic_networking.md @@ -1,4 +1,4 @@ - + # Basic Networking Guide for Podman diff --git a/docs/tutorials/podman-derivative-api.md b/docs/tutorials/podman-derivative-api.md index e38c2b13d..9de355883 100644 --- a/docs/tutorials/podman-derivative-api.md +++ b/docs/tutorials/podman-derivative-api.md @@ -1,4 +1,4 @@ - + # How to use libpod for custom/derivative projects diff --git a/docs/tutorials/podman-for-windows.md b/docs/tutorials/podman-for-windows.md index 48f9c1ab5..bb37f4a48 100644 --- a/docs/tutorials/podman-for-windows.md +++ b/docs/tutorials/podman-for-windows.md @@ -1,4 +1,4 @@ - + Podman for Windows ================== diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md index a371189e9..e978d0fb2 100644 --- a/docs/tutorials/podman_tutorial.md +++ b/docs/tutorials/podman_tutorial.md @@ -1,4 +1,4 @@ - + # Basic Setup and Use of Podman Podman is a utility provided as part of the libpod library. It can be used to create and maintain diff --git a/docs/tutorials/podman_tutorial_cn.md b/docs/tutorials/podman_tutorial_cn.md index 36e83e16f..37608dddd 100644 --- a/docs/tutorials/podman_tutorial_cn.md +++ b/docs/tutorials/podman_tutorial_cn.md @@ -1,6 +1,6 @@ > - 译文出自:[掘金翻译计划](https://juejin.cn/translate) - + Podman是由libpod库提供一个实用的程序,可以被用于创建和管理容器。 diff --git a/docs/tutorials/rootless_tutorial.md b/docs/tutorials/rootless_tutorial.md index d9cf68a20..67434ed3c 100644 --- a/docs/tutorials/rootless_tutorial.md +++ b/docs/tutorials/rootless_tutorial.md @@ -1,4 +1,4 @@ - + # Basic Setup and Use of Podman in a Rootless environment. 
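The go.mod hunk below drops the direct dtylman/scp and golang.org/x/crypto requirements and bumps containers/buildah and containers/common, with containers/common now supplying the SSH plumbing. Below is a minimal sketch of dialing a remote Podman service through the new containers/common/pkg/ssh package. It assumes that API exactly as it is used in the rewritten pkg/bindings/connection.go later in this diff; the connection URI, identity path, and ping endpoint are illustrative placeholders, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"net/url"

	"github.com/containers/common/pkg/ssh"
)

func main() {
	// Placeholder URI for a rootless Podman service reachable over SSH.
	uri := "ssh://core@example.com:22/run/user/1000/podman/podman.sock"
	_url, err := url.Parse(uri)
	if err != nil {
		panic(err)
	}

	// "golang" selects the built-in SSH client; the new podman --ssh=native
	// mode uses the installed ssh binary and containers.conf settings instead.
	conn, err := ssh.Dial(&ssh.ConnectionDialOptions{
		Host:     uri,
		Identity: "/home/core/.ssh/id_ed25519", // placeholder key path
		User:     _url.User,
		Port:     22,
	}, "golang")
	if err != nil {
		panic(err)
	}

	// Every request is tunneled through the SSH connection to the remote
	// unix socket; the HTTP host below is a dummy that the transport ignores.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return ssh.DialNet(conn, "unix", _url)
			},
		},
	}
	resp, err := client.Get("http://d/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

This mirrors the Dial/DialNet pair that replaces the hand-rolled sshClient helper removed from pkg/bindings/connection.go further down.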
@@ -11,8 +11,8 @@ require ( github.com/container-orchestrated-devices/container-device-interface v0.4.0 github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.1.1 - github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab - github.com/containers/common v0.49.1-0.20220729221035-246800047d46 + github.com/containers/buildah v1.27.0 + github.com/containers/common v0.49.2-0.20220809074359-b0ea008ba661 github.com/containers/conmon v2.0.20+incompatible github.com/containers/image/v5 v5.22.0 github.com/containers/ocicrypt v1.1.5 @@ -27,7 +27,6 @@ require ( github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/docker/go-units v0.4.0 - github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4 github.com/fsnotify/fsnotify v1.5.4 github.com/ghodss/yaml v1.0.0 github.com/godbus/dbus/v5 v5.1.0 @@ -61,7 +60,6 @@ require ( github.com/vbauerster/mpb/v7 v7.4.2 github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4 go.etcd.io/bbolt v1.3.6 - golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 @@ -392,15 +392,13 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= -github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab h1:NeI0DOkTf3Tn4OpdjMhMubAfTPs2oCO5jUY5wnpv4qk= -github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab/go.mod h1:iVtQtU6a+pbETBqIzg0oAWW3gTR1ItrAihJpLFFppmA= -github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a/go.mod h1:1dA7JPGoSi83kjf5H4NIrGANyLOULyvFqV1bwvYFEek= -github.com/containers/common v0.49.1-0.20220729221035-246800047d46 h1:BNNV+JlPYSmaa9rTapL9kh2JZrg7hmWwi/VrIY/KH1E= -github.com/containers/common v0.49.1-0.20220729221035-246800047d46/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg= +github.com/containers/buildah v1.27.0 h1:LJ1ks7vKxwPzJGr5BWVvigbtVL9w7XeHtNEmiIOPJqI= +github.com/containers/buildah v1.27.0/go.mod h1:anH3ExvDXRNP9zLQCrOc1vWb5CrhqLF/aYFim4tslvA= +github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg= +github.com/containers/common v0.49.2-0.20220809074359-b0ea008ba661 h1:2Ldzg1st4REr5uUJRhjsye1zCbu0i/89RBh87Xc/cTY= +github.com/containers/common v0.49.2-0.20220809074359-b0ea008ba661/go.mod h1:eT2iSsNzjOlF5VFLkyj9OU2SXznURvEYndsioQImuoE= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf/go.mod h1:0+N0ZM9mgMmoZZc6uNcgnEsbX85Ne7b29cIW5lqWwVU= -github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c/go.mod h1:ykVAVRj4DhQNMHZDVU+KCtXjWBKpqiUe669eF0WBEEc= github.com/containers/image/v5 v5.22.0 h1:KemxPmD4D2YYOFZN2SgoTk7nBFcnwPiPW0MqjYtknSE= github.com/containers/image/v5 v5.22.0/go.mod h1:D8Ksv2RNB8qLJ7xe1P3rgJJOSQpahA6amv2Ax++/YO4= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a 
h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= @@ -416,9 +414,6 @@ github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc= -github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s= -github.com/containers/storage v1.41.1-0.20220712184034-d26be7b27860/go.mod h1:uu6HCcijN30xRxW1ZuZRngwFGOlH5NpBWYiNBnDQNRw= -github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a/go.mod h1:4DfR+cPpkXKhJnnyydD3z82DXrnTBT63y1k0QWtM2i4= github.com/containers/storage v1.42.0 h1:zm2AQD4NDeTB3JQ8X+Wo5+VRqNB+b4ocEd7Qj6ylPJA= github.com/containers/storage v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -515,8 +510,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4 h1:Tc//0LMiRsUsOIu4S+HFKleax9X1+3SRKo+36ldZX0c= -github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4/go.mod h1:jN1ZaUPSNA8jm10nmaRLky84qV/iCeiHmcEf3EbP+dc= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= @@ -1041,7 +1034,6 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= @@ -1049,6 +1041,7 @@ github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -1308,7 +1301,6 
@@ github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab h1:Y github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.0.0-20190417131837-cd1349b7c47e/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 h1:Rf+QsQGxrYCia8mVyOPnoQZ+vJkZGL+ESWBDUM5s9cQ= github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= @@ -1347,6 +1339,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1762,8 +1756,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/hack/markdown-preprocess b/hack/markdown-preprocess index 6d2675cc4..86667a32d 100755 --- a/hack/markdown-preprocess +++ b/hack/markdown-preprocess @@ -5,6 +5,7 @@ import glob import os +import re import sys def main(): @@ -28,11 +29,11 @@ def process(infile): # Some options are the same between containers and pods; determine # which description to use from the name of the source man page. pod_or_container = 'container' - if '-pod-' in infile: + if '-pod-' in infile or '-kube-' in infile: pod_or_container = 'pod' # Sometimes a man page includes the subcommand. 
- subcommand = removesuffix(removeprefix(infile,"podman-"),".1.md.in").replace("-", " ") + subcommand = podman_subcommand(infile) # foo.md.in -> foo.md -- but always write to a tmpfile outfile = os.path.splitext(infile)[0] @@ -46,42 +47,65 @@ def process(infile): if line.startswith('@@option '): _, optionname = line.strip().split(" ") optionfile = os.path.join("options", optionname + '.md') - fh_out.write("[//]: # (BEGIN included file " + optionfile + ")\n") + + # Comment intended to help someone viewing the .md file. + # Leading newline is important because if two lines are + # consecutive without a break, sphinx (but not go-md2man) + # treats them as one line and will unwantedly render the + # comment in its output. + fh_out.write("\n[//]: # (BEGIN included file " + optionfile + ")\n") with open(optionfile, 'r') as fh_optfile: for opt_line in fh_optfile: - opt_line = opt_line.replace('<POD-OR-CONTAINER>', pod_or_container) - opt_line = opt_line.replace('<SUBCOMMAND>', subcommand) + opt_line = replace_type(opt_line, pod_or_container) + opt_line = opt_line.replace('<<subcommand>>', subcommand) fh_out.write(opt_line) - - # Weird special case: options/image-volume.md ends in a - # list, and in markdown lists are continued across lines, - # so without an intervening blank line the '[//]' comment - # becomes part of the final list entry. - if opt_line.startswith('-'): - fh_out.write("\n") - - fh_out.write("[//]: # (END included file " + optionfile + ")\n") + fh_out.write("\n[//]: # (END included file " + optionfile + ")\n") else: fh_out.write(line) os.chmod(outfile_tmp, 0o444) os.rename(outfile_tmp, outfile) -# str.removeprefix() is python 3.9+, we need to support older versions on mac -def removeprefix(string: str, prefix: str) -> str: - if string.startswith(prefix): - return string[len(prefix):] - else: - return string[:] - -# str.removesuffix() is python 3.9+, we need to support older versions on mac -def removesuffix(string: str, suffix: str) -> str: - # suffix='' should not call self[:-0]. - if suffix and string.endswith(suffix): - return string[:-len(suffix)] - else: - return string[:] +# Given a file path of the form podman-foo-bar.1.md.in, return "foo bar" +def podman_subcommand(string: str) -> str: + if string.startswith("podman-"): + string = string[len("podman-"):] + if string.endswith(".1.md.in"): + string = string[:-len(".1.md.in")] + return string.replace("-", " ") + +# Replace instances of '<<pod|container>>' with the desired one (based on +# 'type' which is 'pod' or 'container'). +def replace_type(line: str, type: str) -> str: + # Internal helper function: determines the desired half of the <a|b> string + def replwith(matchobj): + lhs, rhs = matchobj[0].split('|') + # Strip off '<<' and '>>' + lhs = lhs[2:] + rhs = rhs[:len(rhs)-2] + + # Check both sides for 'pod' followed by (non-"m" or end-of-string). + # The non-m prevents us from triggering on 'podman', which could + # conceivably be present in both sides. And we check for 'pod', + # not 'container', because it's possible to have something like + # <<container in pod|container>>. 
+ if re.match('pod([^m]|$)', lhs, re.IGNORECASE): + if re.match('pod([^m]|$)', rhs, re.IGNORECASE): + raise Exception("'%s' matches 'pod' in both left and right sides" % matchobj[0]) + # Only left-hand side has "pod" + if type == 'pod': + return lhs + else: + return rhs + else: + if not re.match('pod([^m]|$)', rhs, re.IGNORECASE): + raise Exception("'%s' does not match 'pod' in either side" % matchobj[0]) + if type == 'pod': + return rhs + else: + return lhs + return re.sub('<<[^\|>]+\|[^\|>]+>>', replwith, line) if __name__ == "__main__": main() diff --git a/hack/markdown-preprocess-review b/hack/markdown-preprocess-review new file mode 100755 index 000000000..a487265ad --- /dev/null +++ b/hack/markdown-preprocess-review @@ -0,0 +1,132 @@ +#!/usr/bin/perl + +(our $ME = $0) =~ s|^.*/||; + +use v5.20; + +our $DSM = 'docs/source/markdown'; + +my ($oldname, $newname); +my %oldname; +my %changed; +open my $git_diff, '-|', 'git', 'log', '-1', '-p' + or die "$ME: Cannot fork: $!\n"; +while (my $line = <$git_diff>) { + chomp $line; + + if ($line =~ m!^\-\-\-\s+a/$DSM/(podman-\S+\.md(\.in)?)!) { + $oldname = $1; + $newname = undef; + } + elsif ($line =~ m!^\+\+\+\s+b/$DSM/(podman-\S+\.md(\.in)?)!) { + $newname = $1; + $oldname{$newname} = $oldname; + } + elsif ($newname) { + if ($line =~ s/^-####\s+//) { + $line =~ /^\*\*--(\S+?)\*\*/ + or die "$ME: in $newname: weird '$line'"; + $changed{$newname}{$1}{name} //= $1; + } + # Usually the same, but not for host.container and host.pod.md + elsif ($line =~ /^\+\@\@option\s+(\S+)/) { + my $optfile = $1; + if ($optfile =~ /^(.*)\.\S+$/) { + $changed{$newname}{$1}{name} = $optfile; + } + } + } +} +close $git_diff; + +# Pass 2: read each oldfile, parse changed options +for my $f (sort keys %changed) { + my $oldfile = $oldname{$f}; + open my $git_fh, '-|', 'git', 'show', "HEAD^:$DSM/$oldfile" + or die "$ME: Cannot fork: $!\n"; + my $opt; + while (my $line = <$git_fh>) { + if ($line =~ /^####\s+\*\*--(\S+?)\*\*/) { + $opt = $1; + if ($changed{$f}{$opt}) { + $changed{$f}{$opt}{text} = $line; + } + else { + undef $opt; + } + } + elsif ($line =~ /^#/ || $line =~ /^\@\@option\s/) { + undef $opt; + } + elsif ($opt) { + $changed{$f}{$opt}{text} .= $line; + } + } + close $git_fh + or die "$ME: Error running git on $oldfile\n"; +} + +# Pass 3: write out files +my $tempdir = "/tmp/$ME.diffs"; +system('rm', '-rf', $tempdir); +mkdir $tempdir, 0755; + +for my $md_file (sort keys %changed) { + for my $opt (sort keys %{$changed{$md_file}}) { + my $d = "$tempdir/$changed{$md_file}{$opt}{name}"; + mkdir $d, 0755; + + my $outfile = "$d/$md_file"; + open my $fh, '>', $outfile + or die "$ME: Cannot create $outfile: $!\n"; + # strip all trailing newlines + (my $text = $changed{$md_file}{$opt}{text}) =~ s/\n+$/\n/s; + print { $fh } $text; + close $fh + or die "$ME: Error writing $outfile: $!\n"; + + my $new_text = "$DSM/options/$changed{$md_file}{$opt}{name}.md"; + die "$ME: $md_file: File does not exist: $new_text\n" if ! -e $new_text; + system('cp', $new_text, "$d/zzz-chosen.md"); + } +} + +# Now run diffuse +chdir $tempdir or die; +my @all_opts = glob("*"); +for my $i (0..$#all_opts) { + my $opt = $all_opts[$i]; + chdir "$tempdir/$opt" + or die "??? 
Internal error, cannot cd $tempdir/$opt: $!";
+
+    $| = 1; printf "--%s (%d/%d) ", $opt, $i+1, scalar(@all_opts);
+
+    my @all_files = glob("*");
+    if (all_files_identical(@all_files)) {
+        pop @all_files;
+        print "[identical between @all_files]\n";
+        next;
+    }
+
+    # Prompt
+    print "[Y/n/q] ";
+    my $ans = <STDIN>;
+    next if $ans =~ /^n/i;
+    exit 0 if $ans =~ /^q/i;
+
+    system("diffuse", "-w", glob("*")) == 0
+        or die "Diffuse failed\n";
+}
+
+
+sub all_files_identical {
+    my %sha;
+    for my $f (@_) {
+        my $result = qx{sha256sum $f};
+        $result =~ /^([0-9a-f]+)\s/
+            or die "Internal error: unexpected result from sha256sum $f: $result";
+        $sha{$1}++;
+    }
+
+    return (keys(%sha) == 1);
+}
diff --git a/hack/markdown-preprocess.t b/hack/markdown-preprocess.t
new file mode 100755
index 000000000..a6fe793b1
--- /dev/null
+++ b/hack/markdown-preprocess.t
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+"""
+Tests for markdown-preprocess
+"""
+
+import unittest
+
+# https://stackoverflow.com/questions/66665217/how-to-import-a-python-script-without-a-py-extension
+from importlib.util import spec_from_loader, module_from_spec
+from importlib.machinery import SourceFileLoader
+
+spec = spec_from_loader("mp", SourceFileLoader("mp", "hack/markdown-preprocess"))
+mp = module_from_spec(spec)
+spec.loader.exec_module(mp)
+
+class TestPodReplacer(unittest.TestCase):
+    def test_basic(self):
+        """basic pod|container and vice-versa"""
+        s = '<<container|pod>>'
+        self.assertEqual(mp.replace_type(s, 'pod'), 'pod')
+        self.assertEqual(mp.replace_type(s, 'container'), 'container')
+        s = '<<pod|container>>'
+        self.assertEqual(mp.replace_type(s, 'pod'), 'pod')
+        self.assertEqual(mp.replace_type(s, 'container'), 'container')
+
+    def test_case_insensitive(self):
+        """test case-insensitive replacement of Pod, Container"""
+        s = '<<Pod|Container>>'
+        self.assertEqual(mp.replace_type(s, 'pod'), 'Pod')
+        self.assertEqual(mp.replace_type(s, 'container'), 'Container')
+        s = '<<Container|Pod>>'
+        self.assertEqual(mp.replace_type(s, 'pod'), 'Pod')
+        self.assertEqual(mp.replace_type(s, 'container'), 'Container')
+
+    def test_dont_care_about_podman(self):
+        """we ignore 'podman'"""
+        self.assertEqual(mp.replace_type('<<podman container|pod in podman>>', 'container'), 'podman container')
+
+    def test_exception_both(self):
+        """test that 'pod' on both sides raises exception"""
+        with self.assertRaisesRegex(Exception, "in both left and right sides"):
+            mp.replace_type('<<pod 123|pod 321>>', 'pod')
+
+    def test_exception_neither(self):
+        """test that 'pod' on neither side raises exception"""
+        with self.assertRaisesRegex(Exception, "in either side"):
+            mp.replace_type('<<container 123|container 321>>', 'pod')
+
+class TestPodmanSubcommand(unittest.TestCase):
+    def test_basic(self):
+        """podman subcommand basic test"""
+        self.assertEqual(mp.podman_subcommand("podman-foo.1.md.in"), "foo")
+        self.assertEqual(mp.podman_subcommand("podman-foo-bar.1.md.in"), "foo bar")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 15cfc824e..a00f0b089 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -17,6 +17,7 @@ import (
 	"github.com/containers/buildah"
 	buildahDefine "github.com/containers/buildah/define"
 	"github.com/containers/buildah/pkg/parse"
+	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils" @@ -78,6 +79,8 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { AppArmor string `schema:"apparmor"` BuildArgs string `schema:"buildargs"` CacheFrom string `schema:"cachefrom"` + CacheTo string `schema:"cacheto"` + CacheTTL string `schema:"cachettl"` CgroupParent string `schema:"cgroupparent"` Compression uint64 `schema:"compression"` ConfigureNetwork string `schema:"networkmode"` @@ -386,6 +389,31 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } + var cacheFrom reference.Named + if _, found := r.URL.Query()["cachefrom"]; found { + cacheFrom, err = parse.RepoNameToNamedReference(query.CacheFrom) + if err != nil { + utils.BadRequest(w, "cacheFrom", query.CacheFrom, err) + return + } + } + var cacheTo reference.Named + if _, found := r.URL.Query()["cacheto"]; found { + cacheTo, err = parse.RepoNameToNamedReference(query.CacheTo) + if err != nil { + utils.BadRequest(w, "cacheto", query.CacheTo, err) + return + } + } + var cacheTTL time.Duration + if _, found := r.URL.Query()["cachettl"]; found { + cacheTTL, err = time.ParseDuration(query.CacheTTL) + if err != nil { + utils.BadRequest(w, "cachettl", query.CacheTTL, err) + return + } + } + var buildArgs = map[string]string{} if _, found := r.URL.Query()["buildargs"]; found { if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil { @@ -578,6 +606,9 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { AdditionalTags: additionalTags, Annotations: annotations, CPPFlags: cppflags, + CacheFrom: cacheFrom, + CacheTo: cacheTo, + CacheTTL: cacheTTL, Args: buildArgs, AllPlatforms: query.AllPlatforms, CommonBuildOpts: &buildah.CommonBuildOptions{ diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go index 67943ecf1..82c1971cd 100644 --- a/pkg/api/handlers/libpod/images.go +++ b/pkg/api/handlers/libpod/images.go @@ -12,6 +12,7 @@ import ( "github.com/containers/buildah" "github.com/containers/common/libimage" + "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/manifest" "github.com/containers/podman/v4/libpod" "github.com/containers/podman/v4/libpod/define" @@ -547,6 +548,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) { Ignore bool `schema:"ignore"` LookupManifest bool `schema:"lookupManifest"` Images []string `schema:"images"` + NoPrune bool `schema:"noprune"` }{} if err := decoder.Decode(&query, r.URL.Query()); err != nil { @@ -554,7 +556,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) { return } - opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore, LookupManifest: query.LookupManifest} + opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore, LookupManifest: query.LookupManifest, NoPrune: query.NoPrune} imageEngine := abi.ImageEngine{Libpod: runtime} rmReport, rmErrors := imageEngine.Remove(r.Context(), query.Images, opts) strErrs := errorhandling.ErrorsToStrings(rmErrors) @@ -617,7 +619,7 @@ func ImageScp(w http.ResponseWriter, r *http.Request) { sourceArg := utils.GetName(r) - rep, source, dest, _, err := domainUtils.ExecuteTransfer(sourceArg, query.Destination, []string{}, query.Quiet) + rep, source, dest, _, err := domainUtils.ExecuteTransfer(sourceArg, query.Destination, []string{}, query.Quiet, ssh.GolangMode) if err != nil { utils.Error(w, http.StatusInternalServerError, err) return diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go index 
b994a5857..6d7b052b7 100644 --- a/pkg/bindings/connection.go +++ b/pkg/bindings/connection.go @@ -14,11 +14,9 @@ import ( "time" "github.com/blang/semver/v4" - "github.com/containers/podman/v4/pkg/terminal" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/version" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" ) type APIResponse struct { @@ -74,8 +72,7 @@ func NewConnection(ctx context.Context, uri string) (context.Context, error) { // or ssh://<user>@<host>[:port]/run/podman/podman.sock?secure=True func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) (context.Context, error) { var ( - err error - secure bool + err error ) if v, found := os.LookupEnv("CONTAINER_HOST"); found && uri == "" { uri = v @@ -85,11 +82,6 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) identity = v } - passPhrase := "" - if v, found := os.LookupEnv("CONTAINER_PASSPHRASE"); found { - passPhrase = v - } - _url, err := url.Parse(uri) if err != nil { return nil, fmt.Errorf("value of CONTAINER_HOST is not a valid url: %s: %w", uri, err) @@ -99,11 +91,26 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) var connection Connection switch _url.Scheme { case "ssh": - secure, err = strconv.ParseBool(_url.Query().Get("secure")) + port, err := strconv.Atoi(_url.Port()) if err != nil { - secure = false + return nil, err } - connection, err = sshClient(_url, secure, passPhrase, identity) + conn, err := ssh.Dial(&ssh.ConnectionDialOptions{ + Host: uri, + Identity: identity, + User: _url.User, + Port: port, + }, "golang") + if err != nil { + return nil, err + } + connection = Connection{URI: _url} + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return ssh.DialNet(conn, "unix", _url) + }, + }} case "unix": if !strings.HasPrefix(uri, "unix:///") { // autofix unix://path_element vs unix:///path_element @@ -184,124 +191,6 @@ func pingNewConnection(ctx context.Context) (*semver.Version, error) { return nil, fmt.Errorf("ping response was %d", response.StatusCode) } -func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (Connection, error) { - // if you modify the authmethods or their conditionals, you will also need to make similar - // changes in the client (currently cmd/podman/system/connection/add getUDS). - - var signers []ssh.Signer // order Signers are appended to this list determines which key is presented to server - - if len(identity) > 0 { - s, err := terminal.PublicKey(identity, []byte(passPhrase)) - if err != nil { - return Connection{}, fmt.Errorf("failed to parse identity %q: %w", identity, err) - } - - signers = append(signers, s) - logrus.Debugf("SSH Ident Key %q %s %s", identity, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - - if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { - logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer(s) enabled", sock) - - c, err := net.Dial("unix", sock) - if err != nil { - return Connection{}, err - } - - agentSigners, err := agent.NewClient(c).Signers() - if err != nil { - return Connection{}, err - } - signers = append(signers, agentSigners...) 
- - if logrus.IsLevelEnabled(logrus.DebugLevel) { - for _, s := range agentSigners { - logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - } - } - - var authMethods []ssh.AuthMethod - if len(signers) > 0 { - var dedup = make(map[string]ssh.Signer) - // Dedup signers based on fingerprint, ssh-agent keys override CONTAINER_SSHKEY - for _, s := range signers { - fp := ssh.FingerprintSHA256(s.PublicKey()) - if _, found := dedup[fp]; found { - logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - dedup[fp] = s - } - - var uniq []ssh.Signer - for _, s := range dedup { - uniq = append(uniq, s) - } - authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { - return uniq, nil - })) - } - - if pw, found := _url.User.Password(); found { - authMethods = append(authMethods, ssh.Password(pw)) - } - - if len(authMethods) == 0 { - callback := func() (string, error) { - pass, err := terminal.ReadPassword("Login password:") - return string(pass), err - } - authMethods = append(authMethods, ssh.PasswordCallback(callback)) - } - - port := _url.Port() - if port == "" { - port = "22" - } - - callback := ssh.InsecureIgnoreHostKey() - if secure { - host := _url.Hostname() - if port != "22" { - host = fmt.Sprintf("[%s]:%s", host, port) - } - key := terminal.HostKey(host) - if key != nil { - callback = ssh.FixedHostKey(key) - } - } - - bastion, err := ssh.Dial("tcp", - net.JoinHostPort(_url.Hostname(), port), - &ssh.ClientConfig{ - User: _url.User.Username(), - Auth: authMethods, - HostKeyCallback: callback, - HostKeyAlgorithms: []string{ - ssh.KeyAlgoRSA, - ssh.KeyAlgoDSA, - ssh.KeyAlgoECDSA256, - ssh.KeyAlgoECDSA384, - ssh.KeyAlgoECDSA521, - ssh.KeyAlgoED25519, - }, - Timeout: 5 * time.Second, - }, - ) - if err != nil { - return Connection{}, fmt.Errorf("connection to bastion host (%s) failed: %w", _url.String(), err) - } - - connection := Connection{URI: _url} - connection.Client = &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - return bastion.Dial("unix", _url.Path) - }, - }} - return connection, nil -} - func unixClient(_url *url.URL) Connection { connection := Connection{URI: _url} connection.Client = &http.Client{ diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go index 6883585e2..2615bc516 100644 --- a/pkg/bindings/images/build.go +++ b/pkg/bindings/images/build.go @@ -224,6 +224,15 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if len(options.Manifest) > 0 { params.Set("manifest", options.Manifest) } + if options.CacheFrom != nil { + params.Set("cachefrom", options.CacheFrom.String()) + } + if options.CacheTo != nil { + params.Set("cacheto", options.CacheTo.String()) + } + if int64(options.CacheTTL) != 0 { + params.Set("cachettl", options.CacheTTL.String()) + } if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { params.Set("memswap", strconv.Itoa(int(memSwap))) } diff --git a/pkg/bindings/images/push.go b/pkg/bindings/images/push.go index 8db3726e6..5069dd780 100644 --- a/pkg/bindings/images/push.go +++ b/pkg/bindings/images/push.go @@ -62,6 +62,8 @@ func Push(ctx context.Context, source string, destination string, options *PushO writer := io.Writer(os.Stderr) if options.GetQuiet() { writer = ioutil.Discard + } else if progressWriter := options.GetProgressWriter(); progressWriter != nil { + writer = progressWriter } dec := 
json.NewDecoder(response.Body) diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go index 0664afc1b..7b28c499e 100644 --- a/pkg/bindings/images/types.go +++ b/pkg/bindings/images/types.go @@ -1,6 +1,8 @@ package images import ( + "io" + buildahDefine "github.com/containers/buildah/define" ) @@ -15,6 +17,8 @@ type RemoveOptions struct { Ignore *bool // Confirms if given name is a manifest list and removes it, otherwise returns error. LookupManifest *bool + // Does not remove dangling parent images + NoPrune *bool } //go:generate go run ../generator/generator.go DiffOptions @@ -129,6 +133,10 @@ type PushOptions struct { Format *string // Password for authenticating against the registry. Password *string + // ProgressWriter is a writer where push progress is sent. + // Since the API handler for image push is quiet by default, WithQuiet(false) is necessary for + // the writer to receive progress messages. + ProgressWriter *io.Writer // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify *bool // RemoveSignatures Discard any pre-existing signatures in the image. diff --git a/pkg/bindings/images/types_push_options.go b/pkg/bindings/images/types_push_options.go index 1ae031824..817d873f8 100644 --- a/pkg/bindings/images/types_push_options.go +++ b/pkg/bindings/images/types_push_options.go @@ -2,6 +2,7 @@ package images import ( + "io" "net/url" "github.com/containers/podman/v4/pkg/bindings/internal/util" @@ -107,6 +108,21 @@ func (o *PushOptions) GetPassword() string { return *o.Password } +// WithProgressWriter set field ProgressWriter to given value +func (o *PushOptions) WithProgressWriter(value io.Writer) *PushOptions { + o.ProgressWriter = &value + return o +} + +// GetProgressWriter returns value of field ProgressWriter +func (o *PushOptions) GetProgressWriter() io.Writer { + if o.ProgressWriter == nil { + var z io.Writer + return z + } + return *o.ProgressWriter +} + // WithSkipTLSVerify set field SkipTLSVerify to given value func (o *PushOptions) WithSkipTLSVerify(value bool) *PushOptions { o.SkipTLSVerify = &value diff --git a/pkg/bindings/images/types_remove_options.go b/pkg/bindings/images/types_remove_options.go index 559ebcfd5..8972ac93c 100644 --- a/pkg/bindings/images/types_remove_options.go +++ b/pkg/bindings/images/types_remove_options.go @@ -76,3 +76,18 @@ func (o *RemoveOptions) GetLookupManifest() bool { } return *o.LookupManifest } + +// WithNoPrune set field NoPrune to given value +func (o *RemoveOptions) WithNoPrune(value bool) *RemoveOptions { + o.NoPrune = &value + return o +} + +// GetNoPrune returns value of field NoPrune +func (o *RemoveOptions) GetNoPrune() bool { + if o.NoPrune == nil { + var z bool + return z + } + return *o.NoPrune +} diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go index 8f76ce456..9c9796661 100644 --- a/pkg/bindings/test/images_test.go +++ b/pkg/bindings/test/images_test.go @@ -379,6 +379,10 @@ var _ = Describe("Podman images", func() { Expect(err).To(HaveOccurred()) }) + It("Image Push", func() { + Skip("TODO: implement test for image push to registry") + }) + It("Build no options", func() { results, err := images.Build(bt.conn, []string{"fixture/Containerfile"}, entities.BuildOptions{}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go index 7048cd1d2..3ba507750 100644 --- a/pkg/domain/entities/containers.go +++ b/pkg/domain/entities/containers.go @@ -202,6 +202,7 @@ type CheckpointOptions struct { type
CheckpointReport struct { Err error `json:"-"` Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"RawInput"` RuntimeDuration int64 `json:"runtime_checkpoint_duration"` CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } @@ -228,6 +229,7 @@ type RestoreOptions struct { type RestoreReport struct { Err error `json:"-"` Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"RawInput"` RuntimeDuration int64 `json:"runtime_restore_duration"` CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } @@ -374,6 +376,7 @@ type ContainerCleanupOptions struct { type ContainerCleanupReport struct { CleanErr error Id string //nolint:revive,stylecheck + RawInput string RmErr error RmiErr error } @@ -388,8 +391,9 @@ type ContainerInitOptions struct { // ContainerInitReport describes the results of a // container init type ContainerInitReport struct { - Err error - Id string //nolint:revive,stylecheck + Err error + Id string //nolint:revive,stylecheck + RawInput string } // ContainerMountOptions describes the input values for mounting containers diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go index 32faa74af..c1a4ffdf3 100644 --- a/pkg/domain/entities/engine.go +++ b/pkg/domain/entities/engine.go @@ -52,4 +52,5 @@ type PodmanConfig struct { Runroot string StorageDriver string StorageOpts []string + SSHMode string } diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go index 5f76ae50b..b8b694873 100644 --- a/pkg/domain/entities/engine_image.go +++ b/pkg/domain/entities/engine_image.go @@ -4,6 +4,7 @@ import ( "context" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/podman/v4/pkg/domain/entities/reports" ) @@ -22,7 +23,7 @@ type ImageEngine interface { Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error) Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error - Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error + Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error) SetTrust(ctx context.Context, args []string, options SetTrustOptions) error ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error) diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index dad2dc6cc..21c1372b9 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -94,6 +94,8 @@ type ImageRemoveOptions struct { Ignore bool // Confirms if given name is a manifest list and removes it, otherwise returns error. LookupManifest bool + // NoPrune will not remove dangling images + NoPrune bool } // ImageRemoveReport is the response for removing one or more image(s) from storage diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 2820032c9..5b5bc665e 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -39,6 +39,7 @@ import ( // is specified. It also returns a list of the corresponding input name used to lookup each container. 
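// Callers typically zip the two return values into an ID-to-raw-input table
// so each report can echo back exactly the name or ID the user typed; a
// sketch of the recurring caller-side pattern (variable names illustrative):
//
//	ctrs, rawInputs, err := getContainersAndInputByContext(all, latest, names, nil, runtime)
//	if err != nil {
//		return nil, err
//	}
//	idToRawInput := map[string]string{}
//	if len(rawInputs) == len(ctrs) {
//		for i := range ctrs {
//			idToRawInput[ctrs[i].ID()] = rawInputs[i]
//		}
//	}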
func getContainersAndInputByContext(all, latest bool, names []string, filters map[string][]string, runtime *libpod.Runtime) (ctrs []*libpod.Container, rawInput []string, err error) { var ctr *libpod.Container + var filteredCtrs []*libpod.Container ctrs = []*libpod.Container{} filterFuncs := make([]libpod.ContainerFilter, 0, len(filters)) @@ -57,7 +58,17 @@ func getContainersAndInputByContext(all, latest bool, names []string, filters ma } rawInput = []string{} for _, candidate := range ctrs { - rawInput = append(rawInput, candidate.ID()) + if len(names) > 0 { + for _, name := range names { + if candidate.ID() == name || candidate.Name() == name { + rawInput = append(rawInput, candidate.ID()) + filteredCtrs = append(filteredCtrs, candidate) + } + } + ctrs = filteredCtrs + } else { + rawInput = append(rawInput, candidate.ID()) + } } case all: ctrs, err = runtime.GetAllContainers() @@ -610,8 +621,9 @@ func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string, func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) { var ( - err error - cons []*libpod.Container + ctrs []*libpod.Container + rawInputs []string + err error ) checkOpts := libpod.ContainerCheckpointOptions{ Keep: options.Keep, @@ -628,24 +640,34 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ CreateImage: options.CreateImage, } + idToRawInput := map[string]string{} if options.All { running := func(c *libpod.Container) bool { state, _ := c.State() return state == define.ContainerStateRunning } - cons, err = ic.Libpod.GetContainers(running) + ctrs, err = ic.Libpod.GetContainers(running) + if err != nil { + return nil, err + } } else { - cons, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod) - } - if err != nil { - return nil, err + ctrs, rawInputs, err = getContainersAndInputByContext(false, options.Latest, namesOrIds, nil, ic.Libpod) + if err != nil { + return nil, err + } + if len(rawInputs) == len(ctrs) { + for i := range ctrs { + idToRawInput[ctrs[i].ID()] = rawInputs[i] + } + } } - reports := make([]*entities.CheckpointReport, 0, len(cons)) - for _, con := range cons { - criuStatistics, runtimeCheckpointDuration, err := con.Checkpoint(ctx, checkOpts) + reports := make([]*entities.CheckpointReport, 0, len(ctrs)) + for _, c := range ctrs { + criuStatistics, runtimeCheckpointDuration, err := c.Checkpoint(ctx, checkOpts) reports = append(reports, &entities.CheckpointReport{ Err: err, - Id: con.ID(), + Id: c.ID(), + RawInput: idToRawInput[c.ID()], RuntimeDuration: runtimeCheckpointDuration, CRIUStatistics: criuStatistics, }) @@ -655,7 +677,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []string, options entities.RestoreOptions) ([]*entities.RestoreReport, error) { var ( - containers []*libpod.Container + ctrs []*libpod.Container checkpointImageImportErrors []error err error ) @@ -682,19 +704,21 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st }, } + idToRawInput := map[string]string{} switch { case options.Import != "": - containers, err = checkpoint.CRImportCheckpointTar(ctx, ic.Libpod, options) + ctrs, err = checkpoint.CRImportCheckpointTar(ctx, ic.Libpod, options) case options.All: - containers, err = ic.Libpod.GetContainers(filterFuncs...) + ctrs, err = ic.Libpod.GetContainers(filterFuncs...) 
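// Restore targets are resolved in order: an --import tarball wins, then --all
// (via the pre-built filter functions), then --latest, and finally each
// argument is looked up first as a container and, failing that, as a
// checkpoint image to mount and import. A compressed, illustrative sketch of
// that per-name fallback:
//
//	if c, err := ic.Libpod.LookupContainer(nameOrID); err == nil {
//		ctrs = append(ctrs, c)
//		idToRawInput[c.ID()] = nameOrID
//	} else {
//		// not a container name; try nameOrID as a checkpoint image instead
//	}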
case options.Latest: - containers, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod) + ctrs, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod) default: for _, nameOrID := range namesOrIds { logrus.Debugf("look up container: %q", nameOrID) - ctr, err := ic.Libpod.LookupContainer(nameOrID) + c, err := ic.Libpod.LookupContainer(nameOrID) if err == nil { - containers = append(containers, ctr) + ctrs = append(ctrs, c) + idToRawInput[c.ID()] = nameOrID } else { // If container was not found, check if this is a checkpoint image logrus.Debugf("look up image: %q", nameOrID) @@ -712,7 +736,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st if err != nil { return nil, err } - importedContainers, err := checkpoint.CRImportCheckpoint(ctx, ic.Libpod, options, mountPoint) + importedCtrs, err := checkpoint.CRImportCheckpoint(ctx, ic.Libpod, options, mountPoint) if err != nil { // CRImportCheckpoint is expected to import exactly one container from checkpoint image checkpointImageImportErrors = append( @@ -720,7 +744,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st fmt.Errorf("unable to import checkpoint from image: %q: %v", nameOrID, err), ) } else { - containers = append(containers, importedContainers[0]) + ctrs = append(ctrs, importedCtrs[0]) } } } @@ -729,12 +753,13 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st return nil, err } - reports := make([]*entities.RestoreReport, 0, len(containers)) - for _, con := range containers { - criuStatistics, runtimeRestoreDuration, err := con.Restore(ctx, restoreOptions) + reports := make([]*entities.RestoreReport, 0, len(ctrs)) + for _, c := range ctrs { + criuStatistics, runtimeRestoreDuration, err := c.Restore(ctx, restoreOptions) reports = append(reports, &entities.RestoreReport{ Err: err, - Id: con.ID(), + Id: c.ID(), + RawInput: idToRawInput[c.ID()], RuntimeDuration: runtimeRestoreDuration, CRIUStatistics: criuStatistics, }) @@ -898,38 +923,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID s func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { reports := []*entities.ContainerStartReport{} var exitCode = define.ExecErrorCodeGeneric - containersNamesOrIds := namesOrIds - all := options.All - if len(options.Filters) > 0 { - all = false - filterFuncs := make([]libpod.ContainerFilter, 0, len(options.Filters)) - if len(options.Filters) > 0 { - for k, v := range options.Filters { - generatedFunc, err := dfilters.GenerateContainerFilterFuncs(k, v, ic.Libpod) - if err != nil { - return nil, err - } - filterFuncs = append(filterFuncs, generatedFunc) - } - } - candidates, err := ic.Libpod.GetContainers(filterFuncs...) 
- if err != nil { - return nil, err - } - containersNamesOrIds = []string{} - for _, candidate := range candidates { - if options.All { - containersNamesOrIds = append(containersNamesOrIds, candidate.ID()) - continue - } - for _, nameOrID := range namesOrIds { - if nameOrID == candidate.ID() || nameOrID == candidate.Name() { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - } - } - } - } - ctrs, rawInputs, err := getContainersAndInputByContext(all, options.Latest, containersNamesOrIds, options.Filters, ic.Libpod) + ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, namesOrIds, options.Filters, ic.Libpod) if err != nil { return nil, err } @@ -1223,14 +1217,20 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []strin } func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []string, options entities.ContainerCleanupOptions) ([]*entities.ContainerCleanupReport, error) { - reports := []*entities.ContainerCleanupReport{} - ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod) + ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, namesOrIds, nil, ic.Libpod) if err != nil { return nil, err } + idToRawInput := map[string]string{} + if len(rawInputs) == len(ctrs) { + for i := range ctrs { + idToRawInput[ctrs[i].ID()] = rawInputs[i] + } + } + reports := []*entities.ContainerCleanupReport{} for _, ctr := range ctrs { var err error - report := entities.ContainerCleanupReport{Id: ctr.ID()} + report := entities.ContainerCleanupReport{Id: ctr.ID(), RawInput: idToRawInput[ctr.ID()]} if options.Exec != "" { if options.Remove { @@ -1271,13 +1271,19 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st } func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []string, options entities.ContainerInitOptions) ([]*entities.ContainerInitReport, error) { - ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod) + ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, namesOrIds, nil, ic.Libpod) if err != nil { return nil, err } + idToRawInput := map[string]string{} + if len(rawInputs) == len(ctrs) { + for i := range ctrs { + idToRawInput[ctrs[i].ID()] = rawInputs[i] + } + } reports := make([]*entities.ContainerInitReport, 0, len(ctrs)) for _, ctr := range ctrs { - report := entities.ContainerInitReport{Id: ctr.ID()} + report := entities.ContainerInitReport{Id: ctr.ID(), RawInput: idToRawInput[ctr.ID()]} err := ctr.Init(ctx, ctr.PodID() != "") // If we're initializing all containers, ignore invalid state errors diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 94178a8e2..77d1bf0db 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -18,6 +18,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -565,6 +566,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie libimageOptions.Force = opts.Force libimageOptions.Ignore = opts.Ignore libimageOptions.LookupManifest = opts.LookupManifest + libimageOptions.NoPrune = opts.NoPrune if !opts.All { libimageOptions.Filters = append(libimageOptions.Filters, "intermediate=false") } @@ -581,7 +583,7 @@ 
func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie rmErrors = libimageErrors - return + return report, rmErrors } // Shutdown Libpod engine @@ -682,8 +684,8 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie return nil, nil } -func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error { - rep, source, dest, flags, err := domainUtils.ExecuteTransfer(src, dst, parentFlags, quiet) +func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error { + rep, source, dest, flags, err := domainUtils.ExecuteTransfer(src, dst, parentFlags, quiet, sshMode) if err != nil { return err } diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 225aee017..d49f029d5 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -362,6 +362,12 @@ func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string, } func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, opts entities.CheckpointOptions) ([]*entities.CheckpointReport, error) { + var ( + err error + ctrs []entities.ListContainer + rawInputs []string + idToRawInput = map[string]string{} + ) options := new(containers.CheckpointOptions) options.WithFileLocks(opts.FileLocks) options.WithIgnoreRootfs(opts.IgnoreRootFS) @@ -374,11 +380,6 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ options.WithLeaveRunning(opts.LeaveRunning) options.WithWithPrevious(opts.WithPrevious) - var ( - err error - ctrs = []entities.ListContainer{} - ) - if opts.All { allCtrs, err := getContainersByContext(ic.ClientCtx, true, false, []string{}) if err != nil { @@ -391,10 +392,15 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ } } } else { - ctrs, err = getContainersByContext(ic.ClientCtx, false, false, namesOrIds) + ctrs, rawInputs, err = getContainersAndInputByContext(ic.ClientCtx, false, false, namesOrIds, nil) if err != nil { return nil, err } + if len(rawInputs) == len(ctrs) { + for i := range ctrs { + idToRawInput[ctrs[i].ID] = rawInputs[i] + } + } } reports := make([]*entities.CheckpointReport, 0, len(ctrs)) for _, c := range ctrs { @@ -402,6 +408,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ if err != nil { reports = append(reports, &entities.CheckpointReport{Id: c.ID, Err: err}) } else { + report.RawInput = idToRawInput[report.Id] reports = append(reports, report) } } @@ -413,6 +420,10 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st return nil, fmt.Errorf("--import-previous is not supported on the remote client") } + var ( + ids []string + idToRawInput = map[string]string{} + ) options := new(containers.RestoreOptions) options.WithFileLocks(opts.FileLocks) options.WithIgnoreRootfs(opts.IgnoreRootFS) @@ -431,10 +442,6 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st report, err := containers.Restore(ic.ClientCtx, "", options) return []*entities.RestoreReport{report}, err } - - var ( - ids = []string{} - ) if opts.All { allCtrs, err := getContainersByContext(ic.ClientCtx, true, false, []string{}) if err != nil { @@ -457,6 +464,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st ctrData, _, err := ic.ContainerInspect(ic.ClientCtx, []string{nameOrID}, 
entities.InspectOptions{}) if err == nil && len(ctrData) > 0 { ids = append(ids, ctrData[0].ID) + idToRawInput[ctrData[0].ID] = nameOrID } else { // If container was not found, check if this is a checkpoint image inspectReport, err := images.GetImage(ic.ClientCtx, nameOrID, getImageOptions) @@ -480,6 +488,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st if err != nil { reports = append(reports, &entities.RestoreReport{Id: id, Err: err}) } + report.RawInput = idToRawInput[report.Id] reports = append(reports, report) } return reports, nil @@ -658,36 +667,7 @@ func logIfRmError(id string, err error, reports []*reports.RmReport) { func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { reports := []*entities.ContainerStartReport{} var exitCode = define.ExecErrorCodeGeneric - containersNamesOrIds := namesOrIds - all := options.All - if len(options.Filters) > 0 { - all = false - containersNamesOrIds = []string{} - opts := new(containers.ListOptions).WithFilters(options.Filters).WithAll(true) - candidates, listErr := containers.List(ic.ClientCtx, opts) - if listErr != nil { - return nil, listErr - } - for _, candidate := range candidates { - if options.All { - containersNamesOrIds = append(containersNamesOrIds, candidate.ID) - continue - } - for _, nameOrID := range namesOrIds { - if nameOrID == candidate.ID { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - continue - } - for _, containerName := range candidate.Names { - if containerName == nameOrID { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - continue - } - } - } - } - } - ctrs, err := getContainersByContext(ic.ClientCtx, all, false, containersNamesOrIds) + ctrs, namesOrIds, err := getContainersAndInputByContext(ic.ClientCtx, options.All, false, namesOrIds, options.Filters) if err != nil { return nil, err } @@ -935,21 +915,28 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st } func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []string, options entities.ContainerInitOptions) ([]*entities.ContainerInitReport, error) { - ctrs, err := getContainersByContext(ic.ClientCtx, options.All, false, namesOrIds) + ctrs, rawInputs, err := getContainersAndInputByContext(ic.ClientCtx, options.All, false, namesOrIds, nil) if err != nil { return nil, err } + idToRawInput := map[string]string{} + if len(rawInputs) == len(ctrs) { + for i := range ctrs { + idToRawInput[ctrs[i].ID] = rawInputs[i] + } + } reports := make([]*entities.ContainerInitReport, 0, len(ctrs)) - for _, ctr := range ctrs { - err := containers.ContainerInit(ic.ClientCtx, ctr.ID, nil) + for _, c := range ctrs { + err := containers.ContainerInit(ic.ClientCtx, c.ID, nil) // When using all, it is NOT considered an error if a container // has already been init'd. 
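// The substring match below is how the remote client detects that case: the
// ErrCtrStateInvalid text comes back over the REST API as a flattened error
// message, so errors.Is cannot be used here and string comparison stands in.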
if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) { err = nil } reports = append(reports, &entities.ContainerInitReport{ - Err: err, - Id: ctr.ID, + Err: err, + RawInput: idToRawInput[c.ID], + Id: c.ID, }) } return reports, nil diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index a0b01dd71..90d558119 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -31,8 +31,17 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all, rawInputs := []string{} switch { case len(filters) > 0: + namesOrIDs = nil for i := range allContainers { - namesOrIDs = append(namesOrIDs, allContainers[i].ID) + if len(namesOrIDs) > 0 { + for _, name := range namesOrIDs { + if name == allContainers[i].ID { + namesOrIDs = append(namesOrIDs, allContainers[i].ID) + } + } + } else { + namesOrIDs = append(namesOrIDs, allContainers[i].ID) + } } case all: for i := range allContainers { diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 4f79325fd..bb3014099 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -12,6 +12,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/podman/v4/pkg/bindings/images" @@ -28,7 +29,7 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.Boo } func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) { - options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All).WithLookupManifest(opts.LookupManifest) + options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All).WithLookupManifest(opts.LookupManifest).WithNoPrune(opts.NoPrune) return images.Remove(ir.ClientCtx, imagesArg, options) } @@ -240,7 +241,7 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, opts entities.ImagePushOptions) error { options := new(images.PushOptions) - options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format).WithRemoveSignatures(opts.RemoveSignatures).WithQuiet(opts.Quiet).WithCompressionFormat(opts.CompressionFormat) + options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format).WithRemoveSignatures(opts.RemoveSignatures).WithQuiet(opts.Quiet).WithCompressionFormat(opts.CompressionFormat).WithProgressWriter(opts.Writer) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { @@ -364,7 +365,7 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie return nil, errors.New("not implemented yet") } -func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool) error { +func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) error { options := new(images.ScpOptions) var destination *string diff --git a/pkg/domain/utils/scp.go 
b/pkg/domain/utils/scp.go index 3c73cddd1..44a0d94d7 100644 --- a/pkg/domain/utils/scp.go +++ b/pkg/domain/utils/scp.go @@ -1,31 +1,24 @@ package utils import ( - "bytes" "fmt" "io/ioutil" - "net" "net/url" "os" "os/exec" "os/user" "strconv" "strings" - "time" - - scpD "github.com/dtylman/scp" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/ssh" + "github.com/containers/image/v5/transports/alltransports" "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/terminal" - "github.com/docker/distribution/reference" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" ) -func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entities.ImageLoadReport, *entities.ImageScpOptions, *entities.ImageScpOptions, []string, error) { +func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode ssh.EngineMode) (*entities.ImageLoadReport, *entities.ImageScpOptions, *entities.ImageScpOptions, []string, error) { source := entities.ImageScpOptions{} dest := entities.ImageScpOptions{} sshInfo := entities.ImageScpConnections{} @@ -46,10 +39,6 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entiti return nil, nil, nil, nil, fmt.Errorf("could not make config: %w", err) } - cfg, err := config.ReadCustomConfig() // get ready to set ssh destination if necessary - if err != nil { - return nil, nil, nil, nil, err - } locations := []*entities.ImageScpOptions{} cliConnections := []string{} args := []string{src} @@ -83,9 +72,7 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entiti source.Quiet = quiet source.File = f.Name() // after parsing the arguments, set the file for the save/load dest.File = source.File - if err = os.Remove(source.File); err != nil { // remove the file and simply use its name so podman creates the file upon save. 
avoids umask errors - return nil, nil, nil, nil, err - } + defer os.Remove(source.File) allLocal := true // if we are all localhost, do not validate connections but if we are using one localhost and one non we need to use sshd for _, val := range cliConnections { @@ -98,6 +85,10 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entiti cliConnections = []string{} } + cfg, err := config.ReadCustomConfig() // get ready to set ssh destination if necessary + if err != nil { + return nil, nil, nil, nil, err + } var serv map[string]config.Destination serv, err = GetServiceInformation(&sshInfo, cliConnections, cfg) if err != nil { @@ -109,12 +100,12 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entiti switch { case source.Remote: // if we want to load FROM the remote, dest can either be local or remote in this case - err = SaveToRemote(source.Image, source.File, "", sshInfo.URI[0], sshInfo.Identities[0]) + err = SaveToRemote(source.Image, source.File, "", sshInfo.URI[0], sshInfo.Identities[0], sshMode) if err != nil { return nil, nil, nil, nil, err } if dest.Remote { // we want to load remote -> remote, both source and dest are remote - rep, id, err := LoadToRemote(dest, dest.File, "", sshInfo.URI[1], sshInfo.Identities[1]) + rep, id, err := LoadToRemote(dest, dest.File, "", sshInfo.URI[1], sshInfo.Identities[1], sshMode) if err != nil { return nil, nil, nil, nil, err } @@ -138,7 +129,8 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool) (*entiti if err != nil { return nil, nil, nil, nil, err } - rep, id, err := LoadToRemote(dest, source.File, "", sshInfo.URI[0], sshInfo.Identities[0]) + + rep, id, err := LoadToRemote(dest, source.File, "", sshInfo.URI[0], sshInfo.Identities[0], sshMode) if err != nil { return nil, nil, nil, nil, err } @@ -220,34 +212,37 @@ func LoginUser(user string) (*exec.Cmd, error) { // loadToRemote takes image and remote connection information. 
it connects to the specified client // and copies the saved image dir over to the remote host and then loads it onto the machine // returns a string containing output or an error -func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, url *url.URL, iden string) (string, string, error) { - dial, remoteFile, err := CreateConnection(url, iden) +func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, url *url.URL, iden string, sshEngine ssh.EngineMode) (string, string, error) { + port, err := strconv.Atoi(url.Port()) if err != nil { return "", "", err } - defer dial.Close() - n, err := scpD.CopyTo(dial, localFile, remoteFile) + remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Port: port, User: url.User, Args: []string{"mktemp"}}, sshEngine) if err != nil { - errOut := strconv.Itoa(int(n)) + " Bytes copied before error" - return " ", "", fmt.Errorf("%v: %w", errOut, err) + return "", "", err } - var run string - if tag != "" { - return "", "", fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) + + opts := ssh.ConnectionScpOptions{User: url.User, Identity: iden, Port: port, Source: localFile, Destination: "ssh://" + url.User.String() + "@" + url.Hostname() + ":" + remoteFile} + scpRep, err := ssh.Scp(&opts, sshEngine) + if err != nil { + return "", "", err } - podman := os.Args[0] - run = podman + " image load --input=" + remoteFile + ";rm " + remoteFile // run ssh image load of the file copied via scp - out, err := ExecRemoteCommand(dial, run) + out, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Port: port, User: url.User, Args: []string{"podman", "image", "load", "--input=" + scpRep + ";", "rm", scpRep}}, sshEngine) if err != nil { return "", "", err } - rep := strings.TrimSuffix(string(out), "\n") + if tag != "" { + return "", "", fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) + } + rep := strings.TrimSuffix(out, "\n") outArr := strings.Split(rep, " ") id := outArr[len(outArr)-1] if len(dest.Tag) > 0 { // tag the remote image using the output ID - run = podman + " tag " + id + " " + dest.Tag - _, err = ExecRemoteCommand(dial, run) + _, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.Hostname(), Port: port, User: url.User, Args: []string{"podman", "image", "tag", id, dest.Tag}}, sshEngine) + if err != nil { + return "", "", err + } if err != nil { return "", "", err } @@ -258,94 +253,37 @@ func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, u // saveToRemote takes image information and remote connection information. it connects to the specified client // and saves the specified image on the remote machine and then copies it to the specified local location // returns an error if one occurs. -func SaveToRemote(image, localFile string, tag string, uri *url.URL, iden string) error { - dial, remoteFile, err := CreateConnection(uri, iden) - - if err != nil { - return err - } - defer dial.Close() - +func SaveToRemote(image, localFile string, tag string, uri *url.URL, iden string, sshEngine ssh.EngineMode) error { if tag != "" { return fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) } - podman := os.Args[0] - run := podman + " image save " + image + " --format=oci-archive --output=" + remoteFile // run ssh image load of the file copied via scp. Files are reverse in this case... 
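// The rewritten body below drops the dial-and-copy plumbing in favor of four
// common/pkg/ssh primitives; schematically (options elided for brevity):
//
//	remoteFile, _ := ssh.Exec( /* "mktemp" */ )                 // temp file on the remote
//	ssh.Exec( /* "podman image save --output" remoteFile */ )   // save the image remotely
//	ssh.Scp( /* remoteFile -> localFile */ )                    // copy the archive back
//	ssh.Exec( /* "rm" remoteFile */ )                           // best-effort cleanup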
- _, err = ExecRemoteCommand(dial, run) + + port, err := strconv.Atoi(uri.Port()) if err != nil { return err } - n, err := scpD.CopyFrom(dial, remoteFile, localFile) - if _, conErr := ExecRemoteCommand(dial, "rm "+remoteFile); conErr != nil { - logrus.Errorf("Removing file on endpoint: %v", conErr) - } - if err != nil { - errOut := strconv.Itoa(int(n)) + " Bytes copied before error" - return fmt.Errorf("%v: %w", errOut, err) - } - return nil -} -// makeRemoteFile creates the necessary remote file on the host to -// save or load the image to. returns a string with the file name or an error -func MakeRemoteFile(dial *ssh.Client) (string, error) { - run := "mktemp" - remoteFile, err := ExecRemoteCommand(dial, run) + remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Port: port, User: uri.User, Args: []string{"mktemp"}}, sshEngine) if err != nil { - return "", err + return err } - return strings.TrimSuffix(string(remoteFile), "\n"), nil -} -// createConnections takes a boolean determining which ssh client to dial -// and returns the dials client, its newly opened remote file, and an error if applicable. -func CreateConnection(url *url.URL, iden string) (*ssh.Client, string, error) { - cfg, err := ValidateAndConfigure(url, iden) + _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Port: port, User: uri.User, Args: []string{"podman", "image", "save", image, "--format", "oci-archive", "--output", remoteFile}}, sshEngine) if err != nil { - return nil, "", err + return err } - dialAdd, err := ssh.Dial("tcp", url.Host, cfg) // dial the client + + opts := ssh.ConnectionScpOptions{User: uri.User, Identity: iden, Port: port, Source: "ssh://" + uri.User.String() + "@" + uri.Hostname() + ":" + remoteFile, Destination: localFile} + scpRep, err := ssh.Scp(&opts, sshEngine) if err != nil { - return nil, "", fmt.Errorf("failed to connect: %w", err) + return err } - file, err := MakeRemoteFile(dialAdd) + _, err = ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Port: port, User: uri.User, Args: []string{"rm", scpRep}}, sshEngine) if err != nil { - return nil, "", err + logrus.Errorf("Removing file on endpoint: %v", err) } - return dialAdd, file, nil -} - -// GetSerivceInformation takes the parsed list of hosts to connect to and validates the information -func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections []string, cfg *config.Config) (map[string]config.Destination, error) { - var serv map[string]config.Destination - var urlS string - var iden string - for i, val := range cliConnections { - splitEnv := strings.SplitN(val, "::", 2) - sshInfo.Connections = append(sshInfo.Connections, splitEnv[0]) - conn, found := cfg.Engine.ServiceDestinations[sshInfo.Connections[i]] - if found { - urlS = conn.URI - iden = conn.Identity - } else { // no match, warn user and do a manual connection. - urlS = "ssh://" + sshInfo.Connections[i] - iden = "" - logrus.Warnf("Unknown connection name given. 
Please use system connection add to specify the default remote socket location") - } - urlFinal, err := url.Parse(urlS) // create an actual url to pass to exec command - if err != nil { - return nil, err - } - if urlFinal.User.Username() == "" { - if urlFinal.User, err = GetUserInfo(urlFinal); err != nil { - return nil, err - } - } - sshInfo.URI = append(sshInfo.URI, urlFinal) - sshInfo.Identities = append(sshInfo.Identities, iden) - } - return serv, nil + return nil } // execPodman executes the podman save/load command given the podman binary @@ -413,18 +351,32 @@ func ParseImageSCPArg(arg string) (*entities.ImageScpOptions, []string, error) { return &location, cliConnections, nil } -// validateImagePortion is a helper function to validate the image name in an SCP argument func ValidateImagePortion(location entities.ImageScpOptions, arg string) (entities.ImageScpOptions, error) { if RemoteArgLength(arg, 1) > 0 { - err := ValidateImageName(strings.Split(arg, "::")[1]) - if err != nil { - return location, err - } - location.Image = strings.Split(arg, "::")[1] // this will get checked/set again once we validate connections + before := strings.Split(arg, "::")[1] + name := ValidateImageName(before) + if before != name { + location.Image = name + } else { + location.Image = before + } // this will get checked/set again once we validate connections } return location, nil } +// validateImageName makes sure that the image given is valid and no injections are occurring +// we simply use this for error checking, not setting the image +func ValidateImageName(input string) string { + // ParseNormalizedNamed transforms a shortname image into its + // full name reference so busybox => docker.io/library/busybox + // we want to keep our shortnames, so we simply return the input unchanged if + // we cannot parse what the user has given us + if ref, err := alltransports.ParseImageName(input); err == nil { + return ref.Transport().Name() + } + return input +} + // validateSCPArgs takes the array of source and destination options and checks for common errors func ValidateSCPArgs(locations []*entities.ImageScpOptions) error { if len(locations) > 2 { @@ -440,17 +392,6 @@ func ValidateSCPArgs(locations []*entities.ImageScpOptions) error { return nil } -// validateImageName makes sure that the image given is valid and no injections are occurring -// we simply use this for error checking, bot setting the image -func ValidateImageName(input string) error { - // ParseNormalizedNamed transforms a shortname image into its - // full name reference so busybox => docker.io/library/busybox - // we want to keep our shortnames, so only return an error if - // we cannot parse what the user has given us - _, err := reference.ParseNormalizedNamed(input) - return err -} - // remoteArgLength is a helper function to simplify the extracting of host argument data // returns an int which contains the length of a specified index in a host::image string func RemoteArgLength(input string, side int) int { @@ -460,23 +401,36 @@ func RemoteArgLength(input string, side int) int { return -1 } -// ExecRemoteCommand takes a ssh client connection and a command to run and executes the -// command on the specified client. 
The function returns the Stdout from the client or the Stderr -func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { - sess, err := dial.NewSession() // new ssh client session - if err != nil { - return nil, err - } - defer sess.Close() - - var buffer bytes.Buffer - var bufferErr bytes.Buffer - sess.Stdout = &buffer // output from client funneled into buffer - sess.Stderr = &bufferErr // err form client funneled into buffer - if err := sess.Run(run); err != nil { // run the command on the ssh client - return nil, fmt.Errorf("%v: %w", bufferErr.String(), err) +// GetServiceInformation takes the parsed list of hosts to connect to and validates the information +func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections []string, cfg *config.Config) (map[string]config.Destination, error) { + var serv map[string]config.Destination + var urlS string + var iden string + for i, val := range cliConnections { + splitEnv := strings.SplitN(val, "::", 2) + sshInfo.Connections = append(sshInfo.Connections, splitEnv[0]) + conn, found := cfg.Engine.ServiceDestinations[sshInfo.Connections[i]] + if found { + urlS = conn.URI + iden = conn.Identity + } else { // no match, warn user and do a manual connection. + urlS = "ssh://" + sshInfo.Connections[i] + iden = "" + logrus.Warnf("Unknown connection name given. Please use system connection add to specify the default remote socket location") + } + urlFinal, err := url.Parse(urlS) // create an actual url to pass to exec command + if err != nil { + return nil, err + } + if urlFinal.User.Username() == "" { + if urlFinal.User, err = GetUserInfo(urlFinal); err != nil { + return nil, err + } + } + sshInfo.URI = append(sshInfo.URI, urlFinal) + sshInfo.Identities = append(sshInfo.Identities, iden) } - return buffer.Bytes(), nil + return serv, nil } func GetUserInfo(uri *url.URL) (*url.Userinfo, error) { @@ -502,79 +456,3 @@ func GetUserInfo(uri *url.URL) (*url.Userinfo, error) { } return url.User(usr.Username), nil } - -// ValidateAndConfigure will take a ssh url and an identity key (rsa and the like) and ensure the information given is valid -// iden iden can be blank to mean no identity key -// once the function validates the information it creates and returns an ssh.ClientConfig. -func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) { - var signers []ssh.Signer - passwd, passwdSet := uri.User.Password() - if iden != "" { // iden might be blank if coming from image scp or if no validation is needed - value := iden - s, err := terminal.PublicKey(value, []byte(passwd)) - if err != nil { - return nil, fmt.Errorf("failed to read identity %q: %w", value, err) - } - signers = append(signers, s) - logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent. - logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock) - - c, err := net.Dial("unix", sock) - if err != nil { - return nil, err - } - agentSigners, err := agent.NewClient(c).Signers() - if err != nil { - return nil, err - } - - signers = append(signers, agentSigners...)
- - if logrus.IsLevelEnabled(logrus.DebugLevel) { - for _, s := range agentSigners { - logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - } - } - var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization - if len(signers) > 0 { - var dedup = make(map[string]ssh.Signer) - for _, s := range signers { - fp := ssh.FingerprintSHA256(s.PublicKey()) - if _, found := dedup[fp]; found { - logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) - } - dedup[fp] = s - } - - var uniq []ssh.Signer - for _, s := range dedup { - uniq = append(uniq, s) - } - authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { - return uniq, nil - })) - } - if passwdSet { // if password authentication is given and valid, add to the list - authMethods = append(authMethods, ssh.Password(passwd)) - } - if len(authMethods) == 0 { - authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) { - pass, err := terminal.ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username())) - return string(pass), err - })) - } - tick, err := time.ParseDuration("40s") - if err != nil { - return nil, err - } - cfg := &ssh.ClientConfig{ - User: uri.User.Username(), - Auth: authMethods, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - Timeout: tick, - } - return cfg, nil -} diff --git a/pkg/machine/applehv/machine.go b/pkg/machine/applehv/machine.go new file mode 100644 index 000000000..35a8e9851 --- /dev/null +++ b/pkg/machine/applehv/machine.go @@ -0,0 +1,70 @@ +//go:build arm64 && !windows && !linux +// +build darwin + +package applehv + +import ( + "time" + + "github.com/containers/podman/v4/pkg/machine" +) + +type Provider struct{} + +var ( + hvProvider = &Provider{} + // vmtype refers to the apple hypervisor (vs qemu, libvirt, krun, etc). + vmtype = "apple" +) + +func GetVirtualizationProvider() machine.Provider { + return hvProvider +} + +const ( + // Some of this will need to change when we are closer to having + // working code. 
+ VolumeTypeVirtfs = "virtfs" + MountType9p = "9p" + dockerSock = "/var/run/docker.sock" + dockerConnectTimeout = 5 * time.Second + apiUpTimeout = 20 * time.Second +) + +type apiForwardingState int + +const ( + noForwarding apiForwardingState = iota + claimUnsupported + notInstalled + machineLocal + dockerGlobal +) + +func (p *Provider) NewMachine(opts machine.InitOptions) (machine.VM, error) { + return nil, machine.ErrNotImplemented +} + +func (p *Provider) LoadVMByName(name string) (machine.VM, error) { + return nil, machine.ErrNotImplemented +} + +func (p *Provider) List(opts machine.ListOptions) ([]*machine.ListResponse, error) { + return nil, machine.ErrNotImplemented +} + +func (p *Provider) IsValidVMName(name string) (bool, error) { + return false, machine.ErrNotImplemented +} + +func (p *Provider) CheckExclusiveActiveVM() (bool, string, error) { + return false, "", machine.ErrNotImplemented +} + +func (p *Provider) RemoveAndCleanMachines() error { + return machine.ErrNotImplemented +} + +func (p *Provider) VMType() string { + return vmtype +} diff --git a/pkg/machine/config.go b/pkg/machine/config.go index 253601dad..5162006db 100644 --- a/pkg/machine/config.go +++ b/pkg/machine/config.go @@ -66,6 +66,7 @@ var ( ErrVMAlreadyExists = errors.New("VM already exists") ErrVMAlreadyRunning = errors.New("VM already running or starting") ErrMultipleActiveVM = errors.New("only one VM can be active at a time") + ErrNotImplemented = errors.New("functionality not implemented") ForwarderBinaryName = "gvproxy" ) diff --git a/pkg/machine/e2e/init_test.go b/pkg/machine/e2e/init_test.go index b246dc4da..859a3ca46 100644 --- a/pkg/machine/e2e/init_test.go +++ b/pkg/machine/e2e/init_test.go @@ -3,7 +3,7 @@ package e2e_test import ( "io/ioutil" "os" - "runtime" + "strconv" "time" "github.com/containers/podman/v4/pkg/machine" @@ -80,7 +80,7 @@ var _ = Describe("podman machine init", func() { It("machine init with cpus, disk size, memory, timezone", func() { name := randomString() i := new(initMachine) - session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withCPUs(2).withDiskSize(102).withMemory(4000).withTimezone("Pacific/Honolulu")).run() + session, err := mb.setName(name).setCmd(i.withImagePath(mb.imagePath).withCPUs(2).withDiskSize(102).withMemory(4096).withTimezone("Pacific/Honolulu")).run() Expect(err).To(BeNil()) Expect(session).To(Exit(0)) @@ -102,18 +102,13 @@ var _ = Describe("podman machine init", func() { Expect(diskSession.outputToString()).To(ContainSubstring("102 GiB")) sshMemory := sshMachine{} - memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "numfmt", "--field", "2", "--from-unit=Ki", "--to-unit=Mi", "|", "sed", "'s/ kB/M/g'", "|", "grep", "MemTotal"})).run() + memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "grep", "-i", "'memtotal'", "|", "grep", "-o", "'[[:digit:]]*'"})).run() Expect(err).To(BeNil()) Expect(memorySession).To(Exit(0)) - switch runtime.GOOS { - // os's handle memory differently - case "linux": - Expect(memorySession.outputToString()).To(ContainSubstring("3822")) - case "darwin": - Expect(memorySession.outputToString()).To(ContainSubstring("3824")) - default: - // add windows when testing on that platform - } + foundMemory, err := strconv.Atoi(memorySession.outputToString()) + Expect(err).To(BeNil()) + Expect(foundMemory).To(BeNumerically(">", 3800000)) + Expect(foundMemory).To(BeNumerically("<", 4200000)) sshTimezone := sshMachine{} 
timezoneSession, err := mb.setName(name).setCmd(sshTimezone.withSSHComand([]string{"date"})).run() diff --git a/pkg/machine/e2e/set_test.go b/pkg/machine/e2e/set_test.go index 4839e33da..a32bb72f2 100644 --- a/pkg/machine/e2e/set_test.go +++ b/pkg/machine/e2e/set_test.go @@ -1,7 +1,7 @@ package e2e_test import ( - "runtime" + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -29,7 +29,7 @@ var _ = Describe("podman machine set", func() { Expect(session).To(Exit(0)) set := setMachine{} - setSession, err := mb.setName(name).setCmd(set.withCPUs(2).withDiskSize(102).withMemory(4000)).run() + setSession, err := mb.setName(name).setCmd(set.withCPUs(2).withDiskSize(102).withMemory(4096)).run() Expect(err).To(BeNil()) Expect(setSession).To(Exit(0)) @@ -56,18 +56,14 @@ var _ = Describe("podman machine set", func() { Expect(diskSession.outputToString()).To(ContainSubstring("102 GiB")) sshMemory := sshMachine{} - memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "numfmt", "--field", "2", "--from-unit=Ki", "--to-unit=Mi", "|", "sed", "'s/ kB/M/g'", "|", "grep", "MemTotal"})).run() + memorySession, err := mb.setName(name).setCmd(sshMemory.withSSHComand([]string{"cat", "/proc/meminfo", "|", "grep", "-i", "'memtotal'", "|", "grep", "-o", "'[[:digit:]]*'"})).run() Expect(err).To(BeNil()) Expect(memorySession).To(Exit(0)) - switch runtime.GOOS { - // it seems macos and linux handle memory differently - case "linux": - Expect(memorySession.outputToString()).To(ContainSubstring("3822")) - case "darwin": - Expect(memorySession.outputToString()).To(ContainSubstring("3824")) - default: - // windows can go here if we ever run tests there - } + foundMemory, err := strconv.Atoi(memorySession.outputToString()) + Expect(err).To(BeNil()) + Expect(foundMemory).To(BeNumerically(">", 3800000)) + Expect(foundMemory).To(BeNumerically("<", 4200000)) + // Setting a running machine results in 125 runner, err := mb.setName(name).setCmd(set.withCPUs(4)).run() Expect(err).To(BeNil()) diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go index 7974c261e..213f7ce5d 100644 --- a/pkg/machine/qemu/machine.go +++ b/pkg/machine/qemu/machine.go @@ -42,7 +42,7 @@ var ( vmtype = "qemu" ) -func GetQemuProvider() machine.Provider { +func GetVirtualizationProvider() machine.Provider { return qemuProvider } diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go index 9a57102f0..8f6ef7a43 100644 --- a/pkg/machine/wsl/machine.go +++ b/pkg/machine/wsl/machine.go @@ -364,14 +364,6 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) { return false, err } - if err := v.writeConfig(); err != nil { - return false, err - } - - if err := setupConnections(v, opts, sshDir); err != nil { - return false, err - } - dist, err := provisionWSLDist(v) if err != nil { return false, err @@ -393,6 +385,14 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) { // Cycle so that user change goes into effect _ = terminateDist(dist) + if err := v.writeConfig(); err != nil { + return false, err + } + + if err := setupConnections(v, opts, sshDir); err != nil { + return false, err + } + return true, nil } diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go index b5d10df8c..ec85f0f79 100644 --- a/pkg/specgen/generate/container.go +++ b/pkg/specgen/generate/container.go @@ -192,16 +192,24 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat // - "container" denotes the container 
should join the VM of the SandboxID // (the infra container) if len(s.Pod) > 0 { - annotations[ann.SandboxID] = s.Pod + p, err := r.LookupPod(s.Pod) + if err != nil { + return nil, err + } + sandboxID := p.ID() + if p.HasInfraContainer() { + infra, err := p.InfraContainer() + if err != nil { + return nil, err + } + sandboxID = infra.ID() + } + annotations[ann.SandboxID] = sandboxID annotations[ann.ContainerType] = ann.ContainerTypeContainer // Check if this is an init-ctr and if so, check if // the pod is running. we do not want to add init-ctrs to // a running pod because it creates confusion for us. if len(s.InitContainerType) > 0 { - p, err := r.LookupPod(s.Pod) - if err != nil { - return nil, err - } containerStatuses, err := p.Status() if err != nil { return nil, err diff --git a/pkg/terminal/util.go b/pkg/terminal/util.go deleted file mode 100644 index 0f0968c30..000000000 --- a/pkg/terminal/util.go +++ /dev/null @@ -1,134 +0,0 @@ -package terminal - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/containers/storage/pkg/homedir" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/knownhosts" - "golang.org/x/term" -) - -var ( - passPhrase []byte - phraseSync sync.Once - password []byte - passwordSync sync.Once -) - -// ReadPassword prompts for a secret and returns value input by user from stdin -// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported. -// Additionally, all input after `<secret>/n` is queued to podman command. -func ReadPassword(prompt string) (pw []byte, err error) { - fd := int(os.Stdin.Fd()) - if term.IsTerminal(fd) { - fmt.Fprint(os.Stderr, prompt) - pw, err = term.ReadPassword(fd) - fmt.Fprintln(os.Stderr) - return - } - - var b [1]byte - for { - n, err := os.Stdin.Read(b[:]) - // terminal.ReadPassword discards any '\r', so we do the same - if n > 0 && b[0] != '\r' { - if b[0] == '\n' { - return pw, nil - } - pw = append(pw, b[0]) - // limit size, so that a wrong input won't fill up the memory - if len(pw) > 1024 { - err = errors.New("password too long, 1024 byte limit") - } - } - if err != nil { - // terminal.ReadPassword accepts EOF-terminated passwords - // if non-empty, so we do the same - if err == io.EOF && len(pw) > 0 { - err = nil - } - return pw, err - } - } -} - -func PublicKey(path string, passphrase []byte) (ssh.Signer, error) { - key, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - signer, err := ssh.ParsePrivateKey(key) - if err != nil { - if _, ok := err.(*ssh.PassphraseMissingError); !ok { - return nil, err - } - if len(passphrase) == 0 { - passphrase = ReadPassphrase() - } - return ssh.ParsePrivateKeyWithPassphrase(key, passphrase) - } - return signer, nil -} - -func ReadPassphrase() []byte { - phraseSync.Do(func() { - secret, err := ReadPassword("Key Passphrase: ") - if err != nil { - secret = []byte{} - } - passPhrase = secret - }) - return passPhrase -} - -func ReadLogin() []byte { - passwordSync.Do(func() { - secret, err := ReadPassword("Login password: ") - if err != nil { - secret = []byte{} - } - password = secret - }) - return password -} - -func HostKey(host string) ssh.PublicKey { - // parse OpenSSH known_hosts file - // ssh or use ssh-keyscan to get initial key - knownHosts := filepath.Join(homedir.Get(), ".ssh", "known_hosts") - fd, err := os.Open(knownHosts) - if err != nil { - logrus.Error(err) - return nil - } - - // support -H parameter for ssh-keyscan - hashhost := 
knownhosts.HashHostname(host) - - scanner := bufio.NewScanner(fd) - for scanner.Scan() { - _, hosts, key, _, _, err := ssh.ParseKnownHosts(scanner.Bytes()) - if err != nil { - logrus.Errorf("Failed to parse known_hosts: %s", scanner.Text()) - continue - } - - for _, h := range hosts { - if h == host || h == hashhost { - return key - } - } - } - - return nil -} diff --git a/podman.spec.rpkg b/podman.spec.rpkg index 7068c9745..f27b31108 100644 --- a/podman.spec.rpkg +++ b/podman.spec.rpkg @@ -59,6 +59,7 @@ BuildRequires: go-rpm-macros %endif %if 0%{?rhel} <= 8 BuildRequires: pkgconfig(devmapper) +BuildRequires: python3 %endif BuildRequires: gpgme-devel BuildRequires: libassuan-devel diff --git a/test/README.md b/test/README.md index 769bdbfd7..b44deadaf 100644 --- a/test/README.md +++ b/test/README.md @@ -1,4 +1,4 @@ - + # Test utils Test utils provide common functions and structs for testing. It includes two structs: * `PodmanTest`: Handle the *podman* command and other global resources like temporary diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas index 6578afc93..8ce58b06d 100755 --- a/test/buildah-bud/apply-podman-deltas +++ b/test/buildah-bud/apply-podman-deltas @@ -152,6 +152,10 @@ errmsg "checking authfile: stat /tmp/nonexistent: no such file or directory" \ "Error: checking authfile: stat /tmp/nonexistent: no such file or directory" \ "bud with Containerfile should fail with nonexistent authfile" +errmsg "cannot find Containerfile or Dockerfile" \ + "no such file or directory" \ + "bud-github-context-from-commit" + ############################################################################### # BEGIN tests that don't make sense under podman due to fundamental differences @@ -216,7 +220,10 @@ skip_if_remote "--output option not implemented in podman-remote" \ "build with custom build output and output rootfs to tar" \ "build with custom build output and output rootfs to tar by pipe" \ "build with custom build output must fail for bad input" \ - "build with custom build output and output rootfs to tar with no additional step" + "build with custom build output and output rootfs to tar with no additional step" \ + "build with custom build output for single-stage-cached and output rootfs to directory" \ + "build with custom build output for multi-stage-cached and output rootfs to directory" \ + "build with custom build output for multi-stage and output rootfs to directory" # https://github.com/containers/podman/issues/14544 skip_if_remote "logfile not implemented on remote" "bud-logfile-with-split-logfile-by-platform" diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go index 8f5e1a0b6..bc99455f3 100644 --- a/test/e2e/checkpoint_test.go +++ b/test/e2e/checkpoint_test.go @@ -132,6 +132,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(Equal(cid)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited")) @@ -156,6 +157,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(Equal(cid)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) @@ -214,6 +216,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + 
Expect(result.OutputToString()).To(Equal("test_name")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Exited")) @@ -221,6 +224,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(Equal("test_name")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) @@ -298,6 +302,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(Equal("second")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1)) ps := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"}) @@ -310,6 +315,7 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(Equal("second")) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited"))) @@ -325,16 +331,20 @@ var _ = Describe("Podman checkpoint", func() { session1 := podmanTest.Podman(localRunString) session1.WaitWithDefaultTimeout() Expect(session1).Should(Exit(0)) + cid1 := session1.OutputToString() localRunString = getRunString([]string{"--name", "second", ALPINE, "top"}) session2 := podmanTest.Podman(localRunString) session2.WaitWithDefaultTimeout() Expect(session2).Should(Exit(0)) + cid2 := session2.OutputToString() result := podmanTest.Podman([]string{"container", "checkpoint", "-a"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(ContainSubstring(cid1)) + Expect(result.OutputToString()).To(ContainSubstring(cid2)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(0)) ps := podmanTest.Podman([]string{"ps", "-q", "--no-trunc"}) @@ -347,6 +357,8 @@ var _ = Describe("Podman checkpoint", func() { result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) + Expect(result.OutputToString()).To(ContainSubstring(cid1)) + Expect(result.OutputToString()).To(ContainSubstring(cid2)) Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2)) Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up")) Expect(podmanTest.GetContainerStatus()).To(Not(ContainSubstring("Exited"))) diff --git a/test/e2e/cleanup_test.go b/test/e2e/cleanup_test.go new file mode 100644 index 000000000..f15f9bd5a --- /dev/null +++ b/test/e2e/cleanup_test.go @@ -0,0 +1,128 @@ +package integration + +import ( + "os" + + . "github.com/containers/podman/v4/test/utils" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gexec" +) + +var _ = Describe("Podman container cleanup", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + SkipIfRemote("podman container cleanup is not supported in remote") + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + }) + + AfterEach(func() { + podmanTest.Cleanup() + f := CurrentGinkgoTestDescription() + processTestResult(f) + + }) + + It("podman cleanup bogus container", func() { + session := podmanTest.Podman([]string{"container", "cleanup", "foobar"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(125)) + Expect(session.ErrorToString()).To(ContainSubstring("no such container")) + }) + + It("podman cleanup container by id", func() { + session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + cid := session.OutputToString() + session = podmanTest.Podman([]string{"container", "cleanup", cid}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(cid)) + }) + + It("podman cleanup container by short id", func() { + session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + cid := session.OutputToString() + shortID := cid[0:10] + session = podmanTest.Podman([]string{"container", "cleanup", shortID}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(shortID)) + }) + + It("podman cleanup container by name", func() { + session := podmanTest.Podman([]string{"create", "--name", "foo", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + session = podmanTest.Podman([]string{"container", "cleanup", "foo"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal("foo")) + }) + + It("podman cleanup all containers", func() { + session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + cid := session.OutputToString() + + session = podmanTest.Podman([]string{"container", "cleanup", "--all"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(cid)) + }) + + It("podman cleanup latest container", func() { + SkipIfRemote("--latest flag n/a") + session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"create", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + cid := session.OutputToString() + + session = podmanTest.Podman([]string{"container", "cleanup", "--latest"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(cid)) + }) + + It("podman cleanup running container", func() { + session := podmanTest.RunTopContainer("running") + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + session = podmanTest.Podman([]string{"container", "cleanup", "running"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(125)) + Expect(session.ErrorToString()).To(ContainSubstring("container state improper")) + }) + + It("podman cleanup paused container", func() { + SkipIfRootlessCgroupsV1("Pause 
is not supported in cgroups v1") + session := podmanTest.RunTopContainer("paused") + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + session = podmanTest.Podman([]string{"pause", "paused"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + session = podmanTest.Podman([]string{"container", "cleanup", "paused"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(125)) + Expect(session.ErrorToString()).To(ContainSubstring("container state improper")) + }) +}) diff --git a/test/e2e/image_scp_test.go b/test/e2e/image_scp_test.go index 77fe810bd..2c275d974 100644 --- a/test/e2e/image_scp_test.go +++ b/test/e2e/image_scp_test.go @@ -3,9 +3,11 @@ package integration import ( "io/ioutil" "os" + "path/filepath" "github.com/containers/common/pkg/config" . "github.com/containers/podman/v4/test/utils" + "github.com/containers/storage/pkg/homedir" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gexec" @@ -56,6 +58,9 @@ var _ = Describe("podman image scp", func() { }) It("podman image scp with proper connection", func() { + if _, err := os.Stat(filepath.Join(homedir.Get(), ".ssh", "known_hosts")); err != nil { + Skip("known_hosts does not exist or is not accessible") + } cmd := []string{"system", "connection", "add", "--default", "QA", diff --git a/test/e2e/init_test.go b/test/e2e/init_test.go index ccc102fa3..25b9e079a 100644 --- a/test/e2e/init_test.go +++ b/test/e2e/init_test.go @@ -52,6 +52,7 @@ var _ = Describe("Podman init", func() { init := podmanTest.Podman([]string{"init", cid}) init.WaitWithDefaultTimeout() Expect(init).Should(Exit(0)) + Expect(init.OutputToString()).To(Equal(cid)) result := podmanTest.Podman([]string{"inspect", cid}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) @@ -67,6 +68,7 @@ var _ = Describe("Podman init", func() { init := podmanTest.Podman([]string{"init", name}) init.WaitWithDefaultTimeout() Expect(init).Should(Exit(0)) + Expect(init.OutputToString()).To(Equal(name)) result := podmanTest.Podman([]string{"inspect", name}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) @@ -79,9 +81,11 @@ var _ = Describe("Podman init", func() { session := podmanTest.Podman([]string{"create", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) + cid := session.OutputToString() init := podmanTest.Podman([]string{"init", "--latest"}) init.WaitWithDefaultTimeout() Expect(init).Should(Exit(0)) + Expect(init.OutputToString()).To(Equal(cid)) result := podmanTest.Podman([]string{"inspect", "--latest"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) @@ -93,15 +97,21 @@ var _ = Describe("Podman init", func() { session := podmanTest.Podman([]string{"create", "--name", "test1", ALPINE, "ls"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) + cid := session.OutputToString() session2 := podmanTest.Podman([]string{"create", "--name", "test2", ALPINE, "ls"}) session2.WaitWithDefaultTimeout() Expect(session2).Should(Exit(0)) + cid2 := session2.OutputToString() session3 := podmanTest.Podman([]string{"run", "--name", "test3", "-d", ALPINE, "top"}) session3.WaitWithDefaultTimeout() Expect(session3).Should(Exit(0)) + cid3 := session3.OutputToString() init := podmanTest.Podman([]string{"init", "--all"}) init.WaitWithDefaultTimeout() Expect(init).Should(Exit(0)) + Expect(init.OutputToString()).To(ContainSubstring(cid)) + Expect(init.OutputToString()).To(ContainSubstring(cid2)) + 
Expect(init.OutputToString()).To(ContainSubstring(cid3)) result := podmanTest.Podman([]string{"inspect", "test1"}) result.WaitWithDefaultTimeout() Expect(result).Should(Exit(0)) diff --git a/test/e2e/rmi_test.go b/test/e2e/rmi_test.go index d1a0cd6f5..f87f65c34 100644 --- a/test/e2e/rmi_test.go +++ b/test/e2e/rmi_test.go @@ -307,4 +307,82 @@ RUN touch %s`, CIRROS_IMAGE, imageName) } wg.Wait() }) + + It("podman rmi --no-prune with dangling parents", func() { + podmanTest.AddImageToRWStore(ALPINE) + session := podmanTest.Podman([]string{"create", "--name", "c_test1", ALPINE, "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test1", "test1"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"create", "--name", "c_test2", "test1", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test2", "test2"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + imageID2 := session.OutputToString() + + session = podmanTest.Podman([]string{"create", "--name", "c_test3", "test2", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test3", "test3"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + imageID3 := session.OutputToString() + + session = podmanTest.Podman([]string{"untag", "test2"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"untag", "test1"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"rmi", "-f", "--no-prune", "test3"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(ContainSubstring(imageID3)) + Expect(session.OutputToString()).NotTo(ContainSubstring(imageID2)) + }) + + It("podman rmi --no-prune with undangling parents", func() { + podmanTest.AddImageToRWStore(ALPINE) + session := podmanTest.Podman([]string{"create", "--name", "c_test1", ALPINE, "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test1", "test1"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"create", "--name", "c_test2", "test1", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test2", "test2"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + imageID2 := session.OutputToString() + + session = podmanTest.Podman([]string{"create", "--name", "c_test3", "test2", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + + session = podmanTest.Podman([]string{"commit", "-q", "c_test3", "test3"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + imageID3 := session.OutputToString() + + session = podmanTest.Podman([]string{"rmi", "-f", "--no-prune", "test3"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(ContainSubstring(imageID3)) + Expect(session.OutputToString()).NotTo(ContainSubstring(imageID2)) + }) }) diff --git a/test/e2e/run_cleanup_test.go b/test/e2e/run_cleanup_test.go index ea2caf907..5aa81140d 100644 --- 
a/test/e2e/run_cleanup_test.go +++ b/test/e2e/run_cleanup_test.go @@ -36,7 +36,7 @@ var _ = Describe("Podman run exit", func() { It("podman run -d mount cleanup test", func() { SkipIfRemote("podman-remote does not support mount") - SkipIfRootless("TODO rootless podman mount requires podman unshare first") + SkipIfRootless("rootless podman mount requires podman unshare first") result := podmanTest.Podman([]string{"run", "-dt", ALPINE, "top"}) result.WaitWithDefaultTimeout() @@ -69,6 +69,49 @@ var _ = Describe("Podman run exit", func() { pmount.WaitWithDefaultTimeout() Expect(pmount).Should(Exit(0)) Expect(pmount.OutputToString()).NotTo(ContainSubstring(cid)) + }) + + It("podman run -d mount cleanup rootless test", func() { + SkipIfRemote("podman-remote does not support mount") + SkipIfNotRootless("Use unshare in rootless only") + + result := podmanTest.Podman([]string{"run", "-dt", ALPINE, "top"}) + result.WaitWithDefaultTimeout() + cid := result.OutputToString() + Expect(result).Should(Exit(0)) + + mount := podmanTest.Podman([]string{"unshare", "mount"}) + mount.WaitWithDefaultTimeout() + Expect(mount).Should(Exit(0)) + Expect(mount.OutputToString()).To(ContainSubstring(cid)) + + // command: podman <options> unshare podman <options> mount --no-trunc + args := []string{"unshare", podmanTest.PodmanBinary} + opts := podmanTest.PodmanMakeOptions([]string{"mount", "--no-trunc"}, false, false) + args = append(args, opts...) + + pmount := podmanTest.Podman(args) + pmount.WaitWithDefaultTimeout() + Expect(pmount).Should(Exit(0)) + Expect(pmount.OutputToString()).To(ContainSubstring(cid)) + stop := podmanTest.Podman([]string{"stop", cid}) + stop.WaitWithDefaultTimeout() + Expect(stop).Should(Exit(0)) + + // We have to force cleanup so the unmount happens + podmanCleanupSession := podmanTest.Podman([]string{"container", "cleanup", cid}) + podmanCleanupSession.WaitWithDefaultTimeout() + Expect(podmanCleanupSession).Should(Exit(0)) + + mount = podmanTest.Podman([]string{"unshare", "mount"}) + mount.WaitWithDefaultTimeout() + Expect(mount).Should(Exit(0)) + Expect(mount.OutputToString()).NotTo(ContainSubstring(cid)) + + pmount = podmanTest.Podman(args) + pmount.WaitWithDefaultTimeout() + Expect(pmount).Should(Exit(0)) + Expect(pmount.OutputToString()).NotTo(ContainSubstring(cid)) }) }) diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go index 613727118..f247b2dac 100644 --- a/test/e2e/run_userns_test.go +++ b/test/e2e/run_userns_test.go @@ -307,6 +307,30 @@ var _ = Describe("Podman UserNS support", func() { } }) + + It("podman --userns= conflicts with ui[dg]map and sub[ug]idname", func() { + session := podmanTest.Podman([]string{"run", "--userns=host", "--uidmap=0:1:500", "alpine", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(125)) + Expect(session.ErrorToString()).To(ContainSubstring("--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) + + session = podmanTest.Podman([]string{"run", "--userns=host", "--gidmap=0:200:5000", "alpine", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(125)) + Expect(session.ErrorToString()).To(ContainSubstring("--userns and --uidmap/--gidmap/--subuidname/--subgidname are mutually exclusive")) + + // with sub[ug]idname we don't check the error output, since the message can vary with the + // system configuration: the specified user may not be defined, which causes a different, earlier error.
+ // In any case, make sure the command doesn't succeed. + session = podmanTest.Podman([]string{"run", "--userns=private", "--subuidname=containers", "alpine", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Not(Exit(0))) + + session = podmanTest.Podman([]string{"run", "--userns=private", "--subgidname=containers", "alpine", "true"}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Not(Exit(0))) + }) + It("podman PODMAN_USERNS", func() { SkipIfNotRootless("keep-id only works in rootless mode") diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go index 73af9d12c..f3e8cc015 100644 --- a/test/e2e/start_test.go +++ b/test/e2e/start_test.go @@ -1,6 +1,7 @@ package integration import ( + "fmt" "io/ioutil" "os" "strconv" @@ -99,23 +100,6 @@ var _ = Describe("Podman start", func() { Expect(session.OutputToString()).To(Equal(shortID)) }) - It("podman container start single container by short id", func() { - session := podmanTest.Podman([]string{"container", "create", ALPINE, "ls"}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(0)) - cid := session.OutputToString() - shortID := cid[0:10] - session = podmanTest.Podman([]string{"container", "start", shortID}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(0)) - Expect(session.OutputToString()).To(Equal(shortID)) - - session = podmanTest.Podman([]string{"stop", shortID}) - session.WaitWithDefaultTimeout() - Expect(session).Should(Exit(0)) - Expect(session.OutputToString()).To(Equal(shortID)) - }) - It("podman start single container by name", func() { name := "foobar99" session := podmanTest.Podman([]string{"create", "--name", name, ALPINE, "ls"}) @@ -124,9 +108,6 @@ var _ = Describe("Podman start", func() { session = podmanTest.Podman([]string{"start", name}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) - if podmanTest.RemoteTest { - Skip("Container-start name check doesn't work on remote client. 
It always returns the full ID.") - } Expect(session.OutputToString()).To(Equal(name)) }) @@ -231,4 +212,42 @@ var _ = Describe("Podman start", func() { _, err = strconv.Atoi(containerPID) // Make sure it's a proper integer Expect(err).To(BeNil()) }) + + It("podman start container --filter", func() { + session1 := podmanTest.Podman([]string{"container", "create", ALPINE}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + cid1 := session1.OutputToString() + + session1 = podmanTest.Podman([]string{"container", "create", ALPINE}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + cid2 := session1.OutputToString() + + session1 = podmanTest.Podman([]string{"container", "create", ALPINE}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + cid3 := session1.OutputToString() + shortCid3 := cid3[0:5] + + session1 = podmanTest.Podman([]string{"start", cid1, "-f", "status=running"}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + Expect(session1.OutputToString()).To(HaveLen(0)) + + session1 = podmanTest.Podman([]string{"start", "--all", "--filter", fmt.Sprintf("id=%swrongid", shortCid3)}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + Expect(session1.OutputToString()).To(HaveLen(0)) + + session1 = podmanTest.Podman([]string{"start", "--all", "--filter", fmt.Sprintf("id=%s", shortCid3)}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + Expect(session1.OutputToString()).To(BeEquivalentTo(cid3)) + + session1 = podmanTest.Podman([]string{"start", "-f", fmt.Sprintf("id=%s", cid2)}) + session1.WaitWithDefaultTimeout() + Expect(session1).Should(Exit(0)) + Expect(session1.OutputToString()).To(BeEquivalentTo(cid2)) + }) }) diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go index 7a258466a..23abb6d92 100644 --- a/test/e2e/stop_test.go +++ b/test/e2e/stop_test.go @@ -69,6 +69,19 @@ var _ = Describe("Podman stop", func() { Expect(strings.TrimSpace(finalCtrs.OutputToString())).To(Equal("")) }) + It("podman stop single container by short id", func() { + session := podmanTest.RunTopContainer("test1") + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + cid := session.OutputToString() + shortID := cid[0:10] + + session = podmanTest.Podman([]string{"stop", shortID}) + session.WaitWithDefaultTimeout() + Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(shortID)) + }) + It("podman stop container by name", func() { session := podmanTest.RunTopContainer("test1") session.WaitWithDefaultTimeout() @@ -198,9 +211,13 @@ var _ = Describe("Podman stop", func() { session := podmanTest.RunTopContainer("test1") session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) + cid := session.OutputToString() + session = podmanTest.Podman([]string{"stop", "-l", "-t", "1"}) session.WaitWithDefaultTimeout() Expect(session).Should(Exit(0)) + Expect(session.OutputToString()).To(Equal(cid)) + finalCtrs := podmanTest.Podman([]string{"ps", "-q"}) finalCtrs.WaitWithDefaultTimeout() Expect(finalCtrs).Should(Exit(0)) diff --git a/test/system/010-images.bats b/test/system/010-images.bats index aa390f236..16ee681a3 100644 --- a/test/system/010-images.bats +++ b/test/system/010-images.bats @@ -259,8 +259,8 @@ Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z run_podman 2 rmi -a is "$output" "Error: 2 errors occurred: -.** image used by .*: image is in use by a container -.** image used by .*: image is in use by a container" +.** image used by .*: image is in 
use by a container: consider listing external containers and force-removing image +.** image used by .*: image is in use by a container: consider listing external containers and force-removing image" run_podman rmi -af is "$output" "Untagged: $IMAGE @@ -292,7 +292,7 @@ Deleted: $pauseID" "infra images gets removed as well" pauseID=$output run_podman 2 rmi $pauseImage - is "$output" "Error: image used by .* image is in use by a container" + is "$output" "Error: image used by .* image is in use by a container: consider listing external containers and force-removing image" run_podman rmi -f $pauseImage is "$output" "Untagged: $pauseImage diff --git a/test/system/045-start.bats b/test/system/045-start.bats index ad8483bba..d19171ec3 100644 --- a/test/system/045-start.bats +++ b/test/system/045-start.bats @@ -66,4 +66,20 @@ load helpers is "$output" "$cid_exited_0" } +@test "podman start print IDs or raw input" { + # start --all must print the IDs + run_podman create $IMAGE top + ctrID="$output" + run_podman start --all + is "$output" "$ctrID" + + # start $input must print $input + cname=$(random_string) + run_podman create --name $cname $IMAGE top + run_podman start $cname + is "$output" $cname + + run_podman rm -t 0 -f $ctrID $cname +} + # vim: filetype=sh diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats index 39002512b..a21a036c2 100644 --- a/test/system/050-stop.bats +++ b/test/system/050-stop.bats @@ -59,6 +59,22 @@ load helpers is "${lines[3]}" "c4--Created.*" "ps -a, created container (unaffected)" } +@test "podman stop print IDs or raw input" { + # stop -a must print the IDs + run_podman run -d $IMAGE top + ctrID="$output" + run_podman stop --all + is "$output" "$ctrID" + + # stop $input must print $input + cname=$(random_string) + run_podman run -d --name $cname $IMAGE top + run_podman stop $cname + is "$output" $cname + + run_podman rm -t 0 -f $ctrID $cname +} + # #9051 : podman stop --ignore was not working with podman-remote @test "podman stop --ignore" { name=thiscontainerdoesnotexist diff --git a/test/system/520-checkpoint.bats b/test/system/520-checkpoint.bats index 7c8fc143a..73fa5d4c4 100644 --- a/test/system/520-checkpoint.bats +++ b/test/system/520-checkpoint.bats @@ -101,6 +101,25 @@ function teardown() { run_podman rm -t 0 -f $cid } +@test "podman checkpoint/restore print IDs or raw input" { + # checkpoint/restore -a must print the IDs + run_podman run -d $IMAGE top + ctrID="$output" + run_podman container checkpoint -a + is "$output" "$ctrID" + run_podman container restore -a + is "$output" "$ctrID" + + # checkpoint/restore $input must print $input + cname=$(random_string) + run_podman run -d --name $cname $IMAGE top + run_podman container checkpoint $cname + is "$output" $cname + run_podman container restore $cname + is "$output" $cname + + run_podman rm -t 0 -f $ctrID $cname +} @test "podman checkpoint --export, with volumes" { skip_if_remote "Test uses --root/--runroot, which are N/A over remote" diff --git a/test/system/900-ssh.bats b/test/system/900-ssh.bats new file mode 100644 index 000000000..0757f5838 --- /dev/null +++ b/test/system/900-ssh.bats @@ -0,0 +1,61 @@ +#!/usr/bin/env bats +# +# Tests for the podman --ssh option; these run only against podman-remote. +# + +load helpers + +# Override standard setup: skip everything unless we are running podman-remote +function setup() { + if ! is_remote; then + skip "only applicable on podman-remote" + fi + + basic_setup +} + +function teardown() { + if ! is_remote; then + return + fi + + # In case test function failed to clean up + if [[ -n $_SERVICE_PID ]]; then + run kill $_SERVICE_PID + fi + + # see test/system/272-system-connection.bats for why this is needed + mount \ + | grep $PODMAN_TMPDIR \ + | awk '{print $3}' \ + | xargs -l1 --no-run-if-empty umount + + run_podman system connection rm --all + + basic_teardown +} + +function _run_podman_remote() { + PODMAN=${PODMAN%%--url*} run_podman "$@" +} + +@test "podman --ssh test" { + skip_if_no_ssh "cannot run these tests without an ssh binary" + # Start server + _SERVICE_PORT=$(random_free_port 63000-64999) + + ${PODMAN%%-remote*} --root ${PODMAN_TMPDIR}/root \ + --runroot ${PODMAN_TMPDIR}/runroot \ + system service -t 99 tcp:localhost:$_SERVICE_PORT & + _SERVICE_PID=$! + wait_for_port localhost $_SERVICE_PORT + + notme=${PODMAN_ROOTLESS_USER} + + uid=$(id -u $notme) + + run_podman 125 --ssh=native system connection add testing ssh://$notme@localhost:22/run/user/$uid/podman/podman.sock + is "$output" "Error: exit status 255" + + # TODO: figure out how to run podman-remote tests with the new ssh support +} diff --git a/test/system/TODO.md b/test/system/TODO.md index e47292f26..55e7601d1 100644 --- a/test/system/TODO.md +++ b/test/system/TODO.md @@ -1,4 +1,4 @@ - + # Overview diff --git a/test/system/helpers.bash b/test/system/helpers.bash index 19bc6547c..b821175bb 100644 --- a/test/system/helpers.bash +++ b/test/system/helpers.bash @@ -347,6 +347,10 @@ function wait_for_port() { # BEGIN miscellaneous tools # Shortcuts for common needs: +function no_ssh() { + ! command -v ssh &> /dev/null +} + function is_ubuntu() { grep -qiw ubuntu /etc/os-release } @@ -470,6 +474,17 @@ function _add_label_if_missing() { } ###################### +# skip_if_no_ssh # ...with an optional message ###################### +function skip_if_no_ssh() { + if no_ssh; then + local msg=$(_add_label_if_missing "$1" "ssh") + skip "${msg:-not applicable with no ssh binary}" + fi +} + + +###################### # skip_if_rootless # ...with an optional message ###################### function skip_if_rootless() { diff --git a/transfer.md b/transfer.md index 765094dc9..c755a761f 100644 --- a/transfer.md +++ b/transfer.md @@ -1,4 +1,4 @@ - + # Podman Usage Transfer This document outlines useful information for ops and dev transfer as it relates to infrastructure that utilizes `Podman`.
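The new system tests in 045-start.bats, 050-stop.bats, and 520-checkpoint.bats above all pin down one output contract: podman start/stop/checkpoint/restore echo back exactly what the caller passed (a name or a short ID), and only the --all/--latest forms resolve to and print full container IDs. A minimal shell sketch of that contract, assuming $IMAGE is any available test image; the container name "mytest" and the outputs in the comments are illustrative, mirroring what the bats tests assert:

    # Addressed by name: the name is echoed back, not the ID.
    podman create --name mytest $IMAGE top
    podman start mytest          # prints: mytest
    podman stop -t 0 mytest      # prints: mytest

    # Addressed via --all: the commands resolve containers themselves,
    # so full 64-character IDs are printed.
    cid=$(podman create $IMAGE top)
    podman start --all           # prints the full $cid
    podman rm -f -t 0 --all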
diff --git a/troubleshooting.md b/troubleshooting.md index 1fa044fe9..6d46a543f 100644 --- a/troubleshooting.md +++ b/troubleshooting.md @@ -1,4 +1,4 @@ - + # Troubleshooting diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 70b59782a..857d80497 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -28,12 +28,12 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" FEDORA_NAME: "fedora-36" - PRIOR_FEDORA_NAME: "fedora-35" + #PRIOR_FEDORA_NAME: "fedora-35" UBUNTU_NAME: "ubuntu-2204" - IMAGE_SUFFIX: "c6193881921355776" + IMAGE_SUFFIX: "c6013173500215296" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + #PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" @@ -66,15 +66,15 @@ meta_task: alias: meta container: - image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" # see contrib/imgts + image: "quay.io/libpod/imgts:latest" cpu: 1 memory: 1 env: # Space-separated list of images used by this repository state + # TODO: Re-add ${PRIOR_FEDORA_CACHE_IMAGE_NAME} when placed back in use IMGNAMES: |- ${FEDORA_CACHE_IMAGE_NAME} - ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} build-push-${IMAGE_SUFFIX} BUILDID: "${CIRRUS_BUILD_ID}" @@ -215,10 +215,10 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'vfs' - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -228,10 +228,10 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -272,11 +272,11 @@ integration_rootless_task: IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' PRIV_NAME: rootless - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' - PRIV_NAME: rootless + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' + # PRIV_NAME: rootless - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 46e5dc9d6..667d5f81f 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,36 @@ # Changelog +## v1.27.0 (2022-08-01) + + build: support filtering cache by duration using `--cache-ttl`. + build: support building from commit when using git repo as build context. + build: clean up git repos correctly when using subdirs. + build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`.
+ imagebuildah: optimize cache hits for `COPY` and `ADD` instructions. + build: support OCI hooks for ephemeral build containers. + build: add support for `--userns=auto`. + copier: add NoOverwriteNonDirDir option. + add initial support for building images using Buildah on FreeBSD. + multistage: now skips computing unwanted stages to improve performance. + multiarch: support splitting build logs for `--platform` using `--logsplit`. + build: add support for building images where the base image has no history. + commit: allow disabling image history with `--omit-history`. + build: add support for renaming a device in rootless setups. + build: now supports additionalBuildContext in builds via the `--build-context` option. + build: `--output` produces artifacts even if the build container is not committed. + build: now accepts `--cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax. + build: now accepts a branch and a subdirectory when the build context is a git repository. + build: output now shows a progress bar while pushing and pulling images. + build: now errors out if the path to Containerfile is a directory. + build: support building container images on environments that are rootless and without any valid login sessions. + fix: `--output` now generates artifacts even if the entire build is cached. + fix: `--output` generates artifacts only for the target stage in multi-stage builds. + fix,add: now fails on a bad HTTP response instead of writing to the container. + fix,squash: never use build cache when computing the last step of the last stage. + fix,build,run: allow reusing a secret more than once in different RUN steps. + fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value. + ## v1.26.0 (2022-05-04) imagebuildah,build: move deepcopy of args before we spawn goroutine diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index b40462eea..a3016e2ed 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -50,7 +50,7 @@ endif # Note: Uses the -N -l go compiler options to disable compiler optimizations # and inlining. Using these build options allows you to subsequently # use source debugging tools like delve. -all: bin/buildah bin/imgtype bin/copy docs +all: bin/buildah bin/imgtype bin/copy bin/tutorial docs # Update nix/nixpkgs.json its latest stable commit .PHONY: nixpkgs @@ -74,7 +74,8 @@ bin/buildah: $(SOURCES) cmd/buildah/*.go .PHONY: buildah buildah: bin/buildah -ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +# TODO: remove `grep -v loong64` from `ALL_CROSS_TARGETS` once go.etcd.io/bbolt 1.3.7 is out.
+ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep -v loong64))) LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) @@ -92,6 +93,9 @@ bin/imgtype: $(SOURCES) tests/imgtype/imgtype.go bin/copy: $(SOURCES) tests/copy/copy.go $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/copy/copy.go +bin/tutorial: $(SOURCES) tests/tutorial/tutorial.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/tutorial/tutorial.go + .PHONY: clean clean: $(RM) -r bin tests/testreport/testreport @@ -108,7 +112,7 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,ERRO -w + codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,ERRO -w .PHONY: validate validate: install.tools diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 8e5ad458c..212be3ca8 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -150,7 +150,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // Check if the source is a directory or something else. info, err := os.Stat(spec.Mounts[i].Source) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source) continue } @@ -269,7 +269,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { mount := getMountByID(id) // check if this mountpoint is mounted if err := unix.Lstat(mount.Mountpoint, &st); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint) continue } diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 5e6397f7f..41f1ba311 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -3,6 +3,7 @@ package buildah import ( "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -445,7 +446,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { } buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) continue } @@ -482,7 +483,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { } buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) continue } diff --git a/vendor/github.com/containers/buildah/changelog.txt 
b/vendor/github.com/containers/buildah/changelog.txt index a8a010bcd..a6fa96acf 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,32 @@ +- Changelog for v1.27.0 (2022-08-01) + * build: support filtering cache by duration using `--cache-ttl`. + * build: support building from commit when using git repo as build context. + * build: clean up git repos correctly when using subdirs. + * build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`. + * imagebuildah: optimize cache hits for `COPY` and `ADD` instructions. + * build: support OCI hooks for ephemeral build containers. + * build: add support for `--userns=auto`. + * copier: add NoOverwriteNonDirDir option. + * add initial support for building images using Buildah on FreeBSD. + * multistage: now skips computing unwanted stages to improve performance. + * multiarch: support splitting build logs for `--platform` using `--logsplit`. + * build: add support for building images where the base image has no history. + * commit: allow disabling image history with `--omit-history`. + * build: add support for renaming a device in rootless setups. + * build: now supports additionalBuildContext in builds via the `--build-context` option. + * build: `--output` produces artifacts even if the build container is not committed. + * build: now accepts `--cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax. + * build: now accepts a branch and a subdirectory when the build context is a git repository. + * build: output now shows a progress bar while pushing and pulling images. + * build: now errors out if the path to Containerfile is a directory. + * build: support building container images on environments that are rootless and without any valid login sessions. + * fix: `--output` now generates artifacts even if the entire build is cached. + * fix: `--output` generates artifacts only for the target stage in multi-stage builds. + * fix,add: now fails on a bad HTTP response instead of writing to the container. + * fix,squash: never use build cache when computing the last step of the last stage. + * fix,build,run: allow reusing a secret more than once in different RUN steps. + * fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value. + - Changelog for v1.26.0 (2022-05-04) * imagebuildah,build: move deepcopy of args before we spawn goroutine * Vendor in containers/storage v1.40.2 diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run_linux.go index 809a70131..2e2ed1bb7 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run_linux.go @@ -6,6 +6,7 @@ package chroot import ( "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -1090,7 +1091,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /dev read-only. subDev := filepath.Join(spec.Root.Path, "/dev") if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subDev, 0755) if err == nil { err = unix.Mount("/dev", subDev, "bind", devFlags, "") @@ -1114,7 +1115,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /proc read-only.
subProc := filepath.Join(spec.Root.Path, "/proc") if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subProc, 0755) if err == nil { err = unix.Mount("/proc", subProc, "bind", procFlags, "") @@ -1129,7 +1130,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /sys read-only. subSys := filepath.Join(spec.Root.Path, "/sys") if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subSys, 0755) if err == nil { err = unix.Mount("/sys", subSys, "bind", sysFlags, "") @@ -1218,7 +1219,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } if err != nil { // If the target can't be stat()ted, check the error. - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err) } // The target isn't there yet, so create it. @@ -1304,7 +1305,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( r := filepath.Join(spec.Root.Path, roPath) target, err := filepath.EvalSymlinks(r) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } @@ -1313,7 +1314,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Check if the location is already read-only. var fs unix.Statfs_t if err = unix.Statfs(target, &fs); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } @@ -1325,7 +1326,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Mount the location over itself, so that we can remount it as read-only. roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY) if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } @@ -1370,7 +1371,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Get some info about the target. targetinfo, err := os.Stat(target) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 3c7b021e1..de464ab52 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -1558,7 +1558,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } else { // FreeBSD can return EISDIR for "mkdir /": // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739. 
- if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) { + if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) { return fmt.Errorf("copier: put: error checking directory %q: %w", path, err) } } @@ -1581,7 +1581,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } createFile := func(path string, tr *tar.Reader) (int64, error) { f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) - if err != nil && os.IsExist(err) { + if err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() { return 0, fmt.Errorf("copier: put: error creating file at %q: %w", path, err) @@ -1626,7 +1626,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory) } } else { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return errorResponse("copier: put: %s: %v", req.Directory, err) } if err := ensureDirectoryUnderRoot(req.Directory); err != nil { @@ -1738,7 +1738,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil { return fmt.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root) } - if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) { + if err = os.Link(linkTarget, path); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1753,7 +1753,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // todo: the general solution requires resolving to an absolute path, handling // renaming, and then possibly converting back to a relative symlink // } - if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) { + if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1768,7 +1768,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1783,7 +1783,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1794,7 +1794,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } } case tar.TypeDir: - if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { + if err = os.Mkdir(path, 0700); err != nil && 
errors.Is(err, os.ErrExist) { if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() { if req.PutOptions.NoOverwriteNonDirDir { break @@ -1821,7 +1821,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // the archive more than once for whatever reason directoryModes[path] = mode case tar.TypeFifo: - if err = mkfifo(path, 0600); err != nil && os.IsExist(err) { + if err = mkfifo(path, 0600); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1943,7 +1943,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, } else { // FreeBSD can return EISDIR for "mkdir /": // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739. - if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) { + if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) { return errorResponse("copier: mkdir: error checking directory %q: %v", path, err) } } diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index 501f85ff2..352280433 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -5,6 +5,7 @@ import ( "time" nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage/pkg/archive" @@ -136,6 +137,16 @@ type BuildOptions struct { RuntimeArgs []string // TransientMounts is a list of mounts that won't be kept in the image. TransientMounts []string + // CacheFrom specifies any remote repository which can be treated as + // potential cache source. + CacheFrom reference.Named + // CacheTo specifies any remote repository which can be treated as + // potential cache destination. + CacheTo reference.Named + // CacheTTL specifies duration, if specified using `--cache-ttl` then + // cache intermediate images under this duration will be considered as + // valid cache sources and images outside this duration will be ignored. + CacheTTL time.Duration // Compression specifies the type of compression which is applied to // layer blobs. The default is to not use compression, but // archive.Gzip is recommended. diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 07d900811..77e2529f8 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -30,7 +30,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.27.0-dev" + Version = "1.27.0" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" @@ -137,12 +137,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err } return "", "", fmt.Errorf("cloning %q to %q:\n%s: %w", url, name, string(combinedOutput), err) } - // Check if git url specifies any subdir - // if subdir is there switch to subdir. 
- if gitSubDir != "" { - name = filepath.Join(name, gitSubDir) - } - return name, "", nil + return name, gitSubDir, nil } if strings.HasPrefix(url, "github.com/") { ghurl := url @@ -178,11 +173,13 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.New("unreachable code reached") } -func cloneToDirectory(url, dir string) ([]byte, string, error) { +// parseGitBuildContext parses a git build context into `repo`, `sub-dir`, and +// `branch/commit`. It accepts a build context in the format +// `repourl.git[#[branch-or-commit]:subdir]`. +func parseGitBuildContext(url string) (string, string, string) { gitSubdir := "" gitBranch := "" gitBranchPart := strings.Split(url, "#") - var cmd *exec.Cmd if len(gitBranchPart) > 1 { // check if string contains path to a subdir gitSubDirPart := strings.Split(gitBranchPart[1], ":") @@ -191,16 +188,52 @@ func cloneToDirectory(url, dir string) ([]byte, string, error) { } gitBranch = gitSubDirPart[0] } - if gitBranch == "" { - logrus.Debugf("cloning %q to %q", gitBranchPart[0], dir) - cmd = exec.Command("git", "clone", "--recurse-submodules", gitBranchPart[0], dir) - } else { - logrus.Debugf("cloning repo %q and branch %q to %q", gitBranchPart[0], gitBranch, dir) - cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch, gitBranchPart[0], dir) - } + return gitBranchPart[0], gitSubdir, gitBranch +} +func cloneToDirectory(url, dir string) ([]byte, string, error) { + var cmd *exec.Cmd + gitRepo, gitSubdir, gitBranch := parseGitBuildContext(url) + // init repo + cmd = exec.Command("git", "init", dir) combinedOutput, err := cmd.CombinedOutput() - return combinedOutput, gitSubdir, err + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git init`: %w", err) + } + // add origin + cmd = exec.Command("git", "remote", "add", "origin", gitRepo) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err) + } + // fetch the required branch or commit and perform the checkout; + // always default to `HEAD` if nothing was specified + fetch := "HEAD" + if gitBranch != "" { + fetch = gitBranch + } + logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, fetch, dir) + cmd = exec.Command("git", "fetch", "--depth=1", "origin", "--", fetch) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err) + } + if fetch == "HEAD" { + // We fetched the default branch, so there is no + // valid `branch` or `commit` name; check out the + // detached `FETCH_HEAD` instead. + fetch = "FETCH_HEAD" + } + cmd = exec.Command("git", "checkout", fetch) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git checkout`: %w", err) + } + return combinedOutput, gitSubdir, nil } func downloadToDirectory(url, dir string) error { diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 652f09112..6a6f6dd9d 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -4,11 +4,11 @@ go 1.17 require ( github.com/containerd/containerd v1.6.6 - github.com/containernetworking/cni v1.1.1 - github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a -
github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c + github.com/containernetworking/cni v1.1.2 + github.com/containers/common v0.49.1 + github.com/containers/image/v5 v5.22.0 github.com/containers/ocicrypt v1.1.5 - github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a + github.com/containers/storage v1.42.0 github.com/docker/distribution v2.8.1+incompatible github.com/docker/docker v20.10.17+incompatible github.com/docker/go-units v0.4.0 @@ -17,16 +17,16 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/mattn/go-shellwords v1.0.12 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.19.0 + github.com/onsi/gomega v1.20.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 github.com/opencontainers/runc v1.1.3 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 - github.com/opencontainers/runtime-tools v0.9.0 + github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 github.com/opencontainers/selinux v1.10.1 github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df github.com/seccomp/libseccomp-golang v0.10.0 - github.com/sirupsen/logrus v1.8.1 + github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.0 @@ -34,13 +34,13 @@ require ( go.etcd.io/bbolt v1.3.6 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/sys v0.0.0-20220624220833-87e55d714810 + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 ) require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/BurntSushi/toml v1.1.0 // indirect + github.com/BurntSushi/toml v1.2.0 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Microsoft/hcsshim v0.9.3 // indirect github.com/VividCortex/ewma v1.2.0 // indirect @@ -63,6 +63,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/go-containerregistry v0.10.0 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/uuid v1.3.0 // indirect @@ -72,7 +73,7 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.15.8 // indirect + github.com/klauspost/compress v1.15.9 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect github.com/manifoldco/promptui v0.9.0 // indirect @@ -100,7 +101,7 @@ require ( github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/sylabs/sif/v2 v2.7.1 // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect - github.com/theupdateframework/go-tuf v0.3.0 // indirect + github.com/theupdateframework/go-tuf v0.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vbatts/tar-split v0.11.2 // indirect diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index cdd6cf3b4..4f39445ee 100644 --- a/vendor/github.com/containers/buildah/go.sum 
+++ b/vendor/github.com/containers/buildah/go.sum @@ -92,8 +92,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -183,6 +183,7 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -219,7 +220,6 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -338,18 +338,18 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= -github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k= github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= 
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= -github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a h1:kdcruVl641VTIm8C3O58WRYcBTbnWCsh6AJymk28ScM= -github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a/go.mod h1:1dA7JPGoSi83kjf5H4NIrGANyLOULyvFqV1bwvYFEek= -github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf/go.mod h1:0+N0ZM9mgMmoZZc6uNcgnEsbX85Ne7b29cIW5lqWwVU= -github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c h1:ms1Vyzs9Eb17J38aFKrL0+ig2pVwQq3OleaO7VmQuV0= -github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c/go.mod h1:ykVAVRj4DhQNMHZDVU+KCtXjWBKpqiUe669eF0WBEEc= +github.com/containers/common v0.49.1 h1:6y4/s2WwYxrv+Cox7fotOo316wuZI+iKKPUQweCYv50= +github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg= +github.com/containers/image/v5 v5.22.0 h1:KemxPmD4D2YYOFZN2SgoTk7nBFcnwPiPW0MqjYtknSE= +github.com/containers/image/v5 v5.22.0/go.mod h1:D8Ksv2RNB8qLJ7xe1P3rgJJOSQpahA6amv2Ax++/YO4= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= @@ -360,10 +360,8 @@ github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pA github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ= github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= -github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s= -github.com/containers/storage v1.41.1-0.20220712184034-d26be7b27860/go.mod h1:uu6HCcijN30xRxW1ZuZRngwFGOlH5NpBWYiNBnDQNRw= -github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a h1:+arJAP0v8kEy5fKRPIELjarjpwUHhB7SyRE0uFXlyKY= -github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a/go.mod h1:4DfR+cPpkXKhJnnyydD3z82DXrnTBT63y1k0QWtM2i4= +github.com/containers/storage v1.42.0 h1:zm2AQD4NDeTB3JQ8X+Wo5+VRqNB+b4ocEd7Qj6ylPJA= +github.com/containers/storage v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -480,7 +478,6 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -859,8 +856,8 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA= -github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -869,9 +866,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= @@ -965,6 +961,7 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= @@ -973,7 +970,6 @@ github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwK github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= 
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= @@ -1041,8 +1037,9 @@ github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+t github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1060,8 +1057,6 @@ github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.1-0.20220607072441-a7a45d7d2721/go.mod h1:QvA0UNe48mC1JxcXq0sENIR38+/LdJMLNxuAvtFBhxA= -github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= @@ -1070,15 +1065,17 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= -github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 h1:Rf+QsQGxrYCia8mVyOPnoQZ+vJkZGL+ESWBDUM5s9cQ= +github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7/go.mod 
h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= +github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= @@ -1157,8 +1154,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -1199,8 +1194,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1261,8 +1257,9 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/theupdateframework/go-tuf v0.3.0 h1:od2sc5+BSkKZhmUG2o2rmruy0BGSmhrbDhCnpxh87X8= github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo= +github.com/theupdateframework/go-tuf v0.3.1 h1:NkjMlCuLcDpHNtsWXY4lTmbbQQ5nOM7JSBbOKEEiI1c= +github.com/theupdateframework/go-tuf v0.3.1/go.mod h1:lhHZ3Vt2pdAh15h0Cc6gWdlI+Okn2ZznD3q/cNjd5jw= github.com/titanous/rocacheck 
v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1276,11 +1273,11 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= @@ -1699,12 +1696,14 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index ec427da1c..510602469 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -747,7 +747,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob 
types.BlobInfo,
 			}
 			layerFile.Close()
 		}
-		if !os.IsNotExist(err) {
+		if !errors.Is(err, os.ErrNotExist) {
 			logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err)
 		}
 	}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index e098db473..a1810d6ad 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -185,7 +185,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 	options.Manifest = ""
 	type instance struct {
 		v1.Platform
-		ID string
+		ID  string
+		Ref reference.Canonical
 	}
 	var instances []instance
 	var instancesLock sync.Mutex
@@ -266,10 +267,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 			if err != nil {
 				return err
 			}
-			id, ref = thisID, thisRef
 			instancesLock.Lock()
 			instances = append(instances, instance{
 				ID:       thisID,
+				Ref:      thisRef,
 				Platform: platformSpec,
 			})
 			instancesLock.Unlock()
@@ -284,6 +285,25 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 		return "", nil, merr.ErrorOrNil()
 	}
+	// Reasons for this id, ref assignment w.r.t. the use-case:
+	//
+	// * Single-platform build: On a single-platform build we only
+	// have one built instance, i.e. at index 0 of the built instances,
+	// so assign that.
+	//
+	// * Multi-platform build with manifestList: If this is a build for
+	// multiple platforms (more than one platform) with the --manifest
+	// option, then this assignment is insignificant since it will be
+	// overridden anyway with the id and ref of the manifest list later
+	// in this code.
+	//
+	// * Multi-platform build without manifest list: If this is a build for
+	// multiple platforms without --manifest then we are free to return the
+	// id and ref of any one of the images in the instance list, so always
+	// return index 0 for predictable output, instead of returning the id and
+	// ref of whichever goroutine happened to complete last.
+	id, ref = instances[0].ID, instances[0].Ref
+
 	if manifestList != "" {
 		rt, err := libimage.RuntimeFromStore(store, nil)
 		if err != nil {
@@ -396,6 +416,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
 	for i, d := range dockerfilecontents[1:] {
 		additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
 		if err != nil {
+			dockerfiles := dockerfiles[1:]
 			return "", nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfiles[i], err)
 		}
 		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
@@ -662,6 +683,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
 	for i, d := range dockerfilecontents[1:] {
 		additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
 		if err != nil {
+			dockerfilenames := dockerfilenames[1:]
 			return nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
 		}
 		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index c9e2493b3..ddd2dfc48 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -58,6 +58,9 @@ var builtinAllowedBuildArgs = map[string]bool{
 // interface.
It coordinates the entire build by using one or more // StageExecutors to handle each stage of the build. type Executor struct { + cacheFrom reference.Named + cacheTo reference.Named + cacheTTL time.Duration containerSuffix string logger *logrus.Logger stages map[string]*StageExecutor @@ -212,6 +215,9 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } exec := Executor{ + cacheFrom: options.CacheFrom, + cacheTo: options.CacheTo, + cacheTTL: options.CacheTTL, containerSuffix: options.ContainerSuffix, logger: logger, stages: make(map[string]*StageExecutor), diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index d21757f4b..9d8214fbd 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -2,6 +2,8 @@ package imagebuildah import ( "context" + "crypto/sha256" + "errors" "fmt" "io" "os" @@ -22,6 +24,7 @@ import ( "github.com/containers/buildah/util" config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" + imagedocker "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" is "github.com/containers/image/v5/storage" @@ -35,6 +38,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" + "github.com/openshift/imagebuilder/dockerfile/command" "github.com/openshift/imagebuilder/dockerfile/parser" "github.com/sirupsen/logrus" ) @@ -123,7 +127,7 @@ func (s *StageExecutor) Preserve(path string) error { } st, err := os.Stat(archivedPath) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { createdDirPerms := os.FileMode(0755) if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { return fmt.Errorf("error ensuring volume path exists: %w", err) @@ -165,7 +169,7 @@ func (s *StageExecutor) Preserve(path string) error { archivedPath := filepath.Join(s.mountPoint, cachedPath) logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath]) if err := os.Remove(s.volumeCache[cachedPath]); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return err @@ -186,7 +190,7 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error { } for _, cachedPath := range invalidated { if err := os.Remove(s.volumeCache[cachedPath]); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return err @@ -217,7 +221,7 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) { logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) continue } - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return nil, err } createdDirPerms := os.FileMode(0755) @@ -867,6 +871,21 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount return builder.MountPoint, nil } +// getContentSummary generates content summary for cases where we added content and need +// to get summary with updated digests. 
+func (s *StageExecutor) getContentSummaryAfterAddingContent() string {
+	contentType, digest := s.builder.ContentDigester.Digest()
+	summary := contentType
+	if digest != "" {
+		if summary != "" {
+			summary = summary + ":"
+		}
+		summary = summary + digest.Encoded()
+		logrus.Debugf("added content %s", summary)
+	}
+	return summary
+}
+
 // Execute runs each of the steps in the stage's parsed tree, in turn.
 func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
 	var resourceUsage rusage.Rusage
@@ -945,6 +964,22 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			s.log(commitMessage)
 		}
 	}
+	// logCachePulled produces a build log for cases when `--cache-from`
+	// is used and a valid intermediate image is pulled from a remote source.
+	logCachePulled := func(cacheKey string) {
+		if !s.executor.quiet {
+			cacheHitMessage := "--> Cache pulled from remote"
+			fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, fmt.Sprintf("%s:%s", s.executor.cacheFrom, cacheKey))
+		}
+	}
+	// logCachePush produces a build log for cases when `--cache-to`
+	// is used and a valid intermediate image is pushed to a remote source.
+	logCachePush := func(cacheKey string) {
+		if !s.executor.quiet {
+			cacheHitMessage := "--> Pushing cache"
+			fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, fmt.Sprintf("%s:%s", s.executor.cacheTo, cacheKey))
+		}
+	}
 	logCacheHit := func(cacheID string) {
 		if !s.executor.quiet {
 			cacheHitMessage := "--> Using cache"
@@ -960,6 +995,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		}
 	}
+	// Parse and populate buildOutputOption if needed
+	var buildOutputOption define.BuildOutputOption
+	canGenerateBuildOutput := (s.executor.buildOutput != "" && lastStage)
+	if canGenerateBuildOutput {
+		logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
+		buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
+		if err != nil {
+			return "", nil, fmt.Errorf("failed to parse build output: %w", err)
+		}
+	}
+
 	if len(children) == 0 {
 		// There are no steps.
 		if s.builder.FromImageID == "" || s.executor.squash {
 			if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
 				return "", nil, fmt.Errorf("error committing base container: %w", err)
 			}
+			// Generate build output if needed.
+			if canGenerateBuildOutput {
+				if err := s.generateBuildOutput(buildOutputOption); err != nil {
+					return "", nil, err
+				}
+			}
 		} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
 			// The image would be modified by the labels passed
 			// via the command line, so we need to commit.
@@ -977,6 +1029,12 @@
 			if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil {
 				return "", nil, err
 			}
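The hunks around this point hoist `-o`/`--output` handling out of the individual commit sites: the option is parsed once into buildOutputOption and every later site only checks canGenerateBuildOutput. A minimal sketch of the same gating pattern, assuming the `type=local,dest=DIR` option syntax; parseOutput here is a hypothetical stand-in, not buildah's parser:

package main

import (
	"fmt"
	"strings"
)

// parseOutput is a stand-in for the option parser: it splits
// "type=local,dest=out" style values into key/value pairs.
func parseOutput(spec string) (map[string]string, error) {
	opts := map[string]string{}
	for _, kv := range strings.Split(spec, ",") {
		k, v, ok := strings.Cut(kv, "=")
		if !ok {
			return nil, fmt.Errorf("invalid build output %q", spec)
		}
		opts[k] = v
	}
	return opts, nil
}

func main() {
	buildOutput := "type=local,dest=./rootfs" // e.g. from --output
	lastStage := true

	// Parse once up front, exactly like the refactored Execute();
	// later commit sites only consult the boolean.
	canGenerateBuildOutput := buildOutput != "" && lastStage
	if canGenerateBuildOutput {
		opts, err := parseOutput(buildOutput)
		if err != nil {
			panic(err)
		}
		fmt.Println("would export rootfs to", opts["dest"])
	}
}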
+			// Generate build output if needed.
+			if canGenerateBuildOutput {
+				if err := s.generateBuildOutput(buildOutputOption); err != nil {
+					return "", nil, err
+				}
+			}
 		} else {
 			// We don't need to squash the base image, and the
 			// image wouldn't be modified by the command line
@@ -985,22 +1043,16 @@
 			if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
 				return "", nil, err
 			}
-			if s.executor.buildOutput != "" && lastStage {
-				// If we have reached this point then our build is just performing a tag
-				// and it contains no steps or instructions (i.e Containerfile only contains
-				// `FROM <imagename> and nothing else so we will never end up committing this
-				// but instead just re-tag image. For such use-cases if `-o` or `--output` was
-				// specified honor that and export the contents of the current build anyways.
-				logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
-				buildOutputOption, err := parse.GetBuildOutput(s.executor.buildOutput)
-				if err != nil {
-					return "", nil, fmt.Errorf("failed to parse build output: %w", err)
-				}
-				if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil {
+			// If we have reached this point then our build is just performing a tag
+			// and it contains no steps or instructions (i.e. the Containerfile only
+			// contains `FROM <imagename>` and nothing else), so we will never end up
+			// committing this, but will instead just re-tag the image. For such
+			// use-cases, if `-o` or `--output` was specified, honor that and export
+			// the contents of the current build anyway.
+			if canGenerateBuildOutput {
+				if err := s.generateBuildOutput(buildOutputOption); err != nil {
 					return "", nil, err
 				}
 			}
-
 		}
 		logImageID(imgID)
 	}
@@ -1101,15 +1153,7 @@
 			return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
 		}
 		// In case we added content, retrieve its digest.
-		addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
-		addedContentSummary := addedContentType
-		if addedContentDigest != "" {
-			if addedContentSummary != "" {
-				addedContentSummary = addedContentSummary + ":"
-			}
-			addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
-			logrus.Debugf("added content %s", addedContentSummary)
-		}
+		addedContentSummary := s.getContentSummaryAfterAddingContent()
 		if moreInstructions {
 			// There are still more instructions to process
 			// for this stage.  Make a note of the
@@ -1134,6 +1178,12 @@
 				return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
 			}
 			logImageID(imgID)
+			// Generate build output if needed.
+			if canGenerateBuildOutput {
+				if err := s.generateBuildOutput(buildOutputOption); err != nil {
+					return "", nil, err
+				}
+			}
 		} else {
 			imgID = ""
 		}
@@ -1143,19 +1193,33 @@
 		// We're in a multi-layered build.
 		var (
-			commitName          string
-			cacheID             string
-			err                 error
-			rebase              bool
-			addedContentSummary string
+			commitName                string
+			cacheID                   string
+			cacheKey                  string
+			pulledAndUsedCacheImage   bool
+			err                       error
+			rebase                    bool
+			addedContentSummary       string
+			canMatchCacheOnlyAfterRun bool
 		)
+		needsCacheKey := (s.executor.cacheFrom != nil || s.executor.cacheTo != nil)
+
 		// If we have to commit for this instruction, only assign the
 		// stage's configured output name to the last layer.
 		if lastInstruction {
 			commitName = s.output
 		}
+		// If --cache-from or --cache-to is specified, make sure to populate
+		// cacheKey, since it will be used either while pulling or pushing the
+		// cache images.
+		if needsCacheKey {
+			cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+			if err != nil {
+				return "", nil, fmt.Errorf("failed while generating cache key: %w", err)
+			}
+		}
 		// Check if there's already an image based on our parent that
 		// has the same change that we're about to make, so far as we
 		// can tell.
@@ -1164,17 +1228,57 @@
 		// determining if a cached layer with the same build args already exists
 		// and that is done in the if block below.
 		if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) {
+			// For `COPY` and `ADD`, history entries include digests computed from
+			// the content that's copied in.  We need to compute that information so that
+			// it can be used to evaluate the cache, which means we need to go ahead
+			// and copy the content.
+			canMatchCacheOnlyAfterRun = (step.Command == command.Add || step.Command == command.Copy)
+			if canMatchCacheOnlyAfterRun {
+				if err = ib.Run(step, s, noRunsRemaining); err != nil {
+					logrus.Debugf("Error building at step %+v: %v", *step, err)
+					return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
+				}
+				// Retrieve the digest info for the content that we just copied
+				// into the rootfs.
+				addedContentSummary = s.getContentSummaryAfterAddingContent()
+				// regenerate cache key with updated content summary
+				if needsCacheKey {
+					cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+					if err != nil {
+						return "", nil, fmt.Errorf("failed while generating cache key: %w", err)
+					}
+				}
+			}
 			cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 			if err != nil {
 				return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
 			}
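The canMatchCacheOnlyAfterRun logic above encodes an ordering constraint: for ADD and COPY, the history entry that cache matching compares against embeds a digest of the copied content, so the step has to run (the content has to be copied in) before the cache can be consulted; every other instruction runs only on a cache miss. A schematic sketch of that ordering with stand-in helpers; none of these names are buildah's:

package main

import "fmt"

type step struct{ command string }

// run is a stand-in: it "copies content" and returns its digest.
func run(s step) string { return "sha256:deadbeef" }

// lookupCache is a stand-in for the history-based cache match.
func lookupCache(createdBy string) (string, bool) { return "", false }

func main() {
	s := step{command: "copy"}

	// For ADD/COPY the cache key depends on the content digest, which is
	// only known after the content has been copied into the rootfs.
	var addedContentSummary string
	canMatchCacheOnlyAfterRun := s.command == "add" || s.command == "copy"
	if canMatchCacheOnlyAfterRun {
		addedContentSummary = run(s) // run first...
	}
	createdBy := fmt.Sprintf("/bin/sh -c #(nop) %s %s", s.command, addedContentSummary)
	if id, ok := lookupCache(createdBy); ok { // ...then match
		fmt.Println("cache hit:", id)
		return
	}
	if !canMatchCacheOnlyAfterRun {
		_ = run(s) // other instructions run only on a cache miss
	}
	fmt.Println("cache miss; committing new layer")
}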
+ if cacheID == "" && s.executor.cacheFrom != nil { + // only attempt to use cache again if pulling was successful + // otherwise do nothing and attempt to run the step, err != nil + // is ignored and will be automatically logged for --log-level debug + if id, err := s.pullCache(ctx, cacheKey); id != "" && err == nil { + logCachePulled(cacheKey) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err) + } + if cacheID != "" { + pulledAndUsedCacheImage = true + } + } + } } // If we didn't find a cache entry, or we need to add content // to find the digest of the content to check for a cached // image, run the step so that we can check if the result // matches a cache. - if cacheID == "" { + // We already called ib.Run() for the `canMatchCacheOnlyAfterRun` + // cases above, so we shouldn't do it again. + if cacheID == "" && !canMatchCacheOnlyAfterRun { // Process the instruction directly. if err = ib.Run(step, s, noRunsRemaining); err != nil { logrus.Debugf("Error building at step %+v: %v", *step, err) @@ -1182,14 +1286,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } // In case we added content, retrieve its digest. - addedContentType, addedContentDigest := s.builder.ContentDigester.Digest() - addedContentSummary = addedContentType - if addedContentDigest != "" { - if addedContentSummary != "" { - addedContentSummary = addedContentSummary + ":" + addedContentSummary = s.getContentSummaryAfterAddingContent() + // regenerate cache key with updated content summary + if needsCacheKey { + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("failed while generating cache key: %w", err) } - addedContentSummary = addedContentSummary + addedContentDigest.Encoded() - logrus.Debugf("added content %s", addedContentSummary) } // Check if there's already an image based on our parent that @@ -1201,6 +1304,10 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } } else { + // This log line is majorly here so we can verify in tests + // that our cache is performing in the most optimal way for + // various cases. + logrus.Debugf("Found a cache hit in the first iteration with id %s", cacheID) // If the instruction would affect our configuration, // process the configuration change so that, if we fall // off the cache path, the filesystem changes from the @@ -1246,15 +1353,61 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if err != nil { return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err) } + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } } - // Create a squashed version of this image - // if we're supposed to create one and this - // is the last instruction of the last stage. 
-		if s.executor.squash && lastInstruction && lastStage {
-			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
-			if err != nil {
-				return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err)
+		// The step above was just built and was not served from the
+		// cache, so check whether --cache-to was specified; if so,
+		// attempt to push this cache to the remote repo and
+		// fail accordingly.
+		//
+		// Or
+		//
+		// Try to push this cache to the remote repository only
+		// if the cache was present in local storage and was not
+		// pulled from a remote source while processing this step.
+		if s.executor.cacheTo != nil && (!pulledAndUsedCacheImage || cacheID == "") {
+			logCachePush(cacheKey)
+			if err = s.pushCache(ctx, imgID, cacheKey); err != nil {
+				return "", nil, err
+			}
+		}
+
+		if lastInstruction && lastStage {
+			if s.executor.squash {
+				// Create a squashed version of this image
+				// if we're supposed to create one and this
+				// is the last instruction of the last stage.
+				imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
+				if err != nil {
+					return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err)
+				}
+				// Generate build output if needed.
+				if canGenerateBuildOutput {
+					if err := s.generateBuildOutput(buildOutputOption); err != nil {
+						return "", nil, err
+					}
+				}
+			} else if cacheID != "" {
+				// If we found a valid cache hit and this is the last stage
+				// and not a squashed build, then there is no opportunity
+				// for us to perform a `commit` later in the code, since
+				// everything will be used from the cache.
+				//
+				// If that is the case and --output was provided, then
+				// generate the output manually, since there is no
+				// opportunity for us to perform a `commit` anywhere in
+				// the code.
+				// Generate build output if needed.
+				if canGenerateBuildOutput {
+					if err := s.generateBuildOutput(buildOutputOption); err != nil {
+						return "", nil, err
+					}
+				}
+			}
 		}
@@ -1521,6 +1674,114 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
 	return img.ID, ref, nil
 }
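generateCacheKey, which follows, folds everything that distinguishes a step (the base image's diff IDs and history, the manifest type, the step's created-by line, and whether the step adds a layer) through a single SHA-256, so two builds that agree on all of those inputs derive the same remote tag. A reduced sketch of the same construction, with hypothetical fixed inputs:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	hash := sha256.New()

	// Inputs that uniquely describe this step (values are illustrative).
	diffIDs := []string{"sha256:aaa...", "sha256:bbb..."}
	createdBy := "/bin/sh -c apt-get update"
	buildAddsLayer := true

	for _, id := range diffIDs {
		fmt.Fprintln(hash, id)
	}
	fmt.Fprintf(hash, "%t", buildAddsLayer)
	fmt.Fprintln(hash, createdBy)

	// The hex digest doubles as the image tag used with
	// --cache-to/--cache-from, e.g. registry.example.com/cache:<key>.
	cacheKey := fmt.Sprintf("%x", hash.Sum(nil))
	fmt.Println(cacheKey)
}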
+// generateCacheKey returns a computed digest for the current STEP by
+// running its history and diff through a hash algorithm; the generated
+// CacheKey is further used by buildah to lock and decide the tag for the
+// intermediate image, which can be pushed to and pulled from the remote
+// repository.
+func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
+	hash := sha256.New()
+	var baseHistory []v1.History
+	var diffIDs []digest.Digest
+	var manifestType string
+	var err error
+	if s.builder.FromImageID != "" {
+		manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
+		if err != nil {
+			return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err)
+		}
+		for i := 0; i < len(diffIDs); i++ {
+			fmt.Fprintln(hash, diffIDs[i].String())
+		}
+	}
+	createdBy := s.getCreatedBy(currNode, addedContentDigest)
+	fmt.Fprintf(hash, "%t", buildAddsLayer)
+	fmt.Fprintln(hash, createdBy)
+	fmt.Fprintln(hash, manifestType)
+	for _, element := range baseHistory {
+		fmt.Fprintln(hash, element.CreatedBy)
+		fmt.Fprintln(hash, element.Author)
+		fmt.Fprintln(hash, element.Comment)
+		fmt.Fprintln(hash, element.Created)
+		fmt.Fprintf(hash, "%t", element.EmptyLayer)
+		fmt.Fprintln(hash)
+	}
+	return fmt.Sprintf("%x", hash.Sum(nil)), nil
+}
+
+// cacheImageReference is an internal function which generates an ImageReference
+// from Named repo sources and a tag.
+func cacheImageReference(repo reference.Named, cachekey string) (types.ImageReference, error) {
+	tagged, err := reference.WithTag(repo, cachekey)
+	if err != nil {
+		return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err)
+	}
+	dest, err := imagedocker.NewReference(tagged)
+	if err != nil {
+		return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err)
+	}
+	return dest, nil
+}
+
+// pushCache takes the image id of the intermediate image and attempts
+// to push it to the remote repository, with cacheKey as the tag.
+// Returns an error on failure, otherwise returns nil.
+func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) error {
+	dest, err := cacheImageReference(s.executor.cacheTo, cacheKey)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src)
+	options := buildah.PushOptions{
+		Compression:         s.executor.compression,
+		SignaturePolicyPath: s.executor.signaturePolicyPath,
+		Store:               s.executor.store,
+		SystemContext:       s.executor.systemContext,
+		BlobDirectory:       s.executor.blobDirectory,
+		SignBy:              s.executor.signBy,
+		MaxRetries:          s.executor.maxPullPushRetries,
+		RetryDelay:          s.executor.retryPullPushDelay,
+	}
+	ref, digest, err := buildah.Push(ctx, src, dest, options)
+	if err != nil {
+		return fmt.Errorf("failed pushing cache to %q: %w", dest, err)
+	}
+	logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest)
+	return nil
+}
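cacheImageReference above composes standard containers/image calls. A short usage sketch of the same composition, with a placeholder repository name, showing how a bare repository plus a computed cache key becomes a pullable/pushable docker reference:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// --cache-to/--cache-from accept a bare repository (no tag or digest)...
	repo, err := reference.ParseNormalizedNamed("registry.example.com/build/cache")
	if err != nil {
		panic(err)
	}
	// ...and the computed cache key is attached as the tag.
	tagged, err := reference.WithTag(repo, "0a1b2c3d4e5f")
	if err != nil {
		panic(err)
	}
	ref, err := docker.NewReference(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.DockerReference().String()) // registry.example.com/build/cache:0a1b2c3d4e5f
}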
+// pullCache takes the image source of the cache, assuming the tag
+// already points to the valid cacheKey, and pulls the image to
+// local storage, but only if it was not already present there or a
+// newer version of the cache was found in the upstream repo.  If a new
+// image was pulled, the function returns the image id; otherwise it
+// returns the empty string "", or an error if one was encountered
+// while pulling the cache.
+func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (string, error) {
+	src, err := cacheImageReference(s.executor.cacheFrom, cacheKey)
+	if err != nil {
+		return "", err
+	}
+	logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference())
+	options := buildah.PullOptions{
+		SignaturePolicyPath: s.executor.signaturePolicyPath,
+		Store:               s.executor.store,
+		SystemContext:       s.executor.systemContext,
+		BlobDirectory:       s.executor.blobDirectory,
+		MaxRetries:          s.executor.maxPullPushRetries,
+		RetryDelay:          s.executor.retryPullPushDelay,
+		AllTags:             false,
+		ReportWriter:        nil,
+		PullPolicy:          define.PullIfNewer,
+	}
+	id, err := buildah.Pull(ctx, src.DockerReference().String(), options)
+	if err != nil {
+		logrus.Debugf("failed pulling cache from source %s: %v", src, err)
+		return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
+	}
+	logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
+	return id, nil
+}
+
 // intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
 // It verifies this by checking the parent of the top layer of the image and the history.
 func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
@@ -1538,6 +1799,17 @@
 		}
 	}
 	for _, image := range images {
+		// If s.executor.cacheTTL was specified,
+		// skip this image if it is older than
+		// the specified duration.
+		if int64(s.executor.cacheTTL) != 0 {
+			timeNow := time.Now()
+			imageDuration := timeNow.Sub(image.Created)
+			if s.executor.cacheTTL < imageDuration {
+				continue
+			}
+		}
 		var imageTopLayer *storage.Layer
 		var imageParentLayerID string
 		if image.TopLayer != "" {
@@ -1590,15 +1862,6 @@
 // or commit via any custom exporter if specified.
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) { ib := s.stage.Builder - var buildOutputOption define.BuildOutputOption - if s.executor.buildOutput != "" { - var err error - logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput) - buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput) - if err != nil { - return "", nil, fmt.Errorf("failed to parse build output: %w", err) - } - } var imageRef types.ImageReference if output != "" { imageRef2, err := s.executor.resolveNameToImageRef(output) @@ -1739,12 +2002,6 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, } - // generate build output - if s.executor.buildOutput != "" { - if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil { - return "", nil, err - } - } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { return "", nil, err @@ -1760,7 +2017,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer return imgID, ref, nil } -func (s *StageExecutor) generateBuildOutput(commitOpts buildah.CommitOptions, buildOutputOpts define.BuildOutputOption) error { +func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOption) error { extractRootfsOpts := buildah.ExtractRootfsOptions{} if unshare.IsRootless() { // In order to maintain as much parity as possible @@ -1775,7 +2032,7 @@ func (s *StageExecutor) generateBuildOutput(commitOpts buildah.CommitOptions, bu extractRootfsOpts.StripSetgidBit = true extractRootfsOpts.StripXattrs = true } - rc, errChan, err := s.builder.ExtractRootfs(commitOpts, extractRootfsOpts) + rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{}, extractRootfsOpts) if err != nil { return fmt.Errorf("failed to extract rootfs from given container image: %w", err) } diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index 04a1fd08f..9155bb318 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -3,16 +3,14 @@ package buildah import ( "bufio" "bytes" - "errors" "fmt" - "io/ioutil" "os" "runtime" "strconv" "strings" - "time" "github.com/containerd/containerd/platforms" + putil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" "github.com/containers/storage" "github.com/containers/storage/pkg/system" @@ -83,22 +81,16 @@ func hostInfo() map[string]interface{} { "version": hostDistributionInfo["Version"], } - kv, err := readKernelVersion() + kv, err := putil.ReadKernelVersion() if err != nil { logrus.Error(err, "error reading kernel version") } info["kernel"] = kv - up, err := readUptime() + upDuration, err := putil.ReadUptime() if err != nil { logrus.Error(err, "error reading up time") } - // Convert uptime in seconds to a human-readable format - upSeconds := up + "s" - upDuration, err := time.ParseDuration(upSeconds) - if err != nil { - logrus.Error(err, "error parsing system uptime") - } hoursFound := false var timeBuffer bytes.Buffer @@ -170,30 +162,6 @@ func storeInfo(store storage.Store) (map[string]interface{}, error) { return info, nil } -func readKernelVersion() (string, error) { - buf, err := ioutil.ReadFile("/proc/version") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) 
< 2 {
-		return string(bytes.TrimSpace(buf)), nil
-	}
-	return string(f[2]), nil
-}
-
-func readUptime() (string, error) {
-	buf, err := ioutil.ReadFile("/proc/uptime")
-	if err != nil {
-		return "", err
-	}
-	f := bytes.Fields(buf)
-	if len(f) < 1 {
-		return "", errors.New("invalid uptime")
-	}
-	return string(f[0]), nil
-}
-
 // getHostDistributionInfo returns a map containing the host's distribution and version
 func getHostDistributionInfo() map[string]string {
 	dist := make(map[string]string)
diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go
index 98c42453b..f424df11f 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/build.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/build.go
@@ -19,6 +19,7 @@ import (
 	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/buildah/pkg/util"
 	"github.com/containers/common/pkg/auth"
+	"github.com/containers/image/v5/docker/reference"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -233,10 +234,6 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
 		return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
 	}
-	if c.Flag("cache-from").Changed {
-		logrus.Debugf("build --cache-from not enabled, has no effect")
-	}
-
 	if c.Flag("compress").Changed {
 		logrus.Debugf("--compress option specified but is ignored")
 	}
@@ -290,6 +287,29 @@
 			iopts.Quiet = true
 		}
 	}
+	var cacheTo reference.Named
+	var cacheFrom reference.Named
+	cacheTo = nil
+	cacheFrom = nil
+	if c.Flag("cache-to").Changed {
+		cacheTo, err = parse.RepoNameToNamedReference(iopts.CacheTo)
+		if err != nil {
+			return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err)
+		}
+	}
+	if c.Flag("cache-from").Changed {
+		cacheFrom, err = parse.RepoNameToNamedReference(iopts.CacheFrom)
+		if err != nil {
+			return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheFrom, err)
+		}
+	}
+	var cacheTTL time.Duration
+	if c.Flag("cache-ttl").Changed {
+		cacheTTL, err = time.ParseDuration(iopts.CacheTTL)
+		if err != nil {
+			return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", iopts.CacheTTL, err)
+		}
+	}
 	options = define.BuildOptions{
 		AddCapabilities:         iopts.CapAdd,
 		AdditionalBuildContexts: additionalBuildContext,
@@ -300,6 +320,9 @@
 		Args:                    args,
 		BlobDirectory:           iopts.BlobCache,
 		BuildOutput:             iopts.BuildOutput,
+		CacheFrom:               cacheFrom,
+		CacheTo:                 cacheTo,
+		CacheTTL:                cacheTTL,
 		CNIConfigDir:            iopts.CNIConfigDir,
 		CNIPluginPath:           iopts.CNIPlugInPath,
 		CPPFlags:                iopts.CPPFlags,
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 97ab95ee1..fb5691b33 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -54,6 +54,8 @@ type BudResults struct {
 	BuildArg []string
 	BuildContext []string
 	CacheFrom string
+	CacheTo string
+	CacheTTL string
 	CertDir string
 	Compress bool
 	Creds string
@@ -197,7 +199,9 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
 	fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder") fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder") - fs.StringVar(&flags.CacheFrom, "cache-from", "", "images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.") + fs.StringVar(&flags.CacheFrom, "cache-from", "", "remote repository to utilise as a potential cache source") + fs.StringVar(&flags.CacheTo, "cache-to", "", "remote repository to utilise as a potential cache destination") + fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images created within the specified duration") fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image") fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)") @@ -276,6 +280,8 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion["build-arg"] = commonComp.AutocompleteNone flagCompletion["build-context"] = commonComp.AutocompleteNone flagCompletion["cache-from"] = commonComp.AutocompleteNone + flagCompletion["cache-to"] = commonComp.AutocompleteNone + flagCompletion["cache-ttl"] = commonComp.AutocompleteNone flagCompletion["cert-dir"] = commonComp.AutocompleteDefault flagCompletion["cpp-flag"] = commonComp.AutocompleteNone flagCompletion["creds"] = commonComp.AutocompleteNone diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 6ab10b13c..07bb2195a 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -250,7 +250,7 @@ func Unmount(contentDir string) error { } // Ignore EINVAL as the specified merge dir is not a mount point - if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL { + if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL { return fmt.Errorf("unmount overlay %s: %w", mergeDir, err) } return nil @@ -259,7 +259,7 @@ func Unmount(contentDir string) error { func recreate(contentDir string) error { st, err := system.Stat(contentDir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("failed to stat overlay upper directory: %w", err) } @@ -293,7 +293,7 @@ func CleanupContent(containerDir string) (Err error) { files, err := ioutil.ReadDir(contentDir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("read directory: %w", err) } @@ -305,7 +305,7 @@ func CleanupContent(containerDir string) (Err error) { } } - if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(contentDir); err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to cleanup overlay directory: %w", err) } return nil diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 26ee4da77..3492ac968 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -19,6 +19,7 @@ import ( internalParse
"github.com/containers/buildah/internal/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" @@ -48,6 +49,18 @@ const ( BuildahCacheDir = "buildah-cache" ) +// RepoNameToNamedReference parse the raw string to Named reference +func RepoNameToNamedReference(dest string) (reference.Named, error) { + named, err := reference.ParseNormalizedNamed(dest) + if err != nil { + return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err) + } + if !reference.IsNameOnly(named) { + return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named) + } + return named, nil +} + // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) @@ -232,11 +245,11 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti if _, err := os.Stat(SeccompOverridePath); err == nil { commonOpts.SeccompProfilePath = SeccompOverridePath } else { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return err } if _, err := os.Stat(SeccompDefaultPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return err } } else { @@ -1059,11 +1072,11 @@ func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { } path = filepath.Join(contextDir, ".containerignore") excludes, err := imagebuilder.ParseIgnore(path) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { path = filepath.Join(contextDir, ".dockerignore") excludes, err = imagebuilder.ParseIgnore(path) } - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return excludes, "", nil } return excludes, path, err diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go b/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go new file mode 100644 index 000000000..d185cb45f --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" + "time" +) + +func ReadUptime() (time.Duration, error) { + return 0, errors.New("readUptime not supported on darwin") +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go b/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go new file mode 100644 index 000000000..7112aba38 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go @@ -0,0 +1,25 @@ +package util + +import ( + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// For some reason, unix.ClockGettime isn't implemented by x/sys/unix on FreeBSD +func clockGettime(clockid int32, time *unix.Timespec) (err error) { + _, _, e1 := unix.Syscall(unix.SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + return e1 + } + return nil +} + +func ReadUptime() (time.Duration, error) { + var uptime unix.Timespec + if err := clockGettime(unix.CLOCK_UPTIME, &uptime); err != nil { + return 0, err + } + return time.Duration(unix.TimespecToNsec(uptime)), nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go new file mode 100644 index 000000000..7c8b6ba76 --- /dev/null +++ 
b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go @@ -0,0 +1,28 @@ +package util + +import ( + "bytes" + "errors" + "io/ioutil" + "time" +) + +func ReadUptime() (time.Duration, error) { + buf, err := ioutil.ReadFile("/proc/uptime") + if err != nil { + return 0, err + } + f := bytes.Fields(buf) + if len(f) < 1 { + return 0, errors.New("invalid uptime") + } + + // Convert the uptime in seconds to a time.Duration + up := string(f[0]) + upSeconds := up + "s" + upDuration, err := time.ParseDuration(upSeconds) + if err != nil { + return 0, err + } + return upDuration, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go b/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go new file mode 100644 index 000000000..ef3adac2a --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" + "time" +) + +func ReadUptime() (time.Duration, error) { + return 0, errors.New("readUptime not supported on windows") +} diff --git a/vendor/github.com/containers/buildah/pkg/util/version_unix.go b/vendor/github.com/containers/buildah/pkg/util/version_unix.go new file mode 100644 index 000000000..88e8b58a2 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/version_unix.go @@ -0,0 +1,19 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package util + +import ( + "bytes" + + "golang.org/x/sys/unix" +) + +func ReadKernelVersion() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", err + } + n := bytes.IndexByte(uname.Release[:], 0) + return string(uname.Release[:n]), nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/version_windows.go b/vendor/github.com/containers/buildah/pkg/util/version_windows.go new file mode 100644 index 000000000..9acf469f1 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/version_windows.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" +) + +func ReadKernelVersion() (string, error) { + return "", errors.New("readKernelVersion not supported on windows") + +} diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index b50afec0b..2054c5652 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -331,7 +331,7 @@ func DefaultNamespaceOptions() (define.NamespaceOptions, error) { {Name: string(specs.MountNamespace), Host: false}, {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"}, {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, - {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"}, + {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "" || cfg.Containers.UserNS == "host"}, {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, } return options, nil } @@ -477,8 +477,10 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [ if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { return 1, err } - if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { - return 1, err + if spec.Linux != nil { + if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { + return 1, err + } } errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} closeBeforeReadingErrorFds =
[]int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} @@ -1147,7 +1149,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run containerStartR.file, containerStartW.file, err = os.Pipe() if err != nil { - return fmt.Errorf("error creating container create pipe: %w", err) + return fmt.Errorf("error creating container start pipe: %w", err) } defer containerStartR.Close() defer containerStartW.Close() @@ -1365,7 +1367,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin // the volume contents. If we do need to create it, then we'll // need to populate it, too, so make a note of that. if _, err := os.Stat(volumePath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return nil, err } logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) @@ -1391,7 +1393,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin return nil, fmt.Errorf("evaluating path %q: %w", srcPath, err) } stat, err := os.Stat(srcPath) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, os.ErrNotExist) { return nil, err } // If we need to populate the mounted volume's contents with @@ -1844,7 +1846,7 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint var prevErr error for _, path := range artifacts.TmpFiles { err := os.Remove(path) - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { if prevErr != nil { logrus.Error(prevErr) } diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index c9384d2d2..b8d141eec 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -251,6 +251,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { jconf.Set("enforce_statfs", 0) jconf.Set("devfs_ruleset", 4) jconf.Set("allow.raw_sockets", true) + jconf.Set("allow.chflags", true) jconf.Set("allow.mount", true) jconf.Set("allow.mount.devfs", true) jconf.Set("allow.mount.nullfs", true) diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 100e223f9..a5d51732f 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -381,7 +381,7 @@ func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string] for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} { manager, err := hooks.New(context.Background(), []string{hDir}, []string{}) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return nil, err @@ -690,7 +690,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti // by the kernel p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1)) _, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, os.ErrNotExist) { return false, nil, false, err } if err == nil { diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index b186cb5e9..8cc2bfc62 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -4,6 +4,7 @@ package buildah import ( + "errors" "fmt" "os" @@ -33,7 +34,7 @@ func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) erro } for i := range stdioPipe { pipeFdName := 
fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) - if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !os.IsNotExist(err) { + if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("setting file label on %q: %w", pipeFdName, err) } } diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 6ebd04a0c..b362dec84 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -187,7 +187,7 @@ func IsContainer(id string, store storage.Store) (bool, error) { // Assuming that if the stateFile exists, that this is a Buildah // container. if _, err = os.Stat(filepath.Join(cdir, stateFile)); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return false, nil } return false, err diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index ffebd3146..10b7504a0 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -384,13 +384,15 @@ var ( // fileExistsAndNotADir - Check to see if a file exists // and that it is not a directory. -func fileExistsAndNotADir(path string) bool { +func fileExistsAndNotADir(path string) (bool, error) { file, err := os.Stat(path) - - if file == nil || err != nil || os.IsNotExist(err) { - return false + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, err } - return !file.IsDir() + return !file.IsDir(), nil } // FindLocalRuntime find the local runtime of the @@ -404,7 +406,11 @@ func FindLocalRuntime(runtime string) string { return localRuntime } for _, val := range conf.Engine.OCIRuntimes[runtime] { - if fileExistsAndNotADir(val) { + exists, err := fileExistsAndNotADir(val) + if err != nil { + logrus.Errorf("Failed to determine if file exists and is not a directory: %v", err) + } + if exists { localRuntime = val break } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index b1866fa9b..d1548eb23 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -470,6 +470,9 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma } if _, err := i.runtime.store.DeleteImage(i.ID(), true); handleError(err) != nil { + if errors.Is(err, storage.ErrImageUsedByContainer) { + err = fmt.Errorf("%w: consider listing external containers and force-removing image", err) + } return processedIDs, err } report.Untagged = append(report.Untagged, i.Names()...) @@ -478,6 +481,11 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma report.Removed = true } + // Do not delete any parents if NoPrune is true + if options.NoPrune { + return processedIDs, nil + } + // Check if can remove the parent image. if parent == nil { return processedIDs, nil @@ -495,7 +503,6 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma if !danglingParent { return processedIDs, nil } - // Recurse into removing the parent. 
return parent.removeRecursive(ctx, rmMap, processedIDs, "", options) } diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 6030a179b..7cbf9c95e 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -608,6 +608,8 @@ type RemoveImagesOptions struct { // much space was freed. However, computing the size of an image is // comparatively expensive, so it is made optional. WithSize bool + // NoPrune will not remove dangling images + NoPrune bool } // RemoveImages removes images specified by names. If no names are specified, @@ -653,7 +655,6 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem toDelete := []string{} // Look up images in the local containers storage and fill out // toDelete and the deleteMap. - switch { case len(names) > 0: // prepare lookupOptions diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 3d90268cd..a6276fbef 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -375,6 +375,9 @@ type EngineConfig struct { // ServiceDestinations mapped by service Names ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` + // SSHConfig contains the ssh config file path if not the default + SSHConfig string `toml:"ssh_config,omitempty"` + // RuntimePath is the path to OCI runtime binary for launching containers. // The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 161a9c8d6..c7ddf90ee 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -287,6 +287,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot) storeOpts.GraphRoot = _defaultGraphRoot } + c.graphRoot = storeOpts.GraphRoot c.ImageCopyTmpDir = getDefaultTmpDir() c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") @@ -397,6 +398,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.ChownCopiedFiles = true c.PodExitPolicy = defaultPodExitPolicy + c.SSHConfig = getDefaultSSHConfig() return c, nil } @@ -633,3 +635,11 @@ func machineVolumes(volumes []string) ([]string, error) { } return translatedVolumes, nil } + +func getDefaultSSHConfig() string { + if path, ok := os.LookupEnv("CONTAINERS_SSH_CONF"); ok { + return path + } + dirname := homedir.Get() + return filepath.Join(dirname, ".ssh", "config") +} diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go new file mode 100644 index 000000000..b6680b3d8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -0,0 +1,326 @@ +package ssh + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/url" + "os" + "os/user" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/homedir" + "github.com/pkg/sftp" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh" + 
"golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" +) + +func golangConnectionCreate(options ConnectionCreateOptions) error { + var match bool + var err error + if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil { + return fmt.Errorf("invalid destination: %w", err) + } + + if !match { + options.Path = "ssh://" + options.Path + } + + if len(options.Socket) > 0 { + options.Path += options.Socket + } + + dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity) + if err != nil { + return err + } + + if uri.Path == "" || uri.Path == "/" { + if uri.Path, err = getUDS(uri, options.Identity); err != nil { + return err + } + dst.URI += uri.Path + } + + cfg, err := config.ReadCustomConfig() + if err != nil { + return err + } + if cfg.Engine.ServiceDestinations == nil { + cfg.Engine.ServiceDestinations = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Engine.ActiveService = options.Name + } else { + cfg.Engine.ServiceDestinations[options.Name] = *dst + } + return cfg.Write() +} + +func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, error) { + _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + cfg, err := ValidateAndConfigure(uri, options.Identity) + if err != nil { + return nil, err + } + + dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client + if err != nil { + return nil, fmt.Errorf("failed to connect: %w", err) + } + + return &ConnectionDialReport{dial}, nil +} + +func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { + _, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + cfg, err := ValidateAndConfigure(uri, options.Identity) + if err != nil { + return nil, err + } + dialAdd, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client + if err != nil { + return nil, fmt.Errorf("failed to connect: %w", err) + } + + out, err := ExecRemoteCommand(dialAdd, strings.Join(options.Args, " ")) + if err != nil { + return nil, err + } + return &ConnectionExecReport{Response: string(out)}, nil +} + +func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) { + host, remoteFile, localFile, swap, err := ParseScpArgs(options) + if err != nil { + return nil, err + } + + _, uri, err := Validate(options.User, host, options.Port, options.Identity) + if err != nil { + return nil, err + } + cfg, err := ValidateAndConfigure(uri, options.Identity) + if err != nil { + return nil, err + } + + dial, err := ssh.Dial("tcp", uri.Host, cfg) // dial the client + if err != nil { + return nil, fmt.Errorf("failed to connect: %w", err) + } + sc, err := sftp.NewClient(dial) + if err != nil { + return nil, err + } + + f, err := os.OpenFile(localFile, (os.O_RDWR | os.O_CREATE), 0o644) + if err != nil { + return nil, err + } + + parent := filepath.Dir(remoteFile) + path := string(filepath.Separator) + dirs := strings.Split(parent, path) + for _, dir := range dirs { + path = filepath.Join(path, dir) + // ignore errors due to most of the dirs already existing + _ = sc.Mkdir(path) + } + + remote, err := sc.OpenFile(remoteFile, (os.O_RDWR | os.O_CREATE)) + if err != nil { + return nil, err + } + defer remote.Close() + + if !swap { + _, err = io.Copy(remote, f) + if err != nil { + return nil, err + } + } else { + _, err = io.Copy(f, remote) + if err != nil { + return nil, err + } + } + 
return &ConnectionScpReport{Response: remote.Name()}, nil +} + +// ExecRemoteCommand takes an ssh client connection and a command to run and executes the +// command on the specified client. The function returns the stdout from the client; on failure, the stderr contents are wrapped into the returned error +func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { + sess, err := dial.NewSession() // new ssh client session + if err != nil { + return nil, err + } + defer sess.Close() + + var buffer bytes.Buffer + var bufferErr bytes.Buffer + sess.Stdout = &buffer // output from client funneled into buffer + sess.Stderr = &bufferErr // err from client funneled into buffer + if err := sess.Run(run); err != nil { // run the command on the ssh client + return nil, fmt.Errorf("%v: %w", bufferErr.String(), err) + } + return buffer.Bytes(), nil +} + +func GetUserInfo(uri *url.URL) (*url.Userinfo, error) { + var ( + usr *user.User + err error + ) + if u, found := os.LookupEnv("_CONTAINERS_ROOTLESS_UID"); found { + usr, err = user.LookupId(u) + if err != nil { + return nil, fmt.Errorf("failed to lookup rootless user: %w", err) + } + } else { + usr, err = user.Current() + if err != nil { + return nil, fmt.Errorf("failed to obtain current user: %w", err) + } + } + + pw, set := uri.User.Password() + if set { + return url.UserPassword(usr.Username, pw), nil + } + return url.User(usr.Username), nil +} + +// ValidateAndConfigure will take an ssh url and an identity key (rsa and the like) and ensure the information given is valid +// iden can be blank to mean no identity key +// Once the function validates the information, it creates and returns an ssh.ClientConfig. +func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) { + var signers []ssh.Signer + passwd, passwdSet := uri.User.Password() + if iden != "" { // iden might be blank if coming from image scp or if no validation is needed + value := iden + s, err := PublicKey(value, []byte(passwd)) + if err != nil { + return nil, fmt.Errorf("failed to read identity %q: %w", value, err) + } + signers = append(signers, s) + logrus.Debugf("SSH Ident Key %q %s %s", value, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } else if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { // validate ssh information, specifically the unix file socket used by the ssh agent. + logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer enabled", sock) + + c, err := net.Dial("unix", sock) + if err != nil { + return nil, err + } + agentSigners, err := agent.NewClient(c).Signers() + if err != nil { + return nil, err + } + + signers = append(signers, agentSigners...)
+ + if logrus.IsLevelEnabled(logrus.DebugLevel) { + for _, s := range agentSigners { + logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + } + } + var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notably public key authorization + if len(signers) > 0 { + dedup := make(map[string]ssh.Signer) + for _, s := range signers { + fp := ssh.FingerprintSHA256(s.PublicKey()) + if _, found := dedup[fp]; found { + logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + dedup[fp] = s + } + + var uniq []ssh.Signer + for _, s := range dedup { + uniq = append(uniq, s) + } + authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { + return uniq, nil + })) + } + if passwdSet { // if password authentication is given and valid, add to the list + authMethods = append(authMethods, ssh.Password(passwd)) + } + if len(authMethods) == 0 { + authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) { + pass, err := ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username())) + return string(pass), err + })) + } + tick, err := time.ParseDuration("40s") + if err != nil { + return nil, err + } + keyFilePath := filepath.Join(homedir.Get(), ".ssh", "known_hosts") + known, err := knownhosts.New(keyFilePath) + if err != nil { + return nil, fmt.Errorf("error creating host key callback function for %s: %w", keyFilePath, err) + } + + cfg := &ssh.ClientConfig{ + User: uri.User.Username(), + Auth: authMethods, + HostKeyCallback: known, + Timeout: tick, + } + return cfg, nil +} + +func getUDS(uri *url.URL, iden string) (string, error) { + cfg, err := ValidateAndConfigure(uri, iden) + if err != nil { + return "", fmt.Errorf("failed to validate: %w", err) + } + dial, err := ssh.Dial("tcp", uri.Host, cfg) + if err != nil { + return "", fmt.Errorf("failed to connect: %w", err) + } + defer dial.Close() + + session, err := dial.NewSession() + if err != nil { + return "", fmt.Errorf("failed to create new ssh session on %q: %w", uri.Host, err) + } + defer session.Close() + + // Override the podman binary for testing, etc. + podman := "podman" + if v, found := os.LookupEnv("PODMAN_BINARY"); found { + podman = v + } + infoJSON, err := ExecRemoteCommand(dial, podman+" info --format=json") + if err != nil { + return "", err + } + + var info Info + if err := json.Unmarshal(infoJSON, &info); err != nil { + return "", fmt.Errorf("failed to parse 'podman info' results: %w", err) + } + + if info.Host.RemoteSocket == nil || len(info.Host.RemoteSocket.Path) == 0 { + return "", fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) + } + return info.Host.RemoteSocket.Path, nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go new file mode 100644 index 000000000..4c407360a --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go @@ -0,0 +1,182 @@ +package ssh + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/containers/common/pkg/config" +) + +func nativeConnectionCreate(options ConnectionCreateOptions) error { + var match bool + var err error + if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil { + return fmt.Errorf("invalid destination: %w", err) + } + + if !match { + options.Path = "ssh://" + options.Path + } + + if
len(options.Socket) > 0 { + options.Path += options.Socket + } + + dst, uri, err := Validate(options.User, options.Path, options.Port, options.Identity) + if err != nil { + return err + } + + // test connection + ssh, err := exec.LookPath("ssh") + if err != nil { + return fmt.Errorf("no ssh binary found") + } + + if strings.Contains(uri.Host, "/run") { + uri.Host = strings.Split(uri.Host, "/run")[0] + } + conf, err := config.Default() + if err != nil { + return err + } + + args := []string{uri.User.String() + "@" + uri.Hostname()} + + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + + output := &bytes.Buffer{} + args = append(args, "podman", "info", "--format", "json") + info := exec.Command(ssh, args...) + info.Stdout = output + err = info.Run() + if err != nil { + return err + } + + remoteInfo := &Info{} + if err := json.Unmarshal(output.Bytes(), &remoteInfo); err != nil { + return fmt.Errorf("failed to parse 'podman info' results: %w", err) + } + + if remoteInfo.Host.RemoteSocket == nil || len(remoteInfo.Host.RemoteSocket.Path) == 0 { + return fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) + } + + cfg, err := config.ReadCustomConfig() + if err != nil { + return err + } + if options.Default { + cfg.Engine.ActiveService = options.Name + } + + if cfg.Engine.ServiceDestinations == nil { + cfg.Engine.ServiceDestinations = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Engine.ActiveService = options.Name + } else { + cfg.Engine.ServiceDestinations[options.Name] = *dst + } + + return cfg.Write() +} + +func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { + dst, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + ssh, err := exec.LookPath("ssh") + if err != nil { + return nil, fmt.Errorf("no ssh binary found") + } + + output := &bytes.Buffer{} + errors := &bytes.Buffer{} + if strings.Contains(uri.Host, "/run") { + uri.Host = strings.Split(uri.Host, "/run")[0] + } + + options.Args = append([]string{uri.User.String() + "@" + uri.Hostname()}, options.Args...) + conf, err := config.Default() + if err != nil { + return nil, err + } + + args := []string{} + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + args = append(args, options.Args...) + info := exec.Command(ssh, args...) 
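Callers are expected to reach these connection helpers through the package's public entry points, ssh.Exec plus ssh.DefineMode (both defined in ssh.go later in this diff), rather than calling nativeConnectionExec directly. A minimal sketch under that assumption; the destination host and command here are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/ssh"
)

func main() {
	opts := &ssh.ConnectionExecOptions{
		Host: "ssh://core@example.com:22", // hypothetical remote destination
		Args: []string{"podman", "info", "--format", "json"},
	}
	// "native" shells out to the local ssh binary (nativeConnectionExec);
	// "golang" uses the built-in x/crypto/ssh client instead.
	out, err := ssh.Exec(opts, ssh.DefineMode("native"))
	if err != nil {
		fmt.Println("remote exec failed:", err)
		return
	}
	fmt.Println(out)
}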
+ info.Stdout = output + info.Stderr = errors + err = info.Run() + if err != nil { + return nil, err + } + return &ConnectionExecReport{Response: output.String()}, nil +} + +func nativeConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, error) { + host, remotePath, localPath, swap, err := ParseScpArgs(options) + if err != nil { + return nil, err + } + dst, uri, err := Validate(options.User, host, options.Port, options.Identity) + if err != nil { + return nil, err + } + + scp, err := exec.LookPath("scp") + if err != nil { + return nil, fmt.Errorf("no scp binary found") + } + + conf, err := config.Default() + if err != nil { + return nil, err + } + + args := []string{} + if len(dst.Identity) > 0 { + args = append(args, "-i", dst.Identity) + } + if len(conf.Engine.SSHConfig) > 0 { + args = append(args, "-F", conf.Engine.SSHConfig) + } + + userString := "" + if !strings.Contains(host, "@") { + userString = uri.User.String() + "@" + } + // meaning, we are copying from a remote host + if swap { + args = append(args, userString+host+":"+remotePath, localPath) + } else { + args = append(args, localPath, userString+host+":"+remotePath) + } + + info := exec.Command(scp, args...) + err = info.Run() + if err != nil { + return nil, err + } + + return &ConnectionScpReport{Response: remotePath}, nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/ssh.go b/vendor/github.com/containers/common/pkg/ssh/ssh.go new file mode 100644 index 000000000..d638d69ad --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/ssh.go @@ -0,0 +1,59 @@ +package ssh + +import ( + "fmt" + + "golang.org/x/crypto/ssh" +) + +func Create(options *ConnectionCreateOptions, kind EngineMode) error { + if kind == NativeMode { + return nativeConnectionCreate(*options) + } + return golangConnectionCreate(*options) +} + +func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) { + var rep *ConnectionDialReport + var err error + if kind == NativeMode { + return nil, fmt.Errorf("ssh dial failed: you cannot create a dial-able client with native ssh") + } + rep, err = golangConnectionDial(*options) + if err != nil { + return nil, err + } + return rep.Client, nil +} + +func Exec(options *ConnectionExecOptions, kind EngineMode) (string, error) { + var rep *ConnectionExecReport + var err error + if kind == NativeMode { + rep, err = nativeConnectionExec(*options) + if err != nil { + return "", err + } + } else { + rep, err = golangConnectionExec(*options) + if err != nil { + return "", err + } + } + return rep.Response, nil +} + +func Scp(options *ConnectionScpOptions, kind EngineMode) (string, error) { + var rep *ConnectionScpReport + var err error + if kind == NativeMode { + if rep, err = nativeConnectionScp(*options); err != nil { + return "", err + } + return rep.Response, nil + } + if rep, err = golangConnectionScp(*options); err != nil { + return "", err + } + return rep.Response, nil +} diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go new file mode 100644 index 000000000..f22b5fba9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/types.go @@ -0,0 +1,223 @@ +package ssh + +import ( + "net/url" + "time" + + "github.com/containers/storage/pkg/idtools" + "golang.org/x/crypto/ssh" +) + +type EngineMode string + +const ( + NativeMode = EngineMode("native") + GolangMode = EngineMode("golang") + InvalidMode = EngineMode("invalid") +) + +type ConnectionCreateOptions struct { + Name string + Path string + 
User *url.Userinfo + Port int + Identity string + Socket string + Default bool +} + +type ConnectionDialOptions struct { + Host string + Identity string + User *url.Userinfo + Port int + Auth []string + Timeout time.Duration +} + +type ConnectionDialReport struct { + Client *ssh.Client +} + +type ConnectionExecOptions struct { + Host string + Identity string + User *url.Userinfo + Port int + Auth []string + Args []string + Timeout time.Duration +} + +type ConnectionExecReport struct { + Response string +} + +type ConnectionScpOptions struct { + User *url.Userinfo + Source string + Destination string + Identity string + Port int +} + +type ConnectionScpReport struct { + Response string +} + +// Info is the overall struct that describes the host system +// running libpod/podman +type Info struct { + Host *HostInfo `json:"host"` + Store *StoreInfo `json:"store"` + Registries map[string]interface{} `json:"registries"` + Plugins Plugins `json:"plugins"` + Version Version `json:"version"` +} + +// Version is an output struct for API +type Version struct { + APIVersion string + Version string + GoVersion string + GitCommit string + BuiltTime string + Built int64 + OsArch string + Os string +} + +// SecurityInfo describes the libpod host +type SecurityInfo struct { + AppArmorEnabled bool `json:"apparmorEnabled"` + DefaultCapabilities string `json:"capabilities"` + Rootless bool `json:"rootless"` + SECCOMPEnabled bool `json:"seccompEnabled"` + SECCOMPProfilePath string `json:"seccompProfilePath"` + SELinuxEnabled bool `json:"selinuxEnabled"` +} + +// HostInfo describes the libpod host +type HostInfo struct { + Arch string `json:"arch"` + BuildahVersion string `json:"buildahVersion"` + CgroupManager string `json:"cgroupManager"` + CgroupsVersion string `json:"cgroupVersion"` + CgroupControllers []string `json:"cgroupControllers"` + Conmon *ConmonInfo `json:"conmon"` + CPUs int `json:"cpus"` + CPUUtilization *CPUUsage `json:"cpuUtilization"` + Distribution DistributionInfo `json:"distribution"` + EventLogger string `json:"eventLogger"` + Hostname string `json:"hostname"` + IDMappings IDMappings `json:"idMappings,omitempty"` + Kernel string `json:"kernel"` + LogDriver string `json:"logDriver"` + MemFree int64 `json:"memFree"` + MemTotal int64 `json:"memTotal"` + NetworkBackend string `json:"networkBackend"` + OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` + OS string `json:"os"` + // RemoteSocket returns the UNIX domain socket the Podman service is listening on + RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` + RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` + // ServiceIsRemote is true when the podman/libpod service is remote to the client + ServiceIsRemote bool `json:"serviceIsRemote"` + Security SecurityInfo `json:"security"` + Slirp4NetNS SlirpInfo `json:"slirp4netns,omitempty"` + SwapFree int64 `json:"swapFree"` + SwapTotal int64 `json:"swapTotal"` + Uptime string `json:"uptime"` + Linkmode string `json:"linkmode"` +} + +// RemoteSocket describes information about the API socket +type RemoteSocket struct { + Path string `json:"path,omitempty"` + Exists bool `json:"exists,omitempty"` +} + +// SlirpInfo describes the slirp executable that is being used +type SlirpInfo struct { + Executable string `json:"executable"` + Package string `json:"package"` + Version string `json:"version"` +} + +// IDMappings describe the GID and UID mappings +type IDMappings struct { + GIDMap []idtools.IDMap `json:"gidmap"` + UIDMap []idtools.IDMap `json:"uidmap"` +} + +// DistributionInfo 
describes the host distribution for libpod +type DistributionInfo struct { + Distribution string `json:"distribution"` + Variant string `json:"variant,omitempty"` + Version string `json:"version"` + Codename string `json:"codename,omitempty"` +} + +// ConmonInfo describes the conmon executable being used +type ConmonInfo struct { + Package string `json:"package"` + Path string `json:"path"` + Version string `json:"version"` +} + +// OCIRuntimeInfo describes the runtime (crun or runc) being +// used with podman +type OCIRuntimeInfo struct { + Name string `json:"name"` + Package string `json:"package"` + Path string `json:"path"` + Version string `json:"version"` +} + +// StoreInfo describes the container storage and its +// attributes +type StoreInfo struct { + ConfigFile string `json:"configFile"` + ContainerStore ContainerStore `json:"containerStore"` + GraphDriverName string `json:"graphDriverName"` + GraphOptions map[string]interface{} `json:"graphOptions"` + GraphRoot string `json:"graphRoot"` + // GraphRootAllocated is how much space the graphroot has in bytes + GraphRootAllocated uint64 `json:"graphRootAllocated"` + // GraphRootUsed is how much of graphroot is used in bytes + GraphRootUsed uint64 `json:"graphRootUsed"` + GraphStatus map[string]string `json:"graphStatus"` + ImageCopyTmpDir string `json:"imageCopyTmpDir"` + ImageStore ImageStore `json:"imageStore"` + RunRoot string `json:"runRoot"` + VolumePath string `json:"volumePath"` +} + +// ImageStore describes the image store. Right now only the number +// of images present +type ImageStore struct { + Number int `json:"number"` +} + +// ContainerStore describes the quantity of containers in the +// store by status +type ContainerStore struct { + Number int `json:"number"` + Paused int `json:"paused"` + Running int `json:"running"` + Stopped int `json:"stopped"` +} + +type Plugins struct { + Volume []string `json:"volume"` + Network []string `json:"network"` + Log []string `json:"log"` + // Authorization is provided for compatibility, will always be nil as Podman has no daemon + Authorization []string `json:"authorization"` +} + +type CPUUsage struct { + UserPercent float64 `json:"userPercent"` + SystemPercent float64 `json:"systemPercent"` + IdlePercent float64 `json:"idlePercent"` +} diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go new file mode 100644 index 000000000..c15745015 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/ssh/utils.go @@ -0,0 +1,198 @@ +package ssh + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "golang.org/x/crypto/ssh" + "golang.org/x/term" +) + +func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) { + sock := "" + if strings.Contains(path, "/run") { + sock = strings.Split(path, "/run")[1] + } + uri, err := url.Parse(path) + if err != nil { + return nil, nil, err + } + + // sometimes we are not going to have a path, this breaks uri.Hostname() + if uri.Host == "" && strings.Contains(uri.String(), "@") { + uri.Host = strings.Split(uri.String(), "@")[1] + } + + if uri.Port() == "" { + if port != 0 { + uri.Host = net.JoinHostPort(uri.Hostname(), strconv.Itoa(port)) + } else { + uri.Host = net.JoinHostPort(uri.Hostname(), "22") + } + } + + if user != nil { + uri.User = user + } + + uriStr := "" + if len(sock) > 0 { + uriStr = "ssh://" + uri.User.Username() + "@" + 
uri.Host + "/run" + sock + } else { + uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + } + + dst := config.Destination{ + URI: uriStr, + } + + if len(identity) > 0 { + dst.Identity = identity + } + return &dst, uri, err +} + +var ( + passPhrase []byte + phraseSync sync.Once + password []byte + passwordSync sync.Once +) + +// ReadPassword prompts for a secret and returns the value input by the user from stdin +// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported. +// Additionally, all input after `<secret>\n` is queued to the podman command. +func ReadPassword(prompt string) (pw []byte, err error) { + fd := int(os.Stdin.Fd()) + if term.IsTerminal(fd) { + fmt.Fprint(os.Stderr, prompt) + pw, err = term.ReadPassword(fd) + fmt.Fprintln(os.Stderr) + return + } + + var b [1]byte + for { + n, err := os.Stdin.Read(b[:]) + // terminal.ReadPassword discards any '\r', so we do the same + if n > 0 && b[0] != '\r' { + if b[0] == '\n' { + return pw, nil + } + pw = append(pw, b[0]) + // limit size, so that a wrong input won't fill up the memory + if len(pw) > 1024 { + err = fmt.Errorf("password too long, 1024 byte limit") + } + } + if err != nil { + // terminal.ReadPassword accepts EOF-terminated passwords + // if non-empty, so we do the same + if err == io.EOF && len(pw) > 0 { + err = nil + } + return pw, err + } + } +} + +func PublicKey(path string, passphrase []byte) (ssh.Signer, error) { + key, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + if _, ok := err.(*ssh.PassphraseMissingError); !ok { + return nil, err + } + if len(passphrase) == 0 { + passphrase = ReadPassphrase() + } + return ssh.ParsePrivateKeyWithPassphrase(key, passphrase) + } + return signer, nil +} + +func ReadPassphrase() []byte { + phraseSync.Do(func() { + secret, err := ReadPassword("Key Passphrase: ") + if err != nil { + secret = []byte{} + } + passPhrase = secret + }) + return passPhrase +} + +func ReadLogin() []byte { + passwordSync.Do(func() { + secret, err := ReadPassword("Login password: ") + if err != nil { + secret = []byte{} + } + password = secret + }) + return password +} + +func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, error) { + // assume load to remote + host := options.Destination + if strings.Contains(host, "ssh://") { + host = strings.Split(host, "ssh://")[1] + } + localPath := options.Source + if strings.Contains(localPath, "ssh://") { + localPath = strings.Split(localPath, "ssh://")[1] + } + remotePath := "" + swap := false + if split := strings.Split(localPath, ":"); len(split) == 2 { + // save to remote, load to local + host = split[0] + remotePath = split[1] + localPath = options.Destination + swap = true + } else { + split = strings.Split(host, ":") + if len(split) != 2 { + return "", "", "", false, fmt.Errorf("no remote destination provided") + } + host = split[0] + remotePath = split[1] + } + remotePath = strings.TrimSuffix(remotePath, "\n") + return host, remotePath, localPath, swap, nil +} + +func DialNet(sshClient *ssh.Client, mode string, url *url.URL) (net.Conn, error) { + port, err := strconv.Atoi(url.Port()) + if err != nil { + return nil, err + } + if _, _, err = Validate(url.User, url.Hostname(), port, ""); err != nil { + return nil, err + } + return sshClient.Dial(mode, url.Path) +} + +func DefineMode(flag string) EngineMode { + switch flag { + case "native": + return NativeMode + case "golang": + return GolangMode + default: + return InvalidMode + } +} diff
--git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 7b44a84fc..34e9fe6ba 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.49.1-dev" +const Version = "0.49.2-dev" diff --git a/vendor/github.com/dtylman/scp/.gitignore b/vendor/github.com/dtylman/scp/.gitignore deleted file mode 100644 index 6e1690ed6..000000000 --- a/vendor/github.com/dtylman/scp/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -example/example diff --git a/vendor/github.com/dtylman/scp/LICENSE b/vendor/github.com/dtylman/scp/LICENSE deleted file mode 100644 index 6565de59d..000000000 --- a/vendor/github.com/dtylman/scp/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Danny - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/dtylman/scp/README.md b/vendor/github.com/dtylman/scp/README.md deleted file mode 100644 index 48cfefe02..000000000 --- a/vendor/github.com/dtylman/scp/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# scp - -[](https://goreportcard.com/report/github.com/dtylman/scp) - -A Simple `go` SCP client library. - -## Usage - -```go -import ( - "github.com/dtylman/scp" - "golang.org/x/crypto/ssh" -) -``` - -## Sending Files - -Copies `/var/log/messages` to remote `/tmp/lala`: - -```go -var sc* ssh.Client -// establish ssh connection into sc here... -n,err:=scp.CopyTo(sc, "/var/log/messages", "/tmp/lala") -if err==nil{ - fmt.Printf("Sent %v bytes",n) -} -``` - -## Receiving Files - -Copies remote `/var/log/message` to local `/tmp/lala`: - -```go -var sc* ssh.Client -// establish ssh connection into sc here... 
-n,err:=scp.CopyFrom(sc, "/var/log/message", "/tmp/lala") -if err==nil{ - fmt.Printf("Sent %v bytes",n) -} -``` - - diff --git a/vendor/github.com/dtylman/scp/msg.go b/vendor/github.com/dtylman/scp/msg.go deleted file mode 100644 index 6dfc53535..000000000 --- a/vendor/github.com/dtylman/scp/msg.go +++ /dev/null @@ -1,121 +0,0 @@ -package scp - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" -) - -const ( - //CopyMessage Copy Message Opcode - CopyMessage = 'C' - //ErrorMessage Error OpCode - ErrorMessage = 0x1 - //WarnMessage Warning Opcode - WarnMessage = 0x2 -) - -//Message is scp control message -type Message struct { - Type byte - Error error - Mode string - Size int64 - FileName string -} - -func (m *Message) readByte(reader io.Reader) (byte, error) { - buff := make([]byte, 1) - _, err := io.ReadFull(reader, buff) - if err != nil { - return 0, err - } - return buff[0], nil - -} - -func (m *Message) readOpCode(reader io.Reader) error { - var err error - m.Type, err = m.readByte(reader) - return err -} - -//ReadError reads an error message -func (m *Message) ReadError(reader io.Reader) error { - msg, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - m.Error = errors.New(strings.TrimSpace(string(msg))) - return nil -} - -func (m *Message) readLine(reader io.Reader) (string, error) { - line := "" - b, err := m.readByte(reader) - if err != nil { - return "", err - } - for b != 10 { - line += string(b) - b, err = m.readByte(reader) - if err != nil { - return "", err - } - } - return line, nil -} - -func (m *Message) readCopy(reader io.Reader) error { - line, err := m.readLine(reader) - if err != nil { - return err - } - parts := strings.Split(line, " ") - if len(parts) < 2 { - return errors.New("Invalid copy line: " + line) - } - m.Mode = parts[0] - m.Size, err = strconv.ParseInt(parts[1], 10, 0) - if err != nil { - return err - } - m.FileName = parts[2] - return nil -} - -//ReadFrom reads message from reader -func (m *Message) ReadFrom(reader io.Reader) (int64, error) { - err := m.readOpCode(reader) - if err != nil { - return 0, err - } - switch m.Type { - case CopyMessage: - err = m.readCopy(reader) - if err != nil { - return 0, err - } - case ErrorMessage, WarnMessage: - err = m.ReadError(reader) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("Unsupported opcode: %v", m.Type) - } - return m.Size, nil -} - -//NewMessageFromReader constructs a new message from a data in reader -func NewMessageFromReader(reader io.Reader) (*Message, error) { - m := new(Message) - _, err := m.ReadFrom(reader) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/github.com/dtylman/scp/scp.go b/vendor/github.com/dtylman/scp/scp.go deleted file mode 100644 index 841c16965..000000000 --- a/vendor/github.com/dtylman/scp/scp.go +++ /dev/null @@ -1,153 +0,0 @@ -package scp - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" -) - -const ( - fileMode = "0644" - buffSize = 1024 * 256 -) - -//CopyTo copy from local to remote -func CopyTo(sshClient *ssh.Client, local string, remote string) (int64, error) { - session, err := sshClient.NewSession() - if err != nil { - return 0, err - } - defer session.Close() - stderr := &bytes.Buffer{} - session.Stderr = stderr - stdout := &bytes.Buffer{} - session.Stdout = stdout - writer, err := session.StdinPipe() - if err != nil { - return 0, err - } - defer writer.Close() - err = session.Start("scp -t " + 
filepath.Dir(remote)) - if err != nil { - return 0, err - } - - localFile, err := os.Open(local) - if err != nil { - return 0, err - } - fileInfo, err := localFile.Stat() - if err != nil { - return 0, err - } - _, err = fmt.Fprintf(writer, "C%s %d %s\n", fileMode, fileInfo.Size(), filepath.Base(remote)) - if err != nil { - return 0, err - } - n, err := copyN(writer, localFile, fileInfo.Size()) - if err != nil { - return 0, err - } - err = ack(writer) - if err != nil { - return 0, err - } - - err = session.Wait() - log.Debugf("Copied %v bytes out of %v. err: %v stdout:%v. stderr:%v", n, fileInfo.Size(), err, stdout, stderr) - //NOTE: Process exited with status 1 is not an error, it just how scp work. (waiting for the next control message and we send EOF) - return n, nil -} - -//CopyFrom copy from remote to local -func CopyFrom(sshClient *ssh.Client, remote string, local string) (int64, error) { - session, err := sshClient.NewSession() - if err != nil { - return 0, err - } - defer session.Close() - stderr := &bytes.Buffer{} - session.Stderr = stderr - writer, err := session.StdinPipe() - if err != nil { - return 0, err - } - defer writer.Close() - reader, err := session.StdoutPipe() - if err != nil { - return 0, err - } - err = session.Start("scp -f " + remote) - if err != nil { - return 0, err - } - err = ack(writer) - if err != nil { - return 0, err - } - msg, err := NewMessageFromReader(reader) - if err != nil { - return 0, err - } - if msg.Type == ErrorMessage || msg.Type == WarnMessage { - return 0, msg.Error - } - log.Debugf("Receiving %v", msg) - - err = ack(writer) - if err != nil { - return 0, err - } - outFile, err := os.Create(local) - if err != nil { - return 0, err - } - defer outFile.Close() - n, err := copyN(outFile, reader, msg.Size) - if err != nil { - return 0, err - } - err = outFile.Sync() - if err != nil { - return 0, err - } - err = outFile.Close() - if err != nil { - return 0, err - } - err = session.Wait() - log.Debugf("Copied %v bytes out of %v. err: %v stderr:%v", n, msg.Size, err, stderr) - return n, nil -} - -func ack(writer io.Writer) error { - var msg = []byte{0, 0, 10, 13} - n, err := writer.Write(msg) - if err != nil { - return err - } - if n < len(msg) { - return errors.New("Failed to write ack buffer") - } - return nil -} - -func copyN(writer io.Writer, src io.Reader, size int64) (int64, error) { - reader := io.LimitReader(src, size) - var total int64 - for total < size { - n, err := io.CopyBuffer(writer, reader, make([]byte, buffSize)) - log.Debugf("Copied chunk %v total: %v out of %v err: %v ", n, total, size, err) - if err != nil { - return 0, err - } - total += n - } - return total, nil -} diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/kr/fs/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme new file mode 100644 index 000000000..c95e13fc8 --- /dev/null +++ b/vendor/github.com/kr/fs/Readme @@ -0,0 +1,3 @@ +Filesystem Package + +http://godoc.org/github.com/kr/fs diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go new file mode 100644 index 000000000..f1c4805fb --- /dev/null +++ b/vendor/github.com/kr/fs/filesystem.go @@ -0,0 +1,36 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// FileSystem defines the methods of an abstract filesystem. +type FileSystem interface { + + // ReadDir reads the directory named by dirname and returns a + // list of directory entries. + ReadDir(dirname string) ([]os.FileInfo, error) + + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. + Lstat(name string) (os.FileInfo, error) + + // Join joins any number of path elements into a single path, adding a + // separator if necessary. The result is Cleaned; in particular, all + // empty strings are ignored. + // + // The separator is FileSystem specific. + Join(elem ...string) string +} + +// fs represents a FileSystem provided by the os package. +type fs struct{} + +func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } + +func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/go.mod b/vendor/github.com/kr/fs/go.mod new file mode 100644 index 000000000..7c206e04c --- /dev/null +++ b/vendor/github.com/kr/fs/go.mod @@ -0,0 +1 @@ +module "github.com/kr/fs" diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go new file mode 100644 index 000000000..6ffa1e0b2 --- /dev/null +++ b/vendor/github.com/kr/fs/walk.go @@ -0,0 +1,95 @@ +// Package fs provides filesystem-related functions. +package fs + +import ( + "os" +) + +// Walker provides a convenient interface for iterating over the +// descendants of a filesystem path. +// Successive calls to the Step method will step through each +// file or directory in the tree, including the root. The files +// are walked in lexical order, which makes the output deterministic +// but means that for very large directories Walker can be inefficient. +// Walker does not follow symbolic links. 
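+//
+// A minimal iteration sketch (illustrative only, not from the vendored
+// source; assumes some directory "dir" exists):
+//
+//	w := fs.Walk("dir")
+//	for w.Step() {
+//		if err := w.Err(); err != nil {
+//			continue // skip entries that could not be visited
+//		}
+//		fmt.Println(w.Path())
+//	}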
+type Walker struct { + fs FileSystem + cur item + stack []item + descend bool +} + +type item struct { + path string + info os.FileInfo + err error +} + +// Walk returns a new Walker rooted at root. +func Walk(root string) *Walker { + return WalkFS(root, new(fs)) +} + +// WalkFS returns a new Walker rooted at root on the FileSystem fs. +func WalkFS(root string, fs FileSystem) *Walker { + info, err := fs.Lstat(root) + return &Walker{ + fs: fs, + stack: []item{{root, info, err}}, + } +} + +// Step advances the Walker to the next file or directory, +// which will then be available through the Path, Stat, +// and Err methods. +// It returns false when the walk stops at the end of the tree. +func (w *Walker) Step() bool { + if w.descend && w.cur.err == nil && w.cur.info.IsDir() { + list, err := w.fs.ReadDir(w.cur.path) + if err != nil { + w.cur.err = err + w.stack = append(w.stack, w.cur) + } else { + for i := len(list) - 1; i >= 0; i-- { + path := w.fs.Join(w.cur.path, list[i].Name()) + w.stack = append(w.stack, item{path, list[i], nil}) + } + } + } + + if len(w.stack) == 0 { + return false + } + i := len(w.stack) - 1 + w.cur = w.stack[i] + w.stack = w.stack[:i] + w.descend = true + return true +} + +// Path returns the path to the most recent file or directory +// visited by a call to Step. It contains the argument to Walk +// as a prefix; that is, if Walk is called with "dir", which is +// a directory containing the file "a", Path will return "dir/a". +func (w *Walker) Path() string { + return w.cur.path +} + +// Stat returns info for the most recent file or directory +// visited by a call to Step. +func (w *Walker) Stat() os.FileInfo { + return w.cur.info +} + +// Err returns the error, if any, for the most recent attempt +// by Step to visit a file or directory. If a directory has +// an error, w will not descend into that directory. +func (w *Walker) Err() error { + return w.cur.err +} + +// SkipDir causes the currently visited directory to be skipped. +// If w is not on a directory, SkipDir has no effect. +func (w *Walker) SkipDir() { + w.descend = false +} diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore new file mode 100644 index 000000000..caf2dca22 --- /dev/null +++ b/vendor/github.com/pkg/sftp/.gitignore @@ -0,0 +1,10 @@ +.*.swo +.*.swp + +server_standalone/server_standalone + +examples/*/id_rsa +examples/*/id_rsa.pub + +memprofile.out +memprofile.svg diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS new file mode 100644 index 000000000..5c7196ae6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/CONTRIBUTORS @@ -0,0 +1,3 @@ +Dave Cheney <dave@cheney.net> +Saulius Gurklys <s4uliu5@gmail.com> +John Eikenberry <jae@zhar.net> diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE new file mode 100644 index 000000000..b7b53921e --- /dev/null +++ b/vendor/github.com/pkg/sftp/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/Makefile b/vendor/github.com/pkg/sftp/Makefile new file mode 100644 index 000000000..4d3a00799 --- /dev/null +++ b/vendor/github.com/pkg/sftp/Makefile @@ -0,0 +1,27 @@ +.PHONY: integration integration_w_race benchmark + +integration: + go test -integration -v ./... + go test -testserver -v ./... + go test -integration -testserver -v ./... + go test -integration -allocator -v ./... + go test -testserver -allocator -v ./... + go test -integration -testserver -allocator -v ./... + +integration_w_race: + go test -race -integration -v ./... + go test -race -testserver -v ./... + go test -race -integration -testserver -v ./... + go test -race -integration -allocator -v ./... + go test -race -testserver -allocator -v ./... + go test -race -integration -allocator -testserver -v ./... + +COUNT ?= 1 +BENCHMARK_PATTERN ?= "." + +benchmark: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) + +benchmark_w_memprofile: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out + go tool pprof -svg -output=memprofile.svg memprofile.out diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md new file mode 100644 index 000000000..5e78cd396 --- /dev/null +++ b/vendor/github.com/pkg/sftp/README.md @@ -0,0 +1,44 @@ +sftp +---- + +The `sftp` package provides support for file system operations on remote ssh +servers using the SFTP subsystem. It also implements an SFTP server for serving +files from the filesystem. + + [](https://pkg.go.dev/github.com/pkg/sftp) + +usage and examples +------------------ + +See [https://pkg.go.dev/github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) for +examples and usage. + +The basic operation of the package mirrors the facilities of the +[os](http://golang.org/pkg/os) package. + +The Walker interface for directory traversal is heavily inspired by Keith +Rarick's [fs](https://pkg.go.dev/github.com/kr/fs) package. + +roadmap +------- + +* There is way too much duplication in the Client methods. If there were an + unmarshal(interface{}) method, this would reduce a heap of the duplication. + +contributing +------------ + +We welcome pull requests, bug fixes and issue reports. + +Before proposing a large change, first please discuss your change by raising an +issue. + +For API/code bugs, please include a small, self-contained code example to +reproduce the issue. For pull requests, remember test coverage. + +We try to handle issues and pull requests with a zero-open philosophy. That means +we will try to address the submission as soon as possible and will work toward +a resolution. If progress can no longer be made (e.g. an unreproducible bug) or +stops (e.g. 
unresponsive submitter), we will close the bug. + +Thanks. diff --git a/vendor/github.com/pkg/sftp/allocator.go b/vendor/github.com/pkg/sftp/allocator.go new file mode 100644 index 000000000..3e67e5433 --- /dev/null +++ b/vendor/github.com/pkg/sftp/allocator.go @@ -0,0 +1,96 @@ +package sftp + +import ( + "sync" +) + +type allocator struct { + sync.Mutex + available [][]byte + // map key is the request order + used map[uint32][][]byte +} + +func newAllocator() *allocator { + return &allocator{ + // micro optimization: initialize available pages with an initial capacity + available: make([][]byte, 0, SftpServerWorkerCount*2), + used: make(map[uint32][][]byte), + } +} + +// GetPage returns a previously allocated and unused []byte or creates a new one. +// The slice has a fixed size of maxMsgLength; this value is suitable for both +// receiving new packets and reading the files to serve. +func (a *allocator) GetPage(requestOrderID uint32) []byte { + a.Lock() + defer a.Unlock() + + var result []byte + + // get an available page and remove it from the available ones. + if len(a.available) > 0 { + truncLength := len(a.available) - 1 + result = a.available[truncLength] + + a.available[truncLength] = nil // clear out the internal pointer + a.available = a.available[:truncLength] // truncate the slice + } + + // no preallocated slice found, just allocate a new one + if result == nil { + result = make([]byte, maxMsgLength) + } + + // put result in used pages + a.used[requestOrderID] = append(a.used[requestOrderID], result) + + return result +} + +// ReleasePages marks as unused all pages in use for the given requestOrderID. +func (a *allocator) ReleasePages(requestOrderID uint32) { + a.Lock() + defer a.Unlock() + + if used := a.used[requestOrderID]; len(used) > 0 { + a.available = append(a.available, used...) + } + delete(a.used, requestOrderID) +} + +// Free removes all the used and available pages. +// Call this method when the allocator is not needed anymore. +func (a *allocator) Free() { + a.Lock() + defer a.Unlock() + + a.available = nil + a.used = make(map[uint32][][]byte) +} + +func (a *allocator) countUsedPages() int { + a.Lock() + defer a.Unlock() + + num := 0 + for _, p := range a.used { + num += len(p) + } + return num +} + +func (a *allocator) countAvailablePages() int { + a.Lock() + defer a.Unlock() + + return len(a.available) +} + +func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool { + a.Lock() + defer a.Unlock() + + _, ok := a.used[requestOrderID] + return ok +} diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go new file mode 100644 index 000000000..2bb2d5764 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -0,0 +1,90 @@ +package sftp + +// ssh_FXP_ATTRS support +// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 + +import ( + "os" + "time" +) + +const ( + sshFileXferAttrSize = 0x00000001 + sshFileXferAttrUIDGID = 0x00000002 + sshFileXferAttrPermissions = 0x00000004 + sshFileXferAttrACmodTime = 0x00000008 + sshFileXferAttrExtended = 0x80000000 + + sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions | + sshFileXferAttrACmodTime | sshFileXferAttrExtended ) + +// fileInfo is an artificial type designed to satisfy os.FileInfo. +type fileInfo struct { + name string + stat *FileStat +} + +// Name returns the base name of the file. +func (fi *fileInfo) Name() string { return fi.name } + +// Size returns the length in bytes for regular files; system-dependent for others. 
+func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) } + +// Mode returns file mode bits. +func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) } + +// ModTime returns the last modification time of the file. +func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) } + +// IsDir returns true if the file is a directory. +func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } + +func (fi *fileInfo) Sys() interface{} { return fi.stat } + +// FileStat holds the original unmarshalled values from a call to READDIR or +// *STAT. It is exported for the purposes of accessing the raw values via +// os.FileInfo.Sys(). It is also used server side to store the unmarshalled +// values for SetStat. +type FileStat struct { + Size uint64 + Mode uint32 + Mtime uint32 + Atime uint32 + UID uint32 + GID uint32 + Extended []StatExtended +} + +// StatExtended contains additional, extended information for a FileStat. +type StatExtended struct { + ExtType string + ExtData string +} + +func fileInfoFromStat(stat *FileStat, name string) os.FileInfo { + return &fileInfo{ + name: name, + stat: stat, + } +} + +func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { + mtime := fi.ModTime().Unix() + atime := mtime + var flags uint32 = sshFileXferAttrSize | + sshFileXferAttrPermissions | + sshFileXferAttrACmodTime + + fileStat := &FileStat{ + Size: uint64(fi.Size()), + Mode: fromFileMode(fi.Mode()), + Mtime: uint32(mtime), + Atime: uint32(atime), + } + + // os specific file stat decoding + fileStatFromInfoOs(fi, &flags, fileStat) + + return flags, fileStat +} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go new file mode 100644 index 000000000..c01f33677 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -0,0 +1,11 @@ +// +build plan9 windows android + +package sftp + +import ( + "os" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + // todo +} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go new file mode 100644 index 000000000..d1f445241 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -0,0 +1,16 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js + +package sftp + +import ( + "os" + "syscall" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + if statt, ok := fi.Sys().(*syscall.Stat_t); ok { + *flags |= sshFileXferAttrUIDGID + fileStat.UID = statt.Uid + fileStat.GID = statt.Gid + } +} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go new file mode 100644 index 000000000..9e0b61645 --- /dev/null +++ b/vendor/github.com/pkg/sftp/client.go @@ -0,0 +1,1977 @@ +package sftp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "os" + "path" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/kr/fs" + "golang.org/x/crypto/ssh" +) + +var ( + // ErrInternalInconsistency indicates the packets sent and the data queued to be + // written to the file don't match up. It is an unusual error and usually is + // caused by bad behavior server side or connection issues. The error is + // limited in scope to the call where it happened, the client object is still + // OK to use as long as the connection is still open. + ErrInternalInconsistency = errors.New("internal inconsistency") + // InternalInconsistency alias for ErrInternalInconsistency. 
+ // + // Deprecated: please use ErrInternalInconsistency + InternalInconsistency = ErrInternalInconsistency +) + +// A ClientOption is a function which applies configuration to a Client. +type ClientOption func(*Client) error + +// MaxPacketChecked sets the maximum size of the payload, measured in bytes. +// This option only accepts sizes servers should support, i.e. <= 32768 bytes. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacketChecked(size int) ClientOption { + return func(c *Client) error { + if size < 1 { + return errors.New("size must be greater or equal to 1") + } + if size > 32768 { + return errors.New("sizes larger than 32KB might not work with all servers") + } + c.maxPacket = size + return nil + } +} + +// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes. +// It accepts sizes larger than the 32768 bytes all servers should support. +// Only use a setting higher than 32768 if your application always connects to +// the same server or after sufficiently broad testing. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacketUnchecked(size int) ClientOption { + return func(c *Client) error { + if size < 1 { + return errors.New("size must be greater or equal to 1") + } + c.maxPacket = size + return nil + } +} + +// MaxPacket sets the maximum size of the payload, measured in bytes. +// This option only accepts sizes servers should support, i.e. <= 32768 bytes. +// This is a synonym for MaxPacketChecked that provides backward compatibility. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacket(size int) ClientOption { + return MaxPacketChecked(size) +} + +// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file. +// +// The default maximum concurrent requests is 64. +func MaxConcurrentRequestsPerFile(n int) ClientOption { + return func(c *Client) error { + if n < 1 { + return errors.New("n must be greater or equal to 1") + } + c.maxConcurrentRequests = n + return nil + } +} + +// UseConcurrentWrites allows the Client to perform concurrent Writes. +// +// Using concurrency while doing writes requires special consideration. +// A write to a later offset in a file after an error +// could end up with a file length longer than what was successfully written. +// +// When using this option, if you receive an error during `io.Copy` or `io.WriteTo`, +// you may need to `Truncate` the target Writer to avoid “holes” in the data written. +func UseConcurrentWrites(value bool) ClientOption { + return func(c *Client) error { + c.useConcurrentWrites = value + return nil + } +} + +// UseConcurrentReads allows the Client to perform concurrent Reads. +// +// Concurrent reads are generally safe to use and not using them will degrade +// performance, so this option is enabled by default. +// +// When enabled, WriteTo will use Stat/Fstat to get the file size and determine +// how many concurrent workers to use. +// Some "read once" servers will delete the file if they receive a stat call on an +// open file, and then the download will fail. +// By disabling concurrent reads, you will be able to download files from these servers. 
+// If concurrent reads are disabled, the UseFstat option is ignored. +func UseConcurrentReads(value bool) ClientOption { + return func(c *Client) error { + c.disableConcurrentReads = !value + return nil + } +} + +// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called +// (usually when copying files). +// Some servers limit the number of open files and calling Stat after opening +// the file will return an error from the server. Setting this flag will call +// Fstat instead of Stat, which is supposed to be called on an open file handle. +// +// This has been found necessary with IBM Sterling SFTP servers which have the +// "extractability" level set to 1, which means only one file can be opened at +// any given time. +// +// If the server you are working with still has an issue with both Stat and +// Fstat calls, you can always open a file and read it until the end. +// +// Another reason to read the file until its end, when Fstat doesn't work, is +// that on some servers reading a full file will automatically delete the +// file, as some of these mainframes map the file to a message in a queue. +// Once the file has been read, it will get deleted. +func UseFstat(value bool) ClientOption { + return func(c *Client) error { + c.useFstat = value + return nil + } +} + +// Client represents an SFTP session on a *ssh.ClientConn SSH connection. +// Multiple Clients can be active on a single SSH connection, and a Client +// may be called concurrently from multiple goroutines. +// +// Client implements the github.com/kr/fs.FileSystem interface. +type Client struct { + clientConn + + ext map[string]string // Extensions (name -> data). + + maxPacket int // max packet size read or written. + maxConcurrentRequests int + nextid uint32 + + // write concurrency is… error prone. + // Default behavior should be to not use it. + useConcurrentWrites bool + useFstat bool + disableConcurrentReads bool +} + +// NewClient creates a new SFTP client on conn, using zero or more option +// functions. +func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { + s, err := conn.NewSession() + if err != nil { + return nil, err + } + if err := s.RequestSubsystem("sftp"); err != nil { + return nil, err + } + pw, err := s.StdinPipe() + if err != nil { + return nil, err + } + pr, err := s.StdoutPipe() + if err != nil { + return nil, err + } + + return NewClientPipe(pr, pw, opts...) +} + +// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. +// This can be used for connecting to an SFTP server over TCP/TLS or by using +// the system's ssh client program (e.g. via exec.Command). +func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { + sftp := &Client{ + clientConn: clientConn{ + conn: conn{ + Reader: rd, + WriteCloser: wr, + }, + inflight: make(map[uint32]chan<- result), + closed: make(chan struct{}), + }, + + ext: make(map[string]string), + + maxPacket: 1 << 15, + maxConcurrentRequests: 64, + } + + for _, opt := range opts { + if err := opt(sftp); err != nil { + wr.Close() + return nil, err + } + } + + if err := sftp.sendInit(); err != nil { + wr.Close() + return nil, err + } + if err := sftp.recvVersion(); err != nil { + wr.Close() + return nil, err + } + + sftp.clientConn.wg.Add(1) + go sftp.loop() + + return sftp, nil +} + +// Create creates the named file mode 0666 (before umask), truncating it if it +// already exists. If successful, methods on the returned File can be used for +// I/O; the associated file descriptor has mode O_RDWR. 
If you need more +// control over the flags/mode used to open the file see client.OpenFile. +// +// Note that some SFTP servers (eg. AWS Transfer) do not support opening files +// read/write at the same time. For those services you will need to use +// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`. +func (c *Client) Create(path string) (*File, error) { + return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) +} + +const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + +func (c *Client) sendInit() error { + return c.clientConn.conn.sendPacket(&sshFxInitPacket{ + Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + }) +} + +// returns the next value of c.nextid +func (c *Client) nextID() uint32 { + return atomic.AddUint32(&c.nextid, 1) +} + +func (c *Client) recvVersion() error { + typ, data, err := c.recvPacket(0) + if err != nil { + return err + } + if typ != sshFxpVersion { + return &unexpectedPacketErr{sshFxpVersion, typ} + } + + version, data, err := unmarshalUint32Safe(data) + if err != nil { + return err + } + if version != sftpProtocolVersion { + return &unexpectedVersionErr{sftpProtocolVersion, version} + } + + for len(data) > 0 { + var ext extensionPair + ext, data, err = unmarshalExtensionPair(data) + if err != nil { + return err + } + c.ext[ext.Name] = ext.Data + } + + return nil +} + +// HasExtension checks whether the server supports a named extension. +// +// The first return value is the extension data reported by the server +// (typically a version number). +func (c *Client) HasExtension(name string) (string, bool) { + data, ok := c.ext[name] + return data, ok +} + +// Walk returns a new Walker rooted at root. +func (c *Client) Walk(root string) *fs.Walker { + return fs.WalkFS(root, c) +} + +// ReadDir reads the directory named by dirname and returns a list of +// directory entries. +func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { + handle, err := c.opendir(p) + if err != nil { + return nil, err + } + defer c.close(handle) // this has to defer earlier than the lock below + var attrs []os.FileInfo + var done = false + for !done { + id := c.nextID() + typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{ + ID: id, + Handle: handle, + }) + if err1 != nil { + err = err1 + done = true + break + } + switch typ { + case sshFxpName: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + for i := uint32(0); i < count; i++ { + var filename string + filename, data = unmarshalString(data) + _, data = unmarshalString(data) // discard longname + var attr *FileStat + attr, data = unmarshalAttrs(data) + if filename == "." || filename == ".." { + continue + } + attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + } + case sshFxpStatus: + // TODO(dfc) scope warning! 
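+ // A status packet ends the listing: an io.EOF status is normalised to
+ // nil after the loop below, while any other status surfaces as the error.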
+ err = normaliseError(unmarshalStatus(id, data)) + done = true + default: + return nil, unimplementedPacketErr(typ) + } + } + if err == io.EOF { + err = nil + } + return attrs, err +} + +func (c *Client) opendir(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case sshFxpHandle: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return handle, nil + case sshFxpStatus: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Stat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. +func (c *Client) Stat(p string) (os.FileInfo, error) { + fs, err := c.stat(p) + if err != nil { + return nil, err + } + return fileInfoFromStat(fs, path.Base(p)), nil +} + +// Lstat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. +func (c *Client) Lstat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpAttrs: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// ReadLink reads the target of a symbolic link. 
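+//
+// A short usage sketch (illustrative only; "c" is an established *Client and
+// the link path is hypothetical):
+//
+//	target, err := c.ReadLink("/srv/current")
+//	if err != nil {
+//		// handle error
+//	}
+//	// target holds the link destination, which may itself be relative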
+func (c *Client) ReadLink(p string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{ + ID: id, + Path: p, + }) + if err != nil { + return "", err + } + switch typ { + case sshFxpName: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore dummy attributes + return filename, nil + case sshFxpStatus: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Link creates a hard link at 'newname', pointing at the same inode as 'oldname'. +func (c *Client) Link(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'. +func (c *Client) Symlink(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{ + ID: id, + Linkpath: newname, + Targetpath: oldname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{ + ID: id, + Handle: handle, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// setstat is a convenience wrapper to allow for changing various attributes of the named file. +func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{ + ID: id, + Path: path, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Chtimes changes the access and modification times of the named file. +func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { + type times struct { + Atime uint32 + Mtime uint32 + } + attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} + return c.setstat(path, sshFileXferAttrACmodTime, attrs) +} + +// Chown changes the user and group owners of the named file. +func (c *Client) Chown(path string, uid, gid int) error { + type owner struct { + UID uint32 + GID uint32 + } + attrs := owner{uint32(uid), uint32(gid)} + return c.setstat(path, sshFileXferAttrUIDGID, attrs) +} + +// Chmod changes the permissions of the named file. +// +// Chmod does not apply a umask, because even retrieving the umask is not +// possible in a portable way without causing a race condition. Callers +// should mask off umask bits, if desired. +func (c *Client) Chmod(path string, mode os.FileMode) error { + return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode)) +} + +// Truncate sets the size of the named file. 
Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify how the server should behave when setting +// a size greater than the current size. +func (c *Client) Truncate(path string, size int64) error { + return c.setstat(path, sshFileXferAttrSize, uint64(size)) +} + +// Open opens the named file for reading. If successful, methods on the +// returned file can be used for reading; the associated file descriptor +// has mode O_RDONLY. +func (c *Client) Open(path string) (*File, error) { + return c.open(path, flags(os.O_RDONLY)) +} + +// OpenFile is the generalized open call; most users will use Open or +// Create instead. It opens the named file with specified flag (O_RDONLY +// etc.). If successful, methods on the returned File can be used for I/O. +func (c *Client) OpenFile(path string, f int) (*File, error) { + return c.open(path, flags(f)) +} + +func (c *Client) open(path string, pflags uint32) (*File, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{ + ID: id, + Path: path, + Pflags: pflags, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpHandle: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return &File{c: c, path: path, handle: handle}, nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// close closes a handle previously returned in the response +// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid +// immediately after this request has been sent. +func (c *Client) close(handle string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) stat(path string) (*FileStat, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpAttrs: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return attr, nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +func (c *Client) fstat(handle string) (*FileStat, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpAttrs: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return attr, nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// StatVFS retrieves VFS statistics from a remote host. +// +// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature +// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt. 
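+//
+// A usage sketch (illustrative only; "c" is an established *Client, and the
+// free-space arithmetic assumes the reply exposes the Frsize and Bavail
+// fields from the statvfs@openssh.com extension):
+//
+//	vfs, err := c.StatVFS("/data")
+//	if err != nil {
+//		// handle error (the server may not support statvfs@openssh.com)
+//	}
+//	avail := vfs.Frsize * vfs.Bavail // bytes available to unprivileged users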
+func (c *Client) StatVFS(path string) (*StatVFS, error) { + // send the StatVFS packet to the server + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + + switch typ { + // server responded with valid data + case sshFxpExtendedReply: + var response StatVFS + err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) + if err != nil { + return nil, errors.New("can not parse reply") + } + + return &response, nil + + // the request failed + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Join joins any number of path elements into a single path, adding a +// separating slash if necessary. The result is Cleaned; in particular, all +// empty strings are ignored. +func (c *Client) Join(elem ...string) string { return path.Join(elem...) } + +// Remove removes the specified file or directory. An error will be returned if no +// file or directory with the specified path exists, or if the specified directory +// is not empty. +func (c *Client) Remove(path string) error { + err := c.removeFile(path) + // some servers, *cough* osx *cough*, return EPERM, not ENODIR. + // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY + // EPERM is converted to os.ErrPermission so it is not a StatusError + if err, ok := err.(*StatusError); ok { + switch err.Code { + case sshFxFailure, sshFxFileIsADirectory: + return c.RemoveDirectory(path) + } + } + if os.IsPermission(err) { + return c.RemoveDirectory(path) + } + return err +} + +func (c *Client) removeFile(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{ + ID: id, + Filename: path, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RemoveDirectory removes a directory path. +func (c *Client) RemoveDirectory(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Rename renames a file. +func (c *Client) Rename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// PosixRename renames a file using the posix-rename@openssh.com extension +// which will replace newname if it already exists. +func (c *Client) PosixRename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RealPath can be used to have the server canonicalize any given path name to an absolute path. +// +// This is useful for converting path names containing ".." components, +// or relative pathnames without a leading slash into absolute paths. 
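+//
+// For example (illustrative only; "c" is an established *Client):
+//
+//	abs, err := c.RealPath("../uploads")
+//	if err != nil {
+//		// handle error
+//	}
+//	// abs is now an absolute, server-canonicalised path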
+func (c *Client) RealPath(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case sshFxpName: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore attributes + return filename, nil + case sshFxpStatus: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Getwd returns the current working directory of the server. Operations +// involving relative paths will be based at this location. +func (c *Client) Getwd() (string, error) { + return c.RealPath(".") +} + +// Mkdir creates the specified directory. An error will be returned if a file or +// directory with the specified path already exists, or if the directory's +// parent folder does not exist (the method cannot create complete paths). +func (c *Client) Mkdir(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// MkdirAll creates a directory named path, along with any necessary parents, +// and returns nil, or else returns an error. +// If path is already a directory, MkdirAll does nothing and returns nil. +// If path contains a regular file, an error is returned +func (c *Client) MkdirAll(path string) error { + // Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13 + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := c.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && path[i-1] == '/' { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && path[j-1] != '/' { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = c.MkdirAll(path[0 : j-1]) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = c.Mkdir(path) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := c.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// File represents a remote file. +type File struct { + c *Client + path string + handle string + + mu sync.Mutex + offset int64 // current offset within remote file +} + +// Close closes the File, rendering it unusable for I/O. It returns an +// error, if any. +func (f *File) Close() error { + return f.c.close(f.handle) +} + +// Name returns the name of the file as presented to Open or Create. +func (f *File) Name() string { + return f.path +} + +// Read reads up to len(b) bytes from the File. It returns the number of bytes +// read and an error, if any. Read follows io.Reader semantics, so when Read +// encounters an error or EOF condition after successfully reading n > 0 bytes, +// it returns the number of bytes read. 
+// +// To maximise throughput for transferring the entire file (especially +// over high latency links) it is recommended to use WriteTo rather +// than calling Read multiple times. io.Copy will do this +// automatically. +func (f *File) Read(b []byte) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + + n, err := f.ReadAt(b, f.offset) + f.offset += int64(n) + return n, err +} + +// readChunkAt attempts to read the entire length of the buffer from the file starting at the offset. +// It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs. +func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) { + for err == nil && n < len(b) { + id := f.c.nextID() + typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off) + uint64(n), + Len: uint32(len(b) - n), + }) + if err != nil { + return n, err + } + + switch typ { + case sshFxpStatus: + return n, normaliseError(unmarshalStatus(id, data)) + + case sshFxpData: + sid, data := unmarshalUint32(data) + if id != sid { + return n, &unexpectedIDErr{id, sid} + } + + l, data := unmarshalUint32(data) + n += copy(b[n:], data[:l]) + + default: + return n, unimplementedPacketErr(typ) + } + } + + return +} + +func (f *File) readAtSequential(b []byte, off int64) (read int, err error) { + for read < len(b) { + rb := b[read:] + if len(rb) > f.c.maxPacket { + rb = rb[:f.c.maxPacket] + } + n, err := f.readChunkAt(nil, rb, off+int64(read)) + if n < 0 { + panic("sftp.File: returned negative count from readChunkAt") + } + if n > 0 { + read += n + } + if err != nil { + return read, err + } + } + return read, nil +} + +// ReadAt reads up to len(b) bytes from the File at a given offset `off`. It returns +// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics, +// so the file offset is not altered during the read. +func (f *File) ReadAt(b []byte, off int64) (int, error) { + if len(b) <= f.c.maxPacket { + // This should be able to be serviced with one or two requests. + // So, just do it directly. + return f.readChunkAt(nil, b, off) + } + + if f.c.disableConcurrentReads { + return f.readAtSequential(b, off) + } + + // Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests. + // This allows reads with a suitably large buffer to transfer data at a much faster rate + // by overlapping round trip times. + + cancel := make(chan struct{}) + + concurrency := len(b)/f.c.maxPacket + 1 + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + resPool := newResChanPool(concurrency) + + type work struct { + id uint32 + res chan result + + b []byte + off int64 + } + workCh := make(chan work) + + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. 
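+ // The pipeline below follows a slice/map/reduce shape: one goroutine
+ // slices the request into chunk-sized work items, a fixed pool of
+ // workers collects the replies, and the error channel is reduced to
+ // the earliest failing offset so that the returned byte count is exact.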
+ go func() { + defer close(workCh) + + b := b + offset := off + chunkSize := f.c.maxPacket + + for len(b) > 0 { + rb := b + if len(rb) > chunkSize { + rb = rb[:chunkSize] + } + + id := f.c.nextID() + res := resPool.Get() + + f.c.dispatchRequest(res, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(offset), + Len: uint32(chunkSize), + }) + + select { + case workCh <- work{id, res, rb, offset}: + case <-cancel: + return + } + + offset += int64(len(rb)) + b = b[len(rb):] + } + }() + + type rErr struct { + off int64 + err error + } + errCh := make(chan rErr) + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets work, and then performs the Read into its buffer from its respective offset. + go func() { + defer wg.Done() + + for packet := range workCh { + var n int + + s := <-packet.res + resPool.Put(packet.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(packet.id, s.data)) + + case sshFxpData: + sid, data := unmarshalUint32(s.data) + if packet.id != sid { + err = &unexpectedIDErr{packet.id, sid} + + } else { + l, data := unmarshalUint32(data) + n = copy(packet.b, data[:l]) + + // For normal disk files, it is guaranteed that this will read + // the specified number of bytes, or up to end of file. + // This implies, if we have a short read, that means EOF. + if n < len(packet.b) { + err = io.EOF + } + } + + default: + err = unimplementedPacketErr(s.typ) + } + } + + if err != nil { + // return the offset as the start + how much we read before the error. + errCh <- rErr{packet.off + int64(n), err} + return + } + } + }() + } + + // Wait for long tail, before closing results. + go func() { + wg.Wait() + close(errCh) + }() + + // Reduce: collect all the results into a relevant return: the earliest offset to return an error. + firstErr := rErr{math.MaxInt64, nil} + for rErr := range errCh { + if rErr.off <= firstErr.off { + firstErr = rErr + } + + select { + case <-cancel: + default: + // stop any more work from being distributed. (Just in case.) + close(cancel) + } + } + + if firstErr.err != nil { + // firstErr.err != nil if and only if firstErr.off > our starting offset. + return int(firstErr.off - off), firstErr.err + } + + // As per spec for io.ReaderAt, we return nil error if and only if we read everything. + return len(b), nil +} + +// writeToSequential implements WriteTo, but works sequentially with no parallelism. +func (f *File) writeToSequential(w io.Writer) (written int64, err error) { + b := make([]byte, f.c.maxPacket) + ch := make(chan result, 1) // reusable channel + + for { + n, err := f.readChunkAt(ch, b, f.offset) + if n < 0 { + panic("sftp.File: returned negative count from readChunkAt") + } + + if n > 0 { + f.offset += int64(n) + + m, err := w.Write(b[:n]) + written += int64(m) + + if err != nil { + return written, err + } + } + + if err != nil { + if err == io.EOF { + return written, nil // return nil explicitly. + } + + return written, err + } + } +} + +// WriteTo writes the file to the given Writer. +// The return value is the number of bytes written. +// Any error encountered during the write is also returned. +// +// This method is preferred over calling Read multiple times +// to maximise throughput for transferring the entire file, +// especially over high latency links. 
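+//
+// A typical download sketch (illustrative only; "client" is an established
+// *Client and both paths are hypothetical):
+//
+//	src, err := client.Open("/remote/data.bin")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer src.Close()
+//
+//	dst, err := os.Create("/tmp/data.bin")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer dst.Close()
+//
+//	n, err := src.WriteTo(dst) // n is the number of bytes written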
+func (f *File) WriteTo(w io.Writer) (written int64, err error) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.c.disableConcurrentReads { + return f.writeToSequential(w) + } + + // For concurrency, we want to guess how many concurrent workers we should use. + var fileStat *FileStat + if f.c.useFstat { + fileStat, err = f.c.fstat(f.handle) + } else { + fileStat, err = f.c.stat(f.path) + } + if err != nil { + return 0, err + } + + fileSize := fileStat.Size + if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) { + // only regular files are guaranteed to return (full read) xor (partial read, next error) + return f.writeToSequential(w) + } + + concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess + if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 { + concurrency64 = uint64(f.c.maxConcurrentRequests) + } + // Now that concurrency64 is saturated to an int value, we know this assignment cannot possibly overflow. + concurrency := int(concurrency64) + + chunkSize := f.c.maxPacket + pool := newBufPool(concurrency, chunkSize) + resPool := newResChanPool(concurrency) + + cancel := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + // Once the writing Reduce phase has ended, all the feed work needs to unconditionally stop. + close(cancel) + + // We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed. + // Just to be sure we don’t orphan any goroutines or leave any hanging references. + wg.Wait() + }() + + type writeWork struct { + b []byte + off int64 + err error + + next chan writeWork + } + writeCh := make(chan writeWork) + + type readWork struct { + id uint32 + res chan result + off int64 + + cur, next chan writeWork + } + readCh := make(chan readWork) + + // Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing. + go func() { + defer close(readCh) + + off := f.offset + + cur := writeCh + for { + id := f.c.nextID() + res := resPool.Get() + + next := make(chan writeWork) + readWork := readWork{ + id: id, + res: res, + off: off, + + cur: cur, + next: next, + } + + f.c.dispatchRequest(res, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Len: uint32(chunkSize), + }) + + select { + case readCh <- readWork: + case <-cancel: + return + } + + off += int64(chunkSize) + cur = next + } + }() + + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets readWork, and does the Read into a buffer at the given offset. + go func() { + defer wg.Done() + + for readWork := range readCh { + var b []byte + var n int + + s := <-readWork.res + resPool.Put(readWork.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(readWork.id, s.data)) + + case sshFxpData: + sid, data := unmarshalUint32(s.data) + if readWork.id != sid { + err = &unexpectedIDErr{readWork.id, sid} + + } else { + l, data := unmarshalUint32(data) + b = pool.Get()[:l] + n = copy(b, data[:l]) + b = b[:n] + } + + default: + err = unimplementedPacketErr(s.typ) + } + } + + writeWork := writeWork{ + b: b, + off: readWork.off, + err: err, + + next: readWork.next, + } + + select { + case readWork.cur <- writeWork: + case <-cancel: + return + } + + if err != nil { + return + } + } + }() + } + + // Reduce: serialize the results from the reads into sequential writes. 
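+ // Ordering note: each read worker sends its finished chunk on the
+ // channel it received as "cur" and passes a fresh "next" channel to its
+ // successor, forming a linked list of channels that replays the chunks
+ // in file order no matter which read completes first.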
+ cur := writeCh + for { + packet, ok := <-cur + if !ok { + return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel") + } + + // Because writes are serialized, this will always be the last successfully read byte. + f.offset = packet.off + int64(len(packet.b)) + + if len(packet.b) > 0 { + n, err := w.Write(packet.b) + written += int64(n) + if err != nil { + return written, err + } + } + + if packet.err != nil { + if packet.err == io.EOF { + return written, nil + } + + return written, packet.err + } + + pool.Put(packet.b) + cur = packet.next + } +} + +// Stat returns the FileInfo structure describing the file. It returns an +// error, if any. +func (f *File) Stat() (os.FileInfo, error) { + fs, err := f.c.fstat(f.handle) + if err != nil { + return nil, err + } + return fileInfoFromStat(fs, path.Base(f.path)), nil +} + +// Write writes len(b) bytes to the File. It returns the number of bytes +// written and an error, if any. Write returns a non-nil error when n != +// len(b). +// +// To maximise throughput for transferring the entire file (especially +// over high latency links) it is recommended to use ReadFrom rather +// than calling Write multiple times. io.Copy will do this +// automatically. +func (f *File) Write(b []byte) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + + n, err := f.WriteAt(b, f.offset) + f.offset += int64(n) + return n, err +} + +func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) { + typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{ + ID: f.c.nextID(), + Handle: f.handle, + Offset: uint64(off), + Length: uint32(len(b)), + Data: b, + }) + if err != nil { + return 0, err + } + + switch typ { + case sshFxpStatus: + id, _ := unmarshalUint32(data) + err := normaliseError(unmarshalStatus(id, data)) + if err != nil { + return 0, err + } + + default: + return 0, unimplementedPacketErr(typ) + } + + return len(b), nil +} + +// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially. +func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { + // Split the write into multiple maxPacket-sized concurrent writes + // bounded by maxConcurrentRequests. This allows writes with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. + + cancel := make(chan struct{}) + + type work struct { + id uint32 + res chan result + + off int64 + } + workCh := make(chan work) + + concurrency := len(b)/f.c.maxPacket + 1 + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + pool := newResChanPool(concurrency) + + // Slice: cut up the Write into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. + go func() { + defer close(workCh) + + var read int + chunkSize := f.c.maxPacket + + for read < len(b) { + wb := b[read:] + if len(wb) > chunkSize { + wb = wb[:chunkSize] + } + + id := f.c.nextID() + res := pool.Get() + off := off + int64(read) + + f.c.dispatchRequest(res, &sshFxpWritePacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Length: uint32(len(wb)), + Data: wb, + }) + + select { + case workCh <- work{id, res, off}: + case <-cancel: + return + } + + read += len(wb) + } + }() + + type wErr struct { + off int64 + err error + } + errCh := make(chan wErr) + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets work, and does the Write from each buffer to its respective offset. 
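+ // Unlike the concurrent read path, no sequencing channel is needed
+ // here: every chunk is written at an explicit offset, so the workers
+ // only have to report the earliest failing offset on errCh.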
+ go func() { + defer wg.Done() + + for work := range workCh { + s := <-work.res + pool.Put(work.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(work.id, s.data)) + default: + err = unimplementedPacketErr(s.typ) + } + } + + if err != nil { + errCh <- wErr{work.off, err} + } + } + }() + } + + // Wait for long tail, before closing results. + go func() { + wg.Wait() + close(errCh) + }() + + // Reduce: collect all the results into a relevant return: the earliest offset to return an error. + firstErr := wErr{math.MaxInt64, nil} + for wErr := range errCh { + if wErr.off <= firstErr.off { + firstErr = wErr + } + + select { + case <-cancel: + default: + // stop any more work from being distributed. (Just in case.) + close(cancel) + } + } + + if firstErr.err != nil { + // firstErr.err != nil if and only if firstErr.off >= our starting offset. + return int(firstErr.off - off), firstErr.err + } + + return len(b), nil +} + +// WriteAt writes up to len(b) bytes to the File at a given offset `off`. It returns +// the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics, +// so the file offset is not altered during the write. +func (f *File) WriteAt(b []byte, off int64) (written int, err error) { + if len(b) <= f.c.maxPacket { + // We can do this in one write. + return f.writeChunkAt(nil, b, off) + } + + if f.c.useConcurrentWrites { + return f.writeAtConcurrent(b, off) + } + + ch := make(chan result, 1) // reusable channel + + chunkSize := f.c.maxPacket + + for written < len(b) { + wb := b[written:] + if len(wb) > chunkSize { + wb = wb[:chunkSize] + } + + n, err := f.writeChunkAt(ch, wb, off+int64(written)) + if n > 0 { + written += n + } + + if err != nil { + return written, err + } + } + + return len(b), nil +} + +// ReadFromWithConcurrency implements ReaderFrom, +// but uses the given concurrency to issue multiple requests at the same time. +// +// Giving a concurrency of less than one will default to the Client’s max concurrency. +// +// Otherwise, the given concurrency will be capped by the Client's max concurrency. +func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) { + // Split the write into multiple maxPacket-sized concurrent writes. + // This allows writes with a suitably large reader + // to transfer data at a much faster rate due to overlapping round trip times. + + cancel := make(chan struct{}) + + type work struct { + id uint32 + res chan result + + off int64 + } + workCh := make(chan work) + + type rwErr struct { + off int64 + err error + } + errCh := make(chan rwErr) + + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + pool := newResChanPool(concurrency) + + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. 
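+ // Note that the source Reader is consumed sequentially by this single
+ // goroutine; only the resulting write requests are in flight
+ // concurrently, so r itself needs no locking.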
+ go func() { + defer close(workCh) + + b := make([]byte, f.c.maxPacket) + off := f.offset + + for { + n, err := r.Read(b) + + if n > 0 { + read += int64(n) + + id := f.c.nextID() + res := pool.Get() + + f.c.dispatchRequest(res, &sshFxpWritePacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Length: uint32(n), + Data: b, + }) + + select { + case workCh <- work{id, res, off}: + case <-cancel: + return + } + + off += int64(n) + } + + if err != nil { + if err != io.EOF { + errCh <- rwErr{off, err} + } + return + } + } + }() + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets work, and does the Write from each buffer to its respective offset. + go func() { + defer wg.Done() + + for work := range workCh { + s := <-work.res + pool.Put(work.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(work.id, s.data)) + default: + err = unimplementedPacketErr(s.typ) + } + } + + if err != nil { + errCh <- rwErr{work.off, err} + } + } + }() + } + + // Wait for long tail, before closing results. + go func() { + wg.Wait() + close(errCh) + }() + + // Reduce: Collect all the results into a relevant return: the earliest offset to return an error. + firstErr := rwErr{math.MaxInt64, nil} + for rwErr := range errCh { + if rwErr.off <= firstErr.off { + firstErr = rwErr + } + + select { + case <-cancel: + default: + // stop any more work from being distributed. + close(cancel) + } + } + + if firstErr.err != nil { + // firstErr.err != nil if and only if firstErr.off is a valid offset. + // + // firstErr.off will then be the lesser of: + // * the offset of the first error from writing, + // * the last successfully read offset. + // + // This could be less than the last successfully written offset, + // which is the whole reason for the UseConcurrentWrites() ClientOption. + // + // Callers are responsible for truncating any SFTP files to a safe length. + f.offset = firstErr.off + + // ReadFrom is defined to return the read bytes, regardless of any writer errors. + return read, firstErr.err + } + + f.offset += read + return read, nil +} + +// ReadFrom reads data from r until EOF and writes it to the file. The return +// value is the number of bytes read. Any error except io.EOF encountered +// during the read is also returned. +// +// This method is preferred over calling Write multiple times +// to maximise throughput for transferring the entire file, +// especially over high-latency links. +func (f *File) ReadFrom(r io.Reader) (int64, error) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.c.useConcurrentWrites { + var remain int64 + switch r := r.(type) { + case interface{ Len() int }: + remain = int64(r.Len()) + + case interface{ Size() int64 }: + remain = r.Size() + + case *io.LimitedReader: + remain = r.N + + case interface{ Stat() (os.FileInfo, error) }: + info, err := r.Stat() + if err == nil { + remain = info.Size() + } + } + + if remain < 0 { + // We can strongly assert that we want default max concurrency here. + return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests) + } + + if remain > int64(f.c.maxPacket) { + // Otherwise, only use concurrency, if it would be at least two packets. + + // This is the best reasonable guess we can make. + concurrency64 := remain/int64(f.c.maxPacket) + 1 + + // We need to cap this value to an `int` size value to avoid overflow on 32-bit machines. + // So, we may as well pre-cap it to `f.c.maxConcurrentRequests`. 
+ if concurrency64 > int64(f.c.maxConcurrentRequests) { + concurrency64 = int64(f.c.maxConcurrentRequests) + } + + return f.ReadFromWithConcurrency(r, int(concurrency64)) + } + } + + ch := make(chan result, 1) // reusable channel + + b := make([]byte, f.c.maxPacket) + + var read int64 + for { + n, err := r.Read(b) + if n < 0 { + panic("sftp.File: reader returned negative count from Read") + } + + if n > 0 { + read += int64(n) + + m, err2 := f.writeChunkAt(ch, b[:n], f.offset) + f.offset += int64(m) + + if err == nil { + err = err2 + } + } + + if err != nil { + if err == io.EOF { + return read, nil // return nil explicitly. + } + + return read, err + } + } +} + +// Seek implements io.Seeker by setting the client offset for the next Read or +// Write. It returns the next offset read. Seeking before or after the end of +// the file is undefined. Seeking relative to the end calls Stat. +func (f *File) Seek(offset int64, whence int) (int64, error) { + f.mu.Lock() + defer f.mu.Unlock() + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += f.offset + case io.SeekEnd: + fi, err := f.Stat() + if err != nil { + return f.offset, err + } + offset += fi.Size() + default: + return f.offset, unimplementedSeekWhence(whence) + } + + if offset < 0 { + return f.offset, os.ErrInvalid + } + + f.offset = offset + return f.offset, nil +} + +// Chown changes the uid/gid of the current file. +func (f *File) Chown(uid, gid int) error { + return f.c.Chown(f.path, uid, gid) +} + +// Chmod changes the permissions of the current file. +// +// See Client.Chmod for details. +func (f *File) Chmod(mode os.FileMode) error { + return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode)) +} + +// Sync requests a flush of the contents of a File to stable storage. +// +// Sync requires the server to support the fsync@openssh.com extension. +func (f *File) Sync() error { + id := f.c.nextID() + typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{ + ID: id, + Handle: f.handle, + }) + + switch { + case err != nil: + return err + case typ == sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return &unexpectedPacketErr{want: sshFxpStatus, got: typ} + } +} + +// Truncate sets the size of the current file. Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +// We send a SSH_FXP_FSETSTAT here since we have a file handle +func (f *File) Truncate(size int64) error { + return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size)) +} + +// normaliseError normalises an error into a more standard form that can be +// checked against stdlib errors like io.EOF or os.ErrNotExist. +func normaliseError(err error) error { + switch err := err.(type) { + case *StatusError: + switch err.Code { + case sshFxEOF: + return io.EOF + case sshFxNoSuchFile: + return os.ErrNotExist + case sshFxPermissionDenied: + return os.ErrPermission + case sshFxOk: + return nil + default: + return err + } + default: + return err + } +} + +// flags converts the flags passed to OpenFile into ssh flags. +// Unsupported flags are ignored. 
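The concurrent write path above is driven entirely by client options. The following is a minimal sketch of exercising it through the public pkg/sftp API; the host, credentials, and paths are placeholders, and error handling is abbreviated to keep the sketch short.

package main

import (
	"log"
	"os"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	conn, err := ssh.Dial("tcp", "example.com:22", &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("demo")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not do this in production
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// UseConcurrentWrites enables the writeAtConcurrent/ReadFromWithConcurrency
	// paths; MaxPacket sets the chunk size those paths slice writes into.
	client, err := sftp.NewClient(conn,
		sftp.UseConcurrentWrites(true),
		sftp.MaxPacket(32768),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	src, err := os.Open("/tmp/local.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := client.Create("/tmp/remote.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// ReadFrom picks a concurrency from the source size, as shown above.
	if _, err := dst.ReadFrom(src); err != nil {
		log.Fatal(err)
	}
}

Note the caveat documented on ReadFromWithConcurrency: on error, the recorded offset may land before data that was already written, so callers wanting exact semantics should truncate the remote file to a safe length. The flags helper that follows maps os.OpenFile-style flag bits onto the SSH_FXF_* values carried in open requests.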
+func flags(f int) uint32 { + var out uint32 + switch f & os.O_WRONLY { + case os.O_WRONLY: + out |= sshFxfWrite + case os.O_RDONLY: + out |= sshFxfRead + } + if f&os.O_RDWR == os.O_RDWR { + out |= sshFxfRead | sshFxfWrite + } + if f&os.O_APPEND == os.O_APPEND { + out |= sshFxfAppend + } + if f&os.O_CREATE == os.O_CREATE { + out |= sshFxfCreat + } + if f&os.O_TRUNC == os.O_TRUNC { + out |= sshFxfTrunc + } + if f&os.O_EXCL == os.O_EXCL { + out |= sshFxfExcl + } + return out +} + +// toChmodPerm converts Go permission bits to POSIX permission bits. +// +// This differs from fromFileMode in that we preserve the POSIX versions of +// setuid, setgid and sticky in m, because we've historically supported those +// bits, and we mask off any non-permission bits. +func toChmodPerm(m os.FileMode) (perm uint32) { + const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX + perm = uint32(m & mask) + + if m&os.ModeSetuid != 0 { + perm |= s_ISUID + } + if m&os.ModeSetgid != 0 { + perm |= s_ISGID + } + if m&os.ModeSticky != 0 { + perm |= s_ISVTX + } + + return perm +} diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go new file mode 100644 index 000000000..7d9514237 --- /dev/null +++ b/vendor/github.com/pkg/sftp/conn.go @@ -0,0 +1,189 @@ +package sftp + +import ( + "encoding" + "fmt" + "io" + "sync" +) + +// conn implements a bidirectional channel on which client and server +// connections are multiplexed. +type conn struct { + io.Reader + io.WriteCloser + // this is the same allocator used in packet manager + alloc *allocator + sync.Mutex // used to serialise writes to sendPacket +} + +// the orderID is used in server mode if the allocator is enabled. +// For the client mode just pass 0 +func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { + return recvPacket(c, c.alloc, orderID) +} + +func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { + c.Lock() + defer c.Unlock() + + return sendPacket(c, m) +} + +func (c *conn) Close() error { + c.Lock() + defer c.Unlock() + return c.WriteCloser.Close() +} + +type clientConn struct { + conn + wg sync.WaitGroup + + sync.Mutex // protects inflight + inflight map[uint32]chan<- result // outstanding requests + + closed chan struct{} + err error +} + +// Wait blocks until the conn has shut down, and return the error +// causing the shutdown. It can be called concurrently from multiple +// goroutines. +func (c *clientConn) Wait() error { + <-c.closed + return c.err +} + +// Close closes the SFTP session. +func (c *clientConn) Close() error { + defer c.wg.Wait() + return c.conn.Close() +} + +func (c *clientConn) loop() { + defer c.wg.Done() + err := c.recv() + if err != nil { + c.broadcastErr(err) + } +} + +// recv continuously reads from the server and forwards responses to the +// appropriate channel. +func (c *clientConn) recv() error { + defer c.conn.Close() + + for { + typ, data, err := c.recvPacket(0) + if err != nil { + return err + } + sid, _, err := unmarshalUint32Safe(data) + if err != nil { + return err + } + + ch, ok := c.getChannel(sid) + if !ok { + // This is an unexpected occurrence. Send the error + // back to all listeners so that they terminate + // gracefully. + return fmt.Errorf("sid not found: %d", sid) + } + + ch <- result{typ: typ, data: data} + } +} + +func (c *clientConn) putChannel(ch chan<- result, sid uint32) bool { + c.Lock() + defer c.Unlock() + + select { + case <-c.closed: + // already closed with broadcastErr, return error on chan. 
+		ch <- result{err: ErrSSHFxConnectionLost}
+		return false
+	default:
+	}
+
+	c.inflight[sid] = ch
+	return true
+}
+
+func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) {
+	c.Lock()
+	defer c.Unlock()
+
+	ch, ok := c.inflight[sid]
+	delete(c.inflight, sid)
+
+	return ch, ok
+}
+
+// result captures the result of receiving a packet from the server.
+type result struct {
+	typ  byte
+	data []byte
+	err  error
+}
+
+type idmarshaler interface {
+	id() uint32
+	encoding.BinaryMarshaler
+}
+
+func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) {
+	if cap(ch) < 1 {
+		ch = make(chan result, 1)
+	}
+
+	c.dispatchRequest(ch, p)
+	s := <-ch
+	return s.typ, s.data, s.err
+}
+
+// dispatchRequest should ideally only be called by race-detection tests outside of this file,
+// where you have to ensure two packets are in flight sequentially after each other.
+func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
+	sid := p.id()
+
+	if !c.putChannel(ch, sid) {
+		// already closed.
+		return
+	}
+
+	if err := c.conn.sendPacket(p); err != nil {
+		if ch, ok := c.getChannel(sid); ok {
+			ch <- result{err: err}
+		}
+	}
+}
+
+// broadcastErr sends an error to all goroutines waiting for a response.
+func (c *clientConn) broadcastErr(err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	bcastRes := result{err: ErrSSHFxConnectionLost}
+	for sid, ch := range c.inflight {
+		ch <- bcastRes
+
+		// Replace the chan in inflight,
+		// we have hijacked this chan,
+		// and this guarantees always-only-once sending.
+		c.inflight[sid] = make(chan<- result, 1)
+	}
+
+	c.err = err
+	close(c.closed)
+}
+
+type serverConn struct {
+	conn
+}
+
+func (s *serverConn) sendError(id uint32, err error) error {
+	return s.sendPacket(statusFromError(id, err))
+}
diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go
new file mode 100644
index 000000000..3e264abe3
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/debug.go
@@ -0,0 +1,9 @@
+// +build debug
+
+package sftp
+
+import "log"
+
+func debug(fmt string, args ...interface{}) {
+	log.Printf(fmt, args...)
+} diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go new file mode 100644 index 000000000..169aebc28 --- /dev/null +++ b/vendor/github.com/pkg/sftp/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package sftp + +import "bytes" + +type sinkfuzz struct{} + +func (*sinkfuzz) Close() error { return nil } +func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil } + +var devnull = &sinkfuzz{} + +// To run: go-fuzz-build && go-fuzz +func Fuzz(data []byte) int { + c, err := NewClientPipe(bytes.NewReader(data), devnull) + if err != nil { + return 0 + } + c.Close() + return 1 +} diff --git a/vendor/github.com/pkg/sftp/go.mod b/vendor/github.com/pkg/sftp/go.mod new file mode 100644 index 000000000..b0347871f --- /dev/null +++ b/vendor/github.com/pkg/sftp/go.mod @@ -0,0 +1,10 @@ +module github.com/pkg/sftp + +go 1.15 + +require ( + github.com/kr/fs v0.1.0 + github.com/stretchr/testify v1.7.0 + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect +) diff --git a/vendor/github.com/pkg/sftp/go.sum b/vendor/github.com/pkg/sftp/go.sum new file mode 100644 index 000000000..2b66d87e3 --- /dev/null +++ b/vendor/github.com/pkg/sftp/go.sum @@ -0,0 +1,25 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c 
h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go new file mode 100644 index 000000000..eed61bfc6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go @@ -0,0 +1,325 @@ +package filexfer + +// Attributes related flags. +const ( + AttrSize = 1 << iota // SSH_FILEXFER_ATTR_SIZE + AttrUIDGID // SSH_FILEXFER_ATTR_UIDGID + AttrPermissions // SSH_FILEXFER_ATTR_PERMISSIONS + AttrACModTime // SSH_FILEXFER_ACMODTIME + + AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED +) + +// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02 +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +type Attributes struct { + Flags uint32 + + // AttrSize + Size uint64 + + // AttrUIDGID + UID uint32 + GID uint32 + + // AttrPermissions + Permissions FileMode + + // AttrACmodTime + ATime uint32 + MTime uint32 + + // AttrExtended + ExtendedAttributes []ExtendedAttribute +} + +// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined. +func (a *Attributes) GetSize() (size uint64, ok bool) { + return a.Size, a.Flags&AttrSize != 0 +} + +// SetSize is a convenience function that sets the Size field, +// and marks the field as valid/defined in Flags. +func (a *Attributes) SetSize(size uint64) { + a.Flags |= AttrSize + a.Size = size +} + +// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined. +func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) { + return a.UID, a.GID, a.Flags&AttrUIDGID != 0 +} + +// SetUIDGID is a convenience function that sets the UID and GID fields, +// and marks the fields as valid/defined in Flags. +func (a *Attributes) SetUIDGID(uid, gid uint32) { + a.Flags |= AttrUIDGID + a.UID = uid + a.GID = gid +} + +// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined. +func (a *Attributes) GetPermissions() (perms FileMode, ok bool) { + return a.Permissions, a.Flags&AttrPermissions != 0 +} + +// SetPermissions is a convenience function that sets the Permissions field, +// and marks the field as valid/defined in Flags. +func (a *Attributes) SetPermissions(perms FileMode) { + a.Flags |= AttrPermissions + a.Permissions = perms +} + +// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined. +func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) { + return a.ATime, a.MTime, a.Flags&AttrACModTime != 0 +} + +// SetACModTime is a convenience function that sets the ATime and MTime fields, +// and marks the fields as valid/defined in Flags. +func (a *Attributes) SetACModTime(atime, mtime uint32) { + a.Flags |= AttrACModTime + a.ATime = atime + a.MTime = mtime +} + +// Len returns the number of bytes a would marshal into. 
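Because every field is guarded by a bit in Flags, what Len reports (and what a round trip preserves) depends on which flags were set. The following sketch is only compilable inside the pkg/sftp module, since the filexfer package lives under internal/; the sshfx import alias and main wrapper are illustrative.

package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	var attrs sshfx.Attributes

	// Setters flip the matching bit in Flags, so the receiver knows
	// which fields were actually transmitted.
	attrs.SetSize(4096)
	attrs.SetPermissions(0o644)

	data, err := attrs.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded sshfx.Attributes
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}

	if size, ok := decoded.GetSize(); ok {
		fmt.Println("size:", size) // size: 4096
	}
	if _, _, ok := decoded.GetUIDGID(); !ok {
		fmt.Println("uid/gid were never set, so the flag is clear")
	}
}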
+func (a *Attributes) Len() int { + length := 4 + + if a.Flags&AttrSize != 0 { + length += 8 + } + + if a.Flags&AttrUIDGID != 0 { + length += 4 + 4 + } + + if a.Flags&AttrPermissions != 0 { + length += 4 + } + + if a.Flags&AttrACModTime != 0 { + length += 4 + 4 + } + + if a.Flags&AttrExtended != 0 { + length += 4 + + for _, ext := range a.ExtendedAttributes { + length += ext.Len() + } + } + + return length +} + +// MarshalInto marshals e onto the end of the given Buffer. +func (a *Attributes) MarshalInto(b *Buffer) { + b.AppendUint32(a.Flags) + + if a.Flags&AttrSize != 0 { + b.AppendUint64(a.Size) + } + + if a.Flags&AttrUIDGID != 0 { + b.AppendUint32(a.UID) + b.AppendUint32(a.GID) + } + + if a.Flags&AttrPermissions != 0 { + b.AppendUint32(uint32(a.Permissions)) + } + + if a.Flags&AttrACModTime != 0 { + b.AppendUint32(a.ATime) + b.AppendUint32(a.MTime) + } + + if a.Flags&AttrExtended != 0 { + b.AppendUint32(uint32(len(a.ExtendedAttributes))) + + for _, ext := range a.ExtendedAttributes { + ext.MarshalInto(b) + } + } +} + +// MarshalBinary returns a as the binary encoding of a. +func (a *Attributes) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, a.Len())) + a.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an Attributes from the given Buffer into e. +// +// NOTE: The values of fields not covered in the a.Flags are explicitly undefined. +func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) { + flags, err := b.ConsumeUint32() + if err != nil { + return err + } + + return a.XXX_UnmarshalByFlags(flags, b) +} + +// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode. +// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp. +// This function is not a part of any compatibility promise. +func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) { + a.Flags = flags + + // Short-circuit dummy attributes. + if a.Flags == 0 { + return nil + } + + if a.Flags&AttrSize != 0 { + if a.Size, err = b.ConsumeUint64(); err != nil { + return err + } + } + + if a.Flags&AttrUIDGID != 0 { + if a.UID, err = b.ConsumeUint32(); err != nil { + return err + } + + if a.GID, err = b.ConsumeUint32(); err != nil { + return err + } + } + + if a.Flags&AttrPermissions != 0 { + m, err := b.ConsumeUint32() + if err != nil { + return err + } + + a.Permissions = FileMode(m) + } + + if a.Flags&AttrACModTime != 0 { + if a.ATime, err = b.ConsumeUint32(); err != nil { + return err + } + + if a.MTime, err = b.ConsumeUint32(); err != nil { + return err + } + } + + if a.Flags&AttrExtended != 0 { + count, err := b.ConsumeUint32() + if err != nil { + return err + } + + a.ExtendedAttributes = make([]ExtendedAttribute, count) + for i := range a.ExtendedAttributes { + a.ExtendedAttributes[i].UnmarshalFrom(b) + } + } + + return nil +} + +// UnmarshalBinary decodes the binary encoding of Attributes into e. +func (a *Attributes) UnmarshalBinary(data []byte) error { + return a.UnmarshalFrom(NewBuffer(data)) +} + +// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02 +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +type ExtendedAttribute struct { + Type string + Data string +} + +// Len returns the number of bytes e would marshal into. +func (e *ExtendedAttribute) Len() int { + return 4 + len(e.Type) + 4 + len(e.Data) +} + +// MarshalInto marshals e onto the end of the given Buffer. 
+func (e *ExtendedAttribute) MarshalInto(b *Buffer) {
+	b.AppendString(e.Type)
+	b.AppendString(e.Data)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
+func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, e.Len()))
+	e.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an ExtendedAttribute from the given Buffer into e.
+func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) {
+	if e.Type, err = b.ConsumeString(); err != nil {
+		return err
+	}
+
+	if e.Data, err = b.ConsumeString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e.
+func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error {
+	return e.UnmarshalFrom(NewBuffer(data))
+}
+
+// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02
+//
+// This type is incompatible with versions 4 or higher.
+type NameEntry struct {
+	Filename string
+	Longname string
+	Attrs    Attributes
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *NameEntry) Len() int {
+	return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len()
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *NameEntry) MarshalInto(b *Buffer) {
+	b.AppendString(e.Filename)
+	b.AppendString(e.Longname)
+
+	e.Attrs.MarshalInto(b)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
+func (e *NameEntry) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, e.Len()))
+	e.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals a NameEntry from the given Buffer into e.
+//
+// NOTE: The values of fields not covered by e.Attrs.Flags are explicitly undefined.
+func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) {
+	if e.Filename, err = b.ConsumeString(); err != nil {
+		return err
+	}
+
+	if e.Longname, err = b.ConsumeString(); err != nil {
+		return err
+	}
+
+	return e.Attrs.UnmarshalFrom(b)
+}
+
+// UnmarshalBinary decodes the binary encoding of NameEntry into e.
+func (e *NameEntry) UnmarshalBinary(data []byte) error {
+	return e.UnmarshalFrom(NewBuffer(data))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
new file mode 100644
index 000000000..a6086036e
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
@@ -0,0 +1,293 @@
+package filexfer
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// Various encoding errors.
+var (
+	ErrShortPacket = errors.New("packet too short")
+	ErrLongPacket  = errors.New("packet too long")
+)
+
+// Buffer wraps up the various encoding details of the SSH format.
+//
+// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8
+type Buffer struct {
+	b   []byte
+	off int
+}
+
+// NewBuffer creates and initializes a new buffer using buf as its initial contents.
+// The new buffer takes ownership of buf, and the caller should not use buf after this call.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{
+		b: buf,
+	}
+}
+
+// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into.
+// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes.
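That prologue (length, type, request-id) is exactly what StartPacket later fills in. More generally, Buffer is a thin cursor over a byte slice: Append methods grow it, Consume methods advance the offset. A sketch of the round trip, again compilable only within the module because the package is internal:

package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	buf := sshfx.NewBuffer(nil)

	// Appends write SSH wire types: fixed-width big-endian integers,
	// and uint32-length-prefixed strings.
	buf.AppendUint32(7)
	buf.AppendString("handle")

	fmt.Println(buf.Len()) // 14 = 4 + (4 + 6)

	// Consumes read the same values back in order, advancing the offset.
	v, _ := buf.ConsumeUint32()
	s, _ := buf.ConsumeString()
	fmt.Println(v, s) // 7 handle

	// Once drained, further consumes fail with ErrShortPacket.
	if _, err := buf.ConsumeUint8(); err == sshfx.ErrShortPacket {
		fmt.Println("buffer exhausted")
	}
}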
+func NewMarshalBuffer(size int) *Buffer { + return NewBuffer(make([]byte, 4+1+4+size)) +} + +// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer. +// The slice is valid for use only until the next buffer modification +// (that is, only until the next call to an Append or Consume method). +func (b *Buffer) Bytes() []byte { + return b.b[b.off:] +} + +// Len returns the number of unconsumed bytes in the buffer. +func (b *Buffer) Len() int { return len(b.b) - b.off } + +// Cap returns the capacity of the buffer’s underlying byte slice, +// that is, the total space allocated for the buffer’s data. +func (b *Buffer) Cap() int { return cap(b.b) } + +// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends. +func (b *Buffer) Reset() { + b.b = b.b[:0] + b.off = 0 +} + +// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into. +// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID. +func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) { + b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0 + + b.AppendUint8(uint8(packetType)) + b.AppendUint32(requestID) +} + +// Packet finalizes the packet started from StartPacket. +// It is expected that this will end the ownership of the underlying byte-slice, +// and so the returned byte-slices may be reused the same as any other byte-slice, +// the caller should not use this buffer after this call. +// +// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian). +// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload. +// +// It is assumed that no Consume methods have been called on this buffer, +// and so it returns the whole underlying slice. +func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) { + b.PutLength(len(b.b) - 4 + len(payload)) + + return b.b, payload, nil +} + +// ConsumeUint8 consumes a single byte from the buffer. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint8() (uint8, error) { + if b.Len() < 1 { + return 0, ErrShortPacket + } + + var v uint8 + v, b.off = b.b[b.off], b.off+1 + return v, nil +} + +// AppendUint8 appends a single byte into the buffer. +func (b *Buffer) AppendUint8(v uint8) { + b.b = append(b.b, v) +} + +// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeBool() (bool, error) { + v, err := b.ConsumeUint8() + if err != nil { + return false, err + } + + return v != 0, nil +} + +// AppendBool appends a single bool into the buffer. +// It encodes it as a single byte, with false as 0, and true as 1. +func (b *Buffer) AppendBool(v bool) { + if v { + b.AppendUint8(1) + } else { + b.AppendUint8(0) + } +} + +// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint16() (uint16, error) { + if b.Len() < 2 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint16(b.b[b.off:]) + b.off += 2 + return v, nil +} + +// AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian). 
+func (b *Buffer) AppendUint16(v uint16) { + b.b = append(b.b, + byte(v>>8), + byte(v>>0), + ) +} + +// unmarshalUint32 is used internally to read the packet length. +// It is unsafe, and so not exported. +// Even within this package, its use should be avoided. +func unmarshalUint32(b []byte) uint32 { + return binary.BigEndian.Uint32(b[:4]) +} + +// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint32() (uint32, error) { + if b.Len() < 4 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint32(b.b[b.off:]) + b.off += 4 + return v, nil +} + +// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendUint32(v uint32) { + b.b = append(b.b, + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v>>0), + ) +} + +// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint64() (uint64, error) { + if b.Len() < 8 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint64(b.b[b.off:]) + b.off += 8 + return v, nil +} + +// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendUint64(v uint64) { + b.b = append(b.b, + byte(v>>56), + byte(v>>48), + byte(v>>40), + byte(v>>32), + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v>>0), + ) +} + +// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeInt64() (int64, error) { + u, err := b.ConsumeUint64() + if err != nil { + return 0, err + } + + return int64(u), err +} + +// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement. +func (b *Buffer) AppendInt64(v int64) { + b.AppendUint64(uint64(v)) +} + +// ConsumeByteSlice consumes a single string of raw binary data from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// +// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused +// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary). +// +// In no case will any Consume calls return overlapping slice aliases, +// and Append calls are guaranteed to not disturb this slice alias. +func (b *Buffer) ConsumeByteSlice() ([]byte, error) { + length, err := b.ConsumeUint32() + if err != nil { + return nil, err + } + + if b.Len() < int(length) { + return nil, ErrShortPacket + } + + v := b.b[b.off:] + if len(v) > int(length) { + v = v[:length:length] + } + b.off += int(length) + return v, nil +} + +// AppendByteSlice appends a single string of raw binary data into the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +func (b *Buffer) AppendByteSlice(v []byte) { + b.AppendUint32(uint32(len(v))) + b.b = append(b.b, v...) +} + +// ConsumeString consumes a single string of binary data from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. 
+//
+// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data.
+// All caveats on using arbitrary binary data in Go strings apply.
+func (b *Buffer) ConsumeString() (string, error) {
+	v, err := b.ConsumeByteSlice()
+	if err != nil {
+		return "", err
+	}
+
+	return string(v), nil
+}
+
+// AppendString appends a single string of binary data into the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+func (b *Buffer) AppendString(v string) {
+	b.AppendByteSlice([]byte(v))
+}
+
+// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian).
+func (b *Buffer) PutLength(size int) {
+	if len(b.b) < 4 {
+		b.b = append(b.b, make([]byte, 4-len(b.b))...)
+	}
+
+	binary.BigEndian.PutUint32(b.b, uint32(size))
+}
+
+// MarshalBinary returns a clone of the full internal buffer.
+func (b *Buffer) MarshalBinary() ([]byte, error) {
+	clone := make([]byte, len(b.b))
+	n := copy(clone, b.b)
+	return clone[:n], nil
+}
+
+// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset.
+func (b *Buffer) UnmarshalBinary(data []byte) error {
+	if grow := len(data) - len(b.b); grow > 0 {
+		b.b = append(b.b, make([]byte, grow)...)
+	}
+
+	n := copy(b.b, data)
+	b.b = b.b[:n]
+	b.off = 0
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
new file mode 100644
index 000000000..6b7b2cef4
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
@@ -0,0 +1,142 @@
+package filexfer
+
+import (
+	"encoding"
+	"sync"
+)
+
+// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
+type ExtendedData = interface {
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}
+
+// ExtendedDataConstructor defines a function that returns a new(ArbitraryExtendedPacket).
+type ExtendedDataConstructor func() ExtendedData
+
+var extendedPacketTypes = struct {
+	mu           sync.RWMutex
+	constructors map[string]ExtendedDataConstructor
+}{
+	constructors: make(map[string]ExtendedDataConstructor),
+}
+
+// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string.
+func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) {
+	extendedPacketTypes.mu.Lock()
+	defer extendedPacketTypes.mu.Unlock()
+
+	if _, exist := extendedPacketTypes.constructors[extension]; exist {
+		panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension)
+	}
+
+	extendedPacketTypes.constructors[extension] = constructor
+}
+
+func newExtendedPacket(extension string) ExtendedData {
+	extendedPacketTypes.mu.RLock()
+	defer extendedPacketTypes.mu.RUnlock()
+
+	if f := extendedPacketTypes.constructors[extension]; f != nil {
+		return f()
+	}
+
+	return new(Buffer)
+}
+
+// ExtendedPacket defines the SSH_FXP_EXTENDED packet.
+type ExtendedPacket struct {
+	ExtendedRequest string
+
+	Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedPacket) Type() PacketType {
+	return PacketTypeExtended
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.
+func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		size := 4 + len(p.ExtendedRequest) // string(extended-request)
+		buf = NewMarshalBuffer(size)
+	}
+
+	buf.StartPacket(PacketTypeExtended, reqid)
+	buf.AppendString(p.ExtendedRequest)
+
+	if p.Data != nil {
+		payload, err = p.Data.MarshalBinary()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is nil, and the extension has been registered, a new type will be made from the registration.
+// If the extension has not been registered, then a new Buffer will be allocated.
+// Then the request-specific-data will be unmarshaled from the rest of the buffer.
+func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+	if p.ExtendedRequest, err = buf.ConsumeString(); err != nil {
+		return err
+	}
+
+	if p.Data == nil {
+		p.Data = newExtendedPacket(p.ExtendedRequest)
+	}
+
+	return p.Data.UnmarshalBinary(buf.Bytes())
+}
+
+// ExtendedReplyPacket defines the SSH_FXP_EXTENDED_REPLY packet.
+type ExtendedReplyPacket struct {
+	Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedReplyPacket) Type() PacketType {
+	return PacketTypeExtendedReply
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.
+func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		buf = NewMarshalBuffer(0)
+	}
+
+	buf.StartPacket(PacketTypeExtendedReply, reqid)
+
+	if p.Data != nil {
+		payload, err = p.Data.MarshalBinary()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is nil, and there is request-specific-data,
+// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data.
+func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+	if p.Data == nil {
+		p.Data = new(Buffer)
+	}
+
+	return p.Data.UnmarshalBinary(buf.Bytes())
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
new file mode 100644
index 000000000..11c0b99c2
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
@@ -0,0 +1,46 @@
+package filexfer
+
+// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13.
+// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions.
+//
+// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2
+type ExtensionPair struct {
+	Name string
+	Data string
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *ExtensionPair) Len() int {
+	return 4 + len(e.Name) + 4 + len(e.Data)
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *ExtensionPair) MarshalInto(buf *Buffer) {
+	buf.AppendString(e.Name)
+	buf.AppendString(e.Data)
+}
+
+// MarshalBinary returns e as the binary encoding of e.
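ExtensionPair round-trips the same way as the other leaf types. Meanwhile, RegisterExtendedPacketType above is how a caller teaches the decoder about request-specific data. A hypothetical sketch follows: the extension name posix-rename@openssh.com is real, but the data type is illustrative, and the same internal-package caveat applies.

package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

// posixRenameData models the request-specific data of a
// posix-rename@openssh.com request: two SSH strings.
type posixRenameData struct {
	OldPath, NewPath string
}

func (d *posixRenameData) MarshalBinary() ([]byte, error) {
	buf := sshfx.NewBuffer(nil)
	buf.AppendString(d.OldPath)
	buf.AppendString(d.NewPath)
	return buf.Bytes(), nil
}

func (d *posixRenameData) UnmarshalBinary(data []byte) (err error) {
	buf := sshfx.NewBuffer(data)
	if d.OldPath, err = buf.ConsumeString(); err != nil {
		return err
	}
	d.NewPath, err = buf.ConsumeString()
	return err
}

func main() {
	// After registration, unmarshaling an ExtendedPacket with this
	// ExtendedRequest decodes into posixRenameData instead of a raw Buffer.
	sshfx.RegisterExtendedPacketType("posix-rename@openssh.com", func() sshfx.ExtendedData {
		return new(posixRenameData)
	})

	fmt.Println("registered")
}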
+func (e *ExtensionPair) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, e.Len()))
+	e.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e.
+func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) {
+	if e.Name, err = buf.ConsumeString(); err != nil {
+		return err
+	}
+
+	if e.Data, err = buf.ConsumeString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinary decodes the binary encoding of ExtensionPair into e.
+func (e *ExtensionPair) UnmarshalBinary(data []byte) error {
+	return e.UnmarshalFrom(NewBuffer(data))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
new file mode 100644
index 000000000..1e5abf746
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
@@ -0,0 +1,54 @@
+// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+package filexfer
+
+// PacketMarshaller narrowly defines packets that will only be transmitted.
+//
+// ExtendedPacket types will often only implement this interface,
+// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
+type PacketMarshaller interface {
+	// MarshalPacket is the primary intended way to encode a packet.
+	// The request-id for the packet is set from reqid.
+	//
+	// An optional buffer may be given in b.
+	// If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into.
+	// The minimum capacity for the packet must be a constant expression, and should be at least 9.
+	//
+	// It shall return the main body of the encoded packet in header,
+	// and may optionally return an additional payload to be written immediately after the header.
+	//
+	// It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload.
+	MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error)
+}
+
+// Packet defines the behavior of a full generic SFTP packet.
+//
+// InitPacket and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary.
+//
+// ExtendedPacket types should not implement this interface,
+// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
+type Packet interface {
+	PacketMarshaller
+
+	// Type returns the SSH_FXP_xy value associated with the specific packet.
+	Type() PacketType
+
+	// UnmarshalPacketBody decodes a packet body from the given Buffer.
+	// It is assumed that the common header values of the length, type and request-id have already been consumed.
+	//
+	// Implementations should not alias the given Buffer,
+	// instead they can consider prepopulating an internal buffer as a hint,
+	// and copying into that buffer if it has sufficient length.
+	UnmarshalPacketBody(buf *Buffer) error
+}
+
+// ComposePacket converts the return values from a MarshalPacket call into an equivalent call to MarshalBinary.
+func ComposePacket(header, payload []byte, err error) ([]byte, error) {
+	return append(header, payload...), err
+}
+
+// Default length values,
+// defined in draft-ietf-secsh-filexfer-02 section 3.
+const ( + DefaultMaxPacketLength = 34000 + DefaultMaxDataLength = 32768 +) diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go new file mode 100644 index 000000000..48f869861 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go @@ -0,0 +1,147 @@ +package filexfer + +import ( + "fmt" +) + +// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets. +type Status uint32 + +// Defines the various SSH_FX_* values. +const ( + // see draft-ietf-secsh-filexfer-02 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 + StatusOK = Status(iota) + StatusEOF + StatusNoSuchFile + StatusPermissionDenied + StatusFailure + StatusBadMessage + StatusNoConnection + StatusConnectionLost + StatusOPUnsupported + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7 + StatusV4InvalidHandle + StatusV4NoSuchPath + StatusV4FileAlreadyExists + StatusV4WriteProtect + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7 + StatusV4NoMedia + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7 + StatusV5NoSpaceOnFilesystem + StatusV5QuotaExceeded + StatusV5UnknownPrincipal + StatusV5LockConflict + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8 + StatusV6DirNotEmpty + StatusV6NotADirectory + StatusV6InvalidFilename + StatusV6LinkLoop + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8 + StatusV6CannotDelete + StatusV6InvalidParameter + StatusV6FileIsADirectory + StatusV6ByteRangeLockConflict + StatusV6ByteRangeLockRefused + StatusV6DeletePending + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1 + StatusV6FileCorrupt + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1 + StatusV6OwnerInvalid + StatusV6GroupInvalid + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + StatusV6NoMatchingByteRangeLock +) + +func (s Status) Error() string { + return s.String() +} + +// Is returns true if the target is the same Status code, +// or target is a StatusPacket with the same Status code. 
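Beyond Is, Status implements error directly, so codes can flow through ordinary Go error handling, and codes outside the known range still stringify, which keeps logs readable when talking to servers speaking newer drafts. For instance, in a sketch subject to the same internal-package caveat as before:

package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	// Status implements error, so codes can be returned and printed directly.
	var err error = sshfx.StatusPermissionDenied
	fmt.Println(err) // SSH_FX_PERMISSION_DENIED

	// Unknown codes degrade to a readable placeholder rather than a panic.
	fmt.Println(sshfx.Status(42)) // SSH_FX_UNKNOWN(42)
}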
+func (s Status) Is(target error) bool { + if target, ok := target.(*StatusPacket); ok { + return target.StatusCode == s + } + + return s == target +} + +func (s Status) String() string { + switch s { + case StatusOK: + return "SSH_FX_OK" + case StatusEOF: + return "SSH_FX_EOF" + case StatusNoSuchFile: + return "SSH_FX_NO_SUCH_FILE" + case StatusPermissionDenied: + return "SSH_FX_PERMISSION_DENIED" + case StatusFailure: + return "SSH_FX_FAILURE" + case StatusBadMessage: + return "SSH_FX_BAD_MESSAGE" + case StatusNoConnection: + return "SSH_FX_NO_CONNECTION" + case StatusConnectionLost: + return "SSH_FX_CONNECTION_LOST" + case StatusOPUnsupported: + return "SSH_FX_OP_UNSUPPORTED" + case StatusV4InvalidHandle: + return "SSH_FX_INVALID_HANDLE" + case StatusV4NoSuchPath: + return "SSH_FX_NO_SUCH_PATH" + case StatusV4FileAlreadyExists: + return "SSH_FX_FILE_ALREADY_EXISTS" + case StatusV4WriteProtect: + return "SSH_FX_WRITE_PROTECT" + case StatusV4NoMedia: + return "SSH_FX_NO_MEDIA" + case StatusV5NoSpaceOnFilesystem: + return "SSH_FX_NO_SPACE_ON_FILESYSTEM" + case StatusV5QuotaExceeded: + return "SSH_FX_QUOTA_EXCEEDED" + case StatusV5UnknownPrincipal: + return "SSH_FX_UNKNOWN_PRINCIPAL" + case StatusV5LockConflict: + return "SSH_FX_LOCK_CONFLICT" + case StatusV6DirNotEmpty: + return "SSH_FX_DIR_NOT_EMPTY" + case StatusV6NotADirectory: + return "SSH_FX_NOT_A_DIRECTORY" + case StatusV6InvalidFilename: + return "SSH_FX_INVALID_FILENAME" + case StatusV6LinkLoop: + return "SSH_FX_LINK_LOOP" + case StatusV6CannotDelete: + return "SSH_FX_CANNOT_DELETE" + case StatusV6InvalidParameter: + return "SSH_FX_INVALID_PARAMETER" + case StatusV6FileIsADirectory: + return "SSH_FX_FILE_IS_A_DIRECTORY" + case StatusV6ByteRangeLockConflict: + return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT" + case StatusV6ByteRangeLockRefused: + return "SSH_FX_BYTE_RANGE_LOCK_REFUSED" + case StatusV6DeletePending: + return "SSH_FX_DELETE_PENDING" + case StatusV6FileCorrupt: + return "SSH_FX_FILE_CORRUPT" + case StatusV6OwnerInvalid: + return "SSH_FX_OWNER_INVALID" + case StatusV6GroupInvalid: + return "SSH_FX_GROUP_INVALID" + case StatusV6NoMatchingByteRangeLock: + return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK" + default: + return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go new file mode 100644 index 000000000..15caf6d28 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go @@ -0,0 +1,124 @@ +package filexfer + +import ( + "fmt" +) + +// PacketType defines the various SFTP packet types. +type PacketType uint8 + +// Request packet types. +const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeInit = PacketType(iota + 1) + PacketTypeVersion + PacketTypeOpen + PacketTypeClose + PacketTypeRead + PacketTypeWrite + PacketTypeLStat + PacketTypeFStat + PacketTypeSetstat + PacketTypeFSetstat + PacketTypeOpenDir + PacketTypeReadDir + PacketTypeRemove + PacketTypeMkdir + PacketTypeRmdir + PacketTypeRealPath + PacketTypeStat + PacketTypeRename + PacketTypeReadLink + PacketTypeSymlink + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3 + PacketTypeV6Link + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3 + PacketTypeV6Block + PacketTypeV6Unblock +) + +// Response packet types. 
+const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeStatus = PacketType(iota + 101) + PacketTypeHandle + PacketTypeData + PacketTypeName + PacketTypeAttrs +) + +// Extended packet types. +const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeExtended = PacketType(iota + 200) + PacketTypeExtendedReply +) + +func (f PacketType) String() string { + switch f { + case PacketTypeInit: + return "SSH_FXP_INIT" + case PacketTypeVersion: + return "SSH_FXP_VERSION" + case PacketTypeOpen: + return "SSH_FXP_OPEN" + case PacketTypeClose: + return "SSH_FXP_CLOSE" + case PacketTypeRead: + return "SSH_FXP_READ" + case PacketTypeWrite: + return "SSH_FXP_WRITE" + case PacketTypeLStat: + return "SSH_FXP_LSTAT" + case PacketTypeFStat: + return "SSH_FXP_FSTAT" + case PacketTypeSetstat: + return "SSH_FXP_SETSTAT" + case PacketTypeFSetstat: + return "SSH_FXP_FSETSTAT" + case PacketTypeOpenDir: + return "SSH_FXP_OPENDIR" + case PacketTypeReadDir: + return "SSH_FXP_READDIR" + case PacketTypeRemove: + return "SSH_FXP_REMOVE" + case PacketTypeMkdir: + return "SSH_FXP_MKDIR" + case PacketTypeRmdir: + return "SSH_FXP_RMDIR" + case PacketTypeRealPath: + return "SSH_FXP_REALPATH" + case PacketTypeStat: + return "SSH_FXP_STAT" + case PacketTypeRename: + return "SSH_FXP_RENAME" + case PacketTypeReadLink: + return "SSH_FXP_READLINK" + case PacketTypeSymlink: + return "SSH_FXP_SYMLINK" + case PacketTypeV6Link: + return "SSH_FXP_LINK" + case PacketTypeV6Block: + return "SSH_FXP_BLOCK" + case PacketTypeV6Unblock: + return "SSH_FXP_UNBLOCK" + case PacketTypeStatus: + return "SSH_FXP_STATUS" + case PacketTypeHandle: + return "SSH_FXP_HANDLE" + case PacketTypeData: + return "SSH_FXP_DATA" + case PacketTypeName: + return "SSH_FXP_NAME" + case PacketTypeAttrs: + return "SSH_FXP_ATTRS" + case PacketTypeExtended: + return "SSH_FXP_EXTENDED" + case PacketTypeExtendedReply: + return "SSH_FXP_EXTENDED_REPLY" + default: + return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go new file mode 100644 index 000000000..a14277128 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go @@ -0,0 +1,249 @@ +package filexfer + +// ClosePacket defines the SSH_FXP_CLOSE packet. +type ClosePacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ClosePacket) Type() PacketType { + return PacketTypeClose +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeClose, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// ReadPacket defines the SSH_FXP_READ packet. +type ReadPacket struct { + Handle string + Offset uint64 + Len uint32 +} + +// Type returns the SSH_FXP_xy value associated with this packet type. 
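All of these handle packets frame themselves the same way: a 4-byte length, the type byte, the request-id, then the packet fields. A sketch of the resulting bytes for a ClosePacket, subject to the internal-package caveat as before:

package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	p := &sshfx.ClosePacket{Handle: "h1"}

	// Passing nil for b lets MarshalPacket allocate a right-sized buffer.
	header, payload, err := p.MarshalPacket(42, nil)
	if err != nil {
		panic(err)
	}

	// 4-byte length (11), 1-byte type (SSH_FXP_CLOSE = 4), 4-byte
	// request-id (42), then the uint32-length-prefixed handle "h1".
	fmt.Printf("% x\n", header) // 00 00 00 0b 04 00 00 00 2a 00 00 00 02 68 31
	fmt.Println(payload == nil) // true: CLOSE carries no separate payload
}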
+func (p *ReadPacket) Type() PacketType { + return PacketTypeRead +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len) + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRead, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(p.Len) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + if p.Offset, err = buf.ConsumeUint64(); err != nil { + return err + } + + if p.Len, err = buf.ConsumeUint32(); err != nil { + return err + } + + return nil +} + +// WritePacket defines the SSH_FXP_WRITE packet. +type WritePacket struct { + Handle string + Offset uint64 + Data []byte +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *WritePacket) Type() PacketType { + return PacketTypeWrite +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len(data)); data content in payload + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeWrite, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(uint32(len(p.Data))) + + return buf.Packet(p.Data) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is already populated, and of sufficient length to hold the data, +// then this will copy the data into that byte slice. +// +// If p.Data has a length insufficient to hold the data, +// then this will make a new slice of sufficient length, and copy the data into that. +// +// This means this _does not_ alias any of the data buffer that is passed in. +func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + if p.Offset, err = buf.ConsumeUint64(); err != nil { + return err + } + + data, err := buf.ConsumeByteSlice() + if err != nil { + return err + } + + if len(p.Data) < len(data) { + p.Data = make([]byte, len(data)) + } + + n := copy(p.Data, data) + p.Data = p.Data[:n] + return nil +} + +// FStatPacket defines the SSH_FXP_FSTAT packet. +type FStatPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FStatPacket) Type() PacketType { + return PacketTypeFStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFStat, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. 
+// It is assumed that the uint32(request-id) has already been consumed. +func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// FSetstatPacket defines the SSH_FXP_FSETSTAT packet. +type FSetstatPacket struct { + Handle string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FSetstatPacket) Type() PacketType { + return PacketTypeFSetstat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFSetstat, reqid) + buf.AppendString(p.Handle) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// ReadDirPacket defines the SSH_FXP_READDIR packet. +type ReadDirPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadDirPacket) Type() PacketType { + return PacketTypeReadDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadDir, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go new file mode 100644 index 000000000..b0bc6f505 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go @@ -0,0 +1,99 @@ +package filexfer + +// InitPacket defines the SSH_FXP_INIT packet. +type InitPacket struct { + Version uint32 + Extensions []*ExtensionPair +} + +// MarshalBinary returns p as the binary encoding of p. +func (p *InitPacket) MarshalBinary() ([]byte, error) { + size := 1 + 4 // byte(type) + uint32(version) + + for _, ext := range p.Extensions { + size += ext.Len() + } + + b := NewBuffer(make([]byte, 4, 4+size)) + b.AppendUint8(uint8(PacketTypeInit)) + b.AppendUint32(p.Version) + + for _, ext := range p.Extensions { + ext.MarshalInto(b) + } + + b.PutLength(size) + + return b.Bytes(), nil +} + +// UnmarshalBinary unmarshals a full raw packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. 
+// UnmarshalBinary unmarshals a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+// It is also assumed that the uint8(type) has already been consumed in order to determine which packet to unmarshal into.
+func (p *InitPacket) UnmarshalBinary(data []byte) (err error) {
+ buf := NewBuffer(data)
+
+ if p.Version, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ for buf.Len() > 0 {
+ var ext ExtensionPair
+ if err := ext.UnmarshalFrom(buf); err != nil {
+ return err
+ }
+
+ p.Extensions = append(p.Extensions, &ext)
+ }
+
+ return nil
+}
+
+// VersionPacket defines the SSH_FXP_VERSION packet.
+type VersionPacket struct {
+ Version uint32
+ Extensions []*ExtensionPair
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+func (p *VersionPacket) MarshalBinary() ([]byte, error) {
+ size := 1 + 4 // byte(type) + uint32(version)
+
+ for _, ext := range p.Extensions {
+ size += ext.Len()
+ }
+
+ b := NewBuffer(make([]byte, 4, 4+size))
+ b.AppendUint8(uint8(PacketTypeVersion))
+ b.AppendUint32(p.Version)
+
+ for _, ext := range p.Extensions {
+ ext.MarshalInto(b)
+ }
+
+ b.PutLength(size)
+
+ return b.Bytes(), nil
+}
+
+// UnmarshalBinary unmarshals a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+// It is also assumed that the uint8(type) has already been consumed in order to determine which packet to unmarshal into.
+func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) {
+ buf := NewBuffer(data)
+
+ if p.Version, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ for buf.Len() > 0 {
+ var ext ExtensionPair
+ if err := ext.UnmarshalFrom(buf); err != nil {
+ return err
+ }
+
+ p.Extensions = append(p.Extensions, &ext)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
new file mode 100644
index 000000000..135871142
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
@@ -0,0 +1,89 @@
+package filexfer
+
+// SSH_FXF_* flags.
+const (
+ FlagRead = 1 << iota // SSH_FXF_READ
+ FlagWrite // SSH_FXF_WRITE
+ FlagAppend // SSH_FXF_APPEND
+ FlagCreate // SSH_FXF_CREAT
+ FlagTruncate // SSH_FXF_TRUNC
+ FlagExclusive // SSH_FXF_EXCL
+)
+
+// OpenPacket defines the SSH_FXP_OPEN packet.
+type OpenPacket struct {
+ Filename string
+ PFlags uint32
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *OpenPacket) Type() PacketType {
+ return PacketTypeOpen
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // string(filename) + uint32(pflags) + ATTRS(attrs)
+ size := 4 + len(p.Filename) + 4 + p.Attrs.Len()
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeOpen, reqid)
+ buf.AppendString(p.Filename)
+ buf.AppendUint32(p.PFlags)
+
+ p.Attrs.MarshalInto(buf)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Filename, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ if p.PFlags, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ return p.Attrs.UnmarshalFrom(buf)
+}
+
+// OpenDirPacket defines the SSH_FXP_OPENDIR packet.
+type OpenDirPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
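As an aside, the SSH_FXF_* bits defined at the top of this file follow open(2) semantics; a hypothetical helper translating Go's os.O_* flags into an OpenPacket's PFlags field might look like this (toPFlags is illustrative only, and assumes `import "os"`):

    func toPFlags(flag int) uint32 {
        var pflags uint32

        switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
        case os.O_RDONLY:
            pflags = FlagRead
        case os.O_WRONLY:
            pflags = FlagWrite
        case os.O_RDWR:
            pflags = FlagRead | FlagWrite
        }

        if flag&os.O_APPEND != 0 {
            pflags |= FlagAppend
        }
        if flag&os.O_CREATE != 0 {
            pflags |= FlagCreate
        }
        if flag&os.O_TRUNC != 0 {
            pflags |= FlagTruncate
        }
        if flag&os.O_EXCL != 0 {
            pflags |= FlagExclusive
        }

        return pflags
    }
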
+func (p *OpenDirPacket) Type() PacketType { + return PacketTypeOpenDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeOpenDir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go new file mode 100644 index 000000000..3f24e9c22 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go @@ -0,0 +1,323 @@ +package filexfer + +import ( + "errors" + "fmt" + "io" +) + +// smallBufferSize is an initial allocation minimal capacity. +const smallBufferSize = 64 + +func newPacketFromType(typ PacketType) (Packet, error) { + switch typ { + case PacketTypeOpen: + return new(OpenPacket), nil + case PacketTypeClose: + return new(ClosePacket), nil + case PacketTypeRead: + return new(ReadPacket), nil + case PacketTypeWrite: + return new(WritePacket), nil + case PacketTypeLStat: + return new(LStatPacket), nil + case PacketTypeFStat: + return new(FStatPacket), nil + case PacketTypeSetstat: + return new(SetstatPacket), nil + case PacketTypeFSetstat: + return new(FSetstatPacket), nil + case PacketTypeOpenDir: + return new(OpenDirPacket), nil + case PacketTypeReadDir: + return new(ReadDirPacket), nil + case PacketTypeRemove: + return new(RemovePacket), nil + case PacketTypeMkdir: + return new(MkdirPacket), nil + case PacketTypeRmdir: + return new(RmdirPacket), nil + case PacketTypeRealPath: + return new(RealPathPacket), nil + case PacketTypeStat: + return new(StatPacket), nil + case PacketTypeRename: + return new(RenamePacket), nil + case PacketTypeReadLink: + return new(ReadLinkPacket), nil + case PacketTypeSymlink: + return new(SymlinkPacket), nil + case PacketTypeExtended: + return new(ExtendedPacket), nil + default: + return nil, fmt.Errorf("unexpected request packet type: %v", typ) + } +} + +// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 +// +// RawPacket is intended for use in clients receiving responses, +// where a response will be expected to be of a limited number of types, +// and unmarshaling unknown/unexpected response packets is unnecessary. +// +// For servers expecting to receive arbitrary request packet types, +// use RequestPacket. +// +// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +type RawPacket struct { + PacketType PacketType + RequestID uint32 + + Data Buffer +} + +// Type returns the Type field defining the SSH_FXP_xy type for this packet. +func (p *RawPacket) Type() PacketType { + return p.PacketType +} + +// Reset clears the pointers and reference-semantic variables of RawPacket, +// releasing underlying resources, and making them and the RawPacket suitable to be reused, +// so long as no other references have been kept. +func (p *RawPacket) Reset() { + p.Data = Buffer{} +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
+//
+// The internal p.RequestID is overridden by the reqid argument.
+func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ buf = NewMarshalBuffer(0)
+ }
+
+ buf.StartPacket(p.PacketType, reqid)
+
+ return buf.Packet(p.Data.Bytes())
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RawPacket) MarshalBinary() ([]byte, error) {
+ return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RawPacket from the given Buffer into p.
+//
+// The Data field will alias the passed in Buffer,
+// so the buffer passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) UnmarshalFrom(buf *Buffer) error {
+ typ, err := buf.ConsumeUint8()
+ if err != nil {
+ return err
+ }
+
+ p.PacketType = PacketType(typ)
+
+ if p.RequestID, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ p.Data = *buf
+ return nil
+}
+
+// UnmarshalBinary decodes a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Data is not allowed to alias any part of the data byte slice.
+func (p *RawPacket) UnmarshalBinary(data []byte) error {
+ clone := make([]byte, len(data))
+ n := copy(clone, data)
+ return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// readPacket reads a uint32 length-prefixed binary data packet from r,
+// using the given byte slice as a backing array.
+//
+// If the packet length read from r is bigger than maxPacketLength,
+// or greater than math.MaxInt32 on a 32-bit implementation,
+// then an `ErrLongPacket` error will be returned.
+//
+// If the given byte slice is insufficient to hold the packet,
+// then it will be extended to fill the packet size.
+func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) {
+ if cap(b) < 4 {
+ // We will need to allocate our own buffer just for reading the packet length.
+
+ // However, we don’t really want to allocate an extremely narrow buffer (4-bytes),
+ // and cause unnecessary allocation churn from both length reads and small packet reads,
+ // so we use smallBufferSize from the bytes package as a reasonable guess.
+
+ // But if callers really do want to force narrow throw-away allocation of every packet body,
+ // they can do so with a buffer of capacity 4.
+ b = make([]byte, smallBufferSize)
+ }
+
+ if _, err := io.ReadFull(r, b[:4]); err != nil {
+ return nil, err
+ }
+
+ length := unmarshalUint32(b)
+ if int(length) < 5 {
+ // Must have at least uint8(type) and uint32(request-id)
+
+ if int(length) < 0 {
+ // Only possible when strconv.IntSize == 32,
+ // the packet length is longer than math.MaxInt32,
+ // and thus longer than any possible slice.
+ return nil, ErrLongPacket
+ }
+
+ return nil, ErrShortPacket
+ }
+ if length > maxPacketLength {
+ return nil, ErrLongPacket
+ }
+
+ if int(length) > cap(b) {
+ // We know int(length) must be positive, because of tests above.
+ b = make([]byte, length)
+ }
+
+ n, err := io.ReadFull(r, b[:length])
+ return b[:n], err
+}
+
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Data field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+ b, err := readPacket(r, b, maxPacketLength)
+ if err != nil {
+ return err
+ }
+
+ return p.UnmarshalFrom(NewBuffer(b))
+}
+
+// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02
+// but also automatically decodes/encodes valid request packets (2 < type < 100 || type == 200).
+//
+// RequestPacket is intended for use in servers receiving requests,
+// where any arbitrary request may be received, and so decoding them automatically
+// is useful.
+//
+// For clients expecting to receive specific response packet types,
+// where automatic unmarshaling of the packet body does not make sense,
+// use RawPacket.
+//
+// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3
+type RequestPacket struct {
+ RequestID uint32
+
+ Request Packet
+}
+
+// Type returns the SSH_FXP_xy value associated with the underlying packet.
+func (p *RequestPacket) Type() PacketType {
+ return p.Request.Type()
+}
+
+// Reset clears the pointers and reference-semantic variables in RequestPacket,
+// releasing underlying resources, and making them and the RequestPacket suitable to be reused,
+// so long as no other references have been kept.
+func (p *RequestPacket) Reset() {
+ p.Request = nil
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The internal p.RequestID is overridden by the reqid argument.
+func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ if p.Request == nil {
+ return nil, nil, errors.New("empty request packet")
+ }
+
+ return p.Request.MarshalPacket(reqid, b)
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RequestPacket) MarshalBinary() ([]byte, error) {
+ return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RequestPacket from the given Buffer into p.
+//
+// The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE),
+// so the buffer passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error {
+ typ, err := buf.ConsumeUint8()
+ if err != nil {
+ return err
+ }
+
+ p.Request, err = newPacketFromType(PacketType(typ))
+ if err != nil {
+ return err
+ }
+
+ if p.RequestID, err = buf.ConsumeUint32(); err != nil {
+ return err
+ }
+
+ return p.Request.UnmarshalPacketBody(buf)
+}
+
+// UnmarshalBinary decodes a full request packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Request is not allowed to alias any part of the data byte slice.
+func (p *RequestPacket) UnmarshalBinary(data []byte) error {
+ clone := make([]byte, len(data))
+ n := copy(clone, data)
+ return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Request field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+ b, err := readPacket(r, b, maxPacketLength)
+ if err != nil {
+ return err
+ }
+
+ return p.UnmarshalFrom(NewBuffer(b))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
new file mode 100644
index 000000000..e6f692d9f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
@@ -0,0 +1,368 @@
+package filexfer
+
+// LStatPacket defines the SSH_FXP_LSTAT packet.
+type LStatPacket struct {
+ Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *LStatPacket) Type() PacketType {
+ return PacketTypeLStat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Path) // string(path)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeLStat, reqid)
+ buf.AppendString(p.Path)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+ if p.Path, err = buf.ConsumeString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SetstatPacket defines the SSH_FXP_SETSTAT packet.
+type SetstatPacket struct {
+ Path string
+ Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *SetstatPacket) Type() PacketType {
+ return PacketTypeSetstat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeSetstat, reqid)
+ buf.AppendString(p.Path)
+
+ p.Attrs.MarshalInto(buf)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed. +func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RemovePacket defines the SSH_FXP_REMOVE packet. +type RemovePacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RemovePacket) Type() PacketType { + return PacketTypeRemove +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRemove, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// MkdirPacket defines the SSH_FXP_MKDIR packet. +type MkdirPacket struct { + Path string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *MkdirPacket) Type() PacketType { + return PacketTypeMkdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeMkdir, reqid) + buf.AppendString(p.Path) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RmdirPacket defines the SSH_FXP_RMDIR packet. +type RmdirPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RmdirPacket) Type() PacketType { + return PacketTypeRmdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRmdir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// RealPathPacket defines the SSH_FXP_REALPATH packet. +type RealPathPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RealPathPacket) Type() PacketType { + return PacketTypeRealPath +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
+func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRealPath, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// StatPacket defines the SSH_FXP_STAT packet. +type StatPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *StatPacket) Type() PacketType { + return PacketTypeStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeStat, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// RenamePacket defines the SSH_FXP_RENAME packet. +type RenamePacket struct { + OldPath string + NewPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RenamePacket) Type() PacketType { + return PacketTypeRename +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(oldpath) + string(newpath) + size := 4 + len(p.OldPath) + 4 + len(p.NewPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRename, reqid) + buf.AppendString(p.OldPath) + buf.AppendString(p.NewPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.OldPath, err = buf.ConsumeString(); err != nil { + return err + } + + if p.NewPath, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// ReadLinkPacket defines the SSH_FXP_READLINK packet. +type ReadLinkPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadLinkPacket) Type() PacketType { + return PacketTypeReadLink +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadLink, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. 
+func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// SymlinkPacket defines the SSH_FXP_SYMLINK packet. +// +// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. +// Unfortunately, the reversal was not noticed until the server was widely deployed. +// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +type SymlinkPacket struct { + LinkPath string + TargetPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *SymlinkPacket) Type() PacketType { + return PacketTypeSymlink +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(targetpath) + string(linkpath) + size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeSymlink, reqid) + + // Arguments were inadvertently reversed. + buf.AppendString(p.TargetPath) + buf.AppendString(p.LinkPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + // Arguments were inadvertently reversed. + if p.TargetPath, err = buf.ConsumeString(); err != nil { + return err + } + + if p.LinkPath, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go new file mode 100644 index 000000000..2fe63d591 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go @@ -0,0 +1,114 @@ +package filexfer + +// FileMode represents a file’s mode and permission bits. +// The bits are defined according to POSIX standards, +// and may not apply to the OS being built for. +type FileMode uint32 + +// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations. +const ( + ModePerm FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO + ModeUserRead FileMode = 0o0400 // S_IRUSR + ModeUserWrite FileMode = 0o0200 // S_IWUSR + ModeUserExec FileMode = 0o0100 // S_IXUSR + ModeGroupRead FileMode = 0o0040 // S_IRGRP + ModeGroupWrite FileMode = 0o0020 // S_IWGRP + ModeGroupExec FileMode = 0o0010 // S_IXGRP + ModeOtherRead FileMode = 0o0004 // S_IROTH + ModeOtherWrite FileMode = 0o0002 // S_IWOTH + ModeOtherExec FileMode = 0o0001 // S_IXOTH + + ModeSetUID FileMode = 0o4000 // S_ISUID + ModeSetGID FileMode = 0o2000 // S_ISGID + ModeSticky FileMode = 0o1000 // S_ISVTX + + ModeType FileMode = 0xF000 // S_IFMT + ModeNamedPipe FileMode = 0x1000 // S_IFIFO + ModeCharDevice FileMode = 0x2000 // S_IFCHR + ModeDir FileMode = 0x4000 // S_IFDIR + ModeDevice FileMode = 0x6000 // S_IFBLK + ModeRegular FileMode = 0x8000 // S_IFREG + ModeSymlink FileMode = 0xA000 // S_IFLNK + ModeSocket FileMode = 0xC000 // S_IFSOCK +) + +// IsDir reports whether m describes a directory. +// That is, it tests for m.Type() == ModeDir. +func (m FileMode) IsDir() bool { + return (m & ModeType) == ModeDir +} + +// IsRegular reports whether m describes a regular file. 
+// That is, it tests for m.Type() == ModeRegular.
+func (m FileMode) IsRegular() bool {
+ return (m & ModeType) == ModeRegular
+}
+
+// Perm returns the POSIX permission bits in m (m & ModePerm).
+func (m FileMode) Perm() FileMode {
+ return (m & ModePerm)
+}
+
+// Type returns the type bits in m (m & ModeType).
+func (m FileMode) Type() FileMode {
+ return (m & ModeType)
+}
+
+// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string.
+func (m FileMode) String() string {
+ var buf [10]byte
+
+ switch m.Type() {
+ case ModeRegular:
+ buf[0] = '-'
+ case ModeDir:
+ buf[0] = 'd'
+ case ModeSymlink:
+ buf[0] = 'l'
+ case ModeDevice:
+ buf[0] = 'b'
+ case ModeCharDevice:
+ buf[0] = 'c'
+ case ModeNamedPipe:
+ buf[0] = 'p'
+ case ModeSocket:
+ buf[0] = 's'
+ default:
+ buf[0] = '?'
+ }
+
+ const rwx = "rwxrwxrwx"
+ for i, c := range rwx {
+ if m&(1<<uint(9-1-i)) != 0 {
+ buf[i+1] = byte(c)
+ } else {
+ buf[i+1] = '-'
+ }
+ }
+
+ if m&ModeSetUID != 0 {
+ if buf[3] == 'x' {
+ buf[3] = 's'
+ } else {
+ buf[3] = 'S'
+ }
+ }
+
+ if m&ModeSetGID != 0 {
+ if buf[6] == 'x' {
+ buf[6] = 's'
+ } else {
+ buf[6] = 'S'
+ }
+ }
+
+ if m&ModeSticky != 0 {
+ if buf[9] == 'x' {
+ buf[9] = 't'
+ } else {
+ buf[9] = 'T'
+ }
+ }
+
+ return string(buf[:])
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
new file mode 100644
index 000000000..7a9b3eae8
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
@@ -0,0 +1,243 @@
+package filexfer
+
+import (
+ "fmt"
+)
+
+// StatusPacket defines the SSH_FXP_STATUS packet.
+//
+// Specified in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7
+type StatusPacket struct {
+ StatusCode Status
+ ErrorMessage string
+ LanguageTag string
+}
+
+// Error makes StatusPacket an error type.
+func (p *StatusPacket) Error() string {
+ if p.ErrorMessage == "" {
+ return "sftp: " + p.StatusCode.String()
+ }
+
+ return fmt.Sprintf("sftp: %q (%s)", p.ErrorMessage, p.StatusCode)
+}
+
+// Is returns true if target is a StatusPacket with the same StatusCode,
+// or target is a Status code which is the same as StatusCode.
+func (p *StatusPacket) Is(target error) bool {
+ if target, ok := target.(*StatusPacket); ok {
+ return p.StatusCode == target.StatusCode
+ }
+
+ return p.StatusCode == target
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *StatusPacket) Type() PacketType {
+ return PacketTypeStatus
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *StatusPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+ buf := NewBuffer(b)
+ if buf.Cap() < 9 {
+ // uint32(error/status code) + string(error message) + string(language tag)
+ size := 4 + 4 + len(p.ErrorMessage) + 4 + len(p.LanguageTag)
+ buf = NewMarshalBuffer(size)
+ }
+
+ buf.StartPacket(PacketTypeStatus, reqid)
+ buf.AppendUint32(uint32(p.StatusCode))
+ buf.AppendString(p.ErrorMessage)
+ buf.AppendString(p.LanguageTag)
+
+ return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
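Since StatusPacket implements error (and the Is method above), server status responses compose with the standard errors package; a client loop could detect end-of-data like this (a sketch, assuming `import "errors"` and this package's StatusEOF constant for SSH_FX_EOF; recvStatus is a stand-in for whatever returns the decoded *StatusPacket as an error):

    if err := recvStatus(); errors.Is(err, &StatusPacket{StatusCode: StatusEOF}) {
        // SSH_FX_EOF: end of file or directory listing, not a real failure
    }
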
+func (p *StatusPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + statusCode, err := buf.ConsumeUint32() + if err != nil { + return err + } + p.StatusCode = Status(statusCode) + + if p.ErrorMessage, err = buf.ConsumeString(); err != nil { + return err + } + + if p.LanguageTag, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// HandlePacket defines the SSH_FXP_HANDLE packet. +type HandlePacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *HandlePacket) Type() PacketType { + return PacketTypeHandle +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *HandlePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeHandle, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *HandlePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// DataPacket defines the SSH_FXP_DATA packet. +type DataPacket struct { + Data []byte +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *DataPacket) Type() PacketType { + return PacketTypeData +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *DataPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 // uint32(len(data)); data content in payload + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeData, reqid) + buf.AppendUint32(uint32(len(p.Data))) + + return buf.Packet(p.Data) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is already populated, and of sufficient length to hold the data, +// then this will copy the data into that byte slice. +// +// If p.Data has a length insufficient to hold the data, +// then this will make a new slice of sufficient length, and copy the data into that. +// +// This means this _does not_ alias any of the data buffer that is passed in. +func (p *DataPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + data, err := buf.ConsumeByteSlice() + if err != nil { + return err + } + + if len(p.Data) < len(data) { + p.Data = make([]byte, len(data)) + } + + n := copy(p.Data, data) + p.Data = p.Data[:n] + return nil +} + +// NamePacket defines the SSH_FXP_NAME packet. +type NamePacket struct { + Entries []*NameEntry +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *NamePacket) Type() PacketType { + return PacketTypeName +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
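Note the copy-don't-alias contract documented on DataPacket.UnmarshalPacketBody above: because it reuses p.Data whenever the slice is large enough, a download loop can allocate a single buffer up front and reuse it for every SSH_FXP_DATA response (sketch only; recvData and process are stand-ins, and 32 KiB is an arbitrary read size):

    pkt := &DataPacket{Data: make([]byte, 32*1024)}
    for {
        if err := recvData(pkt); err != nil {
            break // e.g. an SSH_FX_EOF status terminates the loop
        }
        // pkt.Data is now re-sliced to exactly the bytes received,
        // but keeps its backing array for the next iteration.
        process(pkt.Data)
    }
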
+func (p *NamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 // uint32(len(entries)) + + for _, e := range p.Entries { + size += e.Len() + } + + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeName, reqid) + buf.AppendUint32(uint32(len(p.Entries))) + + for _, e := range p.Entries { + e.MarshalInto(buf) + } + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + count, err := buf.ConsumeUint32() + if err != nil { + return err + } + + p.Entries = make([]*NameEntry, 0, count) + + for i := uint32(0); i < count; i++ { + var e NameEntry + if err := e.UnmarshalFrom(buf); err != nil { + return err + } + + p.Entries = append(p.Entries, &e) + } + + return nil +} + +// AttrsPacket defines the SSH_FXP_ATTRS packet. +type AttrsPacket struct { + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *AttrsPacket) Type() PacketType { + return PacketTypeAttrs +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *AttrsPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := p.Attrs.Len() // ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeAttrs, reqid) + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *AttrsPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + return p.Attrs.UnmarshalFrom(buf) +} diff --git a/vendor/github.com/pkg/sftp/ls_formatting.go b/vendor/github.com/pkg/sftp/ls_formatting.go new file mode 100644 index 000000000..e083e22a4 --- /dev/null +++ b/vendor/github.com/pkg/sftp/ls_formatting.go @@ -0,0 +1,81 @@ +package sftp + +import ( + "errors" + "fmt" + "os" + "os/user" + "strconv" + "time" + + sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer" +) + +func lsFormatID(id uint32) string { + return strconv.FormatUint(uint64(id), 10) +} + +type osIDLookup struct{} + +func (osIDLookup) Filelist(*Request) (ListerAt, error) { + return nil, errors.New("unimplemented stub") +} + +func (osIDLookup) LookupUserName(uid string) string { + u, err := user.LookupId(uid) + if err != nil { + return uid + } + + return u.Username +} + +func (osIDLookup) LookupGroupName(gid string) string { + g, err := user.LookupGroupId(gid) + if err != nil { + return gid + } + + return g.Name +} + +// runLs formats the FileInfo as per `ls -l` style, which is in the 'longname' field of a SSH_FXP_NAME entry. +// This is a fairly simple implementation, just enough to look close to openssh in simple cases. 
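For reference, the Sprintf at the end of runLs below produces openssh-style longname lines along these lines (illustrative output only, with numeric ids left unresolved):

    -rw-r--r--    1 1000     1000         4096 Jan 2 15:04 notes.txt
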
+func runLs(idLookup NameLookupFileLister, dirent os.FileInfo) string { + // example from openssh sftp server: + // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd + // format: + // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name + + symPerms := sshfx.FileMode(fromFileMode(dirent.Mode())).String() + + var numLinks uint64 = 1 + uid, gid := "0", "0" + + switch sys := dirent.Sys().(type) { + case *sshfx.Attributes: + uid = lsFormatID(sys.UID) + gid = lsFormatID(sys.GID) + case *FileStat: + uid = lsFormatID(sys.UID) + gid = lsFormatID(sys.GID) + default: + numLinks, uid, gid = lsLinksUIDGID(dirent) + } + + if idLookup != nil { + uid, gid = idLookup.LookupUserName(uid), idLookup.LookupGroupName(gid) + } + + mtime := dirent.ModTime() + date := mtime.Format("Jan 2") + + var yearOrTime string + if mtime.Before(time.Now().AddDate(0, -6, 0)) { + yearOrTime = mtime.Format("2006") + } else { + yearOrTime = mtime.Format("15:04") + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %5s %s", symPerms, numLinks, uid, gid, dirent.Size(), date, yearOrTime, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/ls_plan9.go b/vendor/github.com/pkg/sftp/ls_plan9.go new file mode 100644 index 000000000..a16a3ea06 --- /dev/null +++ b/vendor/github.com/pkg/sftp/ls_plan9.go @@ -0,0 +1,21 @@ +// +build plan9 + +package sftp + +import ( + "os" + "syscall" +) + +func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) { + numLinks = 1 + uid, gid = "0", "0" + + switch sys := fi.Sys().(type) { + case *syscall.Dir: + uid = sys.Uid + gid = sys.Gid + } + + return numLinks, uid, gid +} diff --git a/vendor/github.com/pkg/sftp/ls_stub.go b/vendor/github.com/pkg/sftp/ls_stub.go new file mode 100644 index 000000000..6dec39378 --- /dev/null +++ b/vendor/github.com/pkg/sftp/ls_stub.go @@ -0,0 +1,11 @@ +// +build windows android + +package sftp + +import ( + "os" +) + +func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) { + return 1, "0", "0" +} diff --git a/vendor/github.com/pkg/sftp/ls_unix.go b/vendor/github.com/pkg/sftp/ls_unix.go new file mode 100644 index 000000000..59ccffde5 --- /dev/null +++ b/vendor/github.com/pkg/sftp/ls_unix.go @@ -0,0 +1,23 @@ +// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js + +package sftp + +import ( + "os" + "syscall" +) + +func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) { + numLinks = 1 + uid, gid = "0", "0" + + switch sys := fi.Sys().(type) { + case *syscall.Stat_t: + numLinks = uint64(sys.Nlink) + uid = lsFormatID(sys.Uid) + gid = lsFormatID(sys.Gid) + default: + } + + return numLinks, uid, gid +} diff --git a/vendor/github.com/pkg/sftp/match.go b/vendor/github.com/pkg/sftp/match.go new file mode 100644 index 000000000..875006afd --- /dev/null +++ b/vendor/github.com/pkg/sftp/match.go @@ -0,0 +1,137 @@ +package sftp + +import ( + "path" + "strings" +) + +// ErrBadPattern indicates a globbing pattern was malformed. +var ErrBadPattern = path.ErrBadPattern + +// Match reports whether name matches the shell pattern. +// +// This is an alias for path.Match from the standard library, +// offered so that callers need not import the path package. +// For details, see https://golang.org/pkg/path/#Match. 
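Client-side globbing, defined just below, applies these path.Match semantics to the remote tree; typical use looks like this (a sketch, assuming an established *Client named c and `import "fmt"`):

    matches, err := c.Glob("/var/log/*.log")
    if err != nil {
        return err // only ErrBadPattern is possible here
    }
    for _, name := range matches {
        fmt.Println(name)
    }
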
+func Match(pattern, name string) (matched bool, err error) {
+ return path.Match(pattern, name)
+}
+
+// detect if byte(char) is path separator
+func isPathSeparator(c byte) bool {
+ return c == '/'
+}
+
+// Split splits the path p immediately following the final slash,
+// separating it into a directory and file name component.
+//
+// This is an alias for path.Split from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Split.
+func Split(p string) (dir, file string) {
+ return path.Split(p)
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed.
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+func (c *Client) Glob(pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ file, err := c.Lstat(pattern)
+ if err != nil {
+ return nil, nil
+ }
+ dir, _ := Split(pattern)
+ dir = cleanGlobPath(dir)
+ return []string{Join(dir, file.Name())}, nil
+ }
+
+ dir, file := Split(pattern)
+ dir = cleanGlobPath(dir)
+
+ if !hasMeta(dir) {
+ return c.glob(dir, file, nil)
+ }
+
+ // Prevent infinite recursion. See issue 15879.
+ if dir == pattern {
+ return nil, ErrBadPattern
+ }
+
+ var m []string
+ m, err = c.Glob(dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = c.glob(d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// cleanGlobPath prepares path for glob matching.
+func cleanGlobPath(path string) string {
+ switch path {
+ case "":
+ return "."
+ case "/":
+ return path
+ default:
+ return path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := c.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ names, err := c.ReadDir(dir)
+ if err != nil {
+ return
+ }
+ //sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := Match(pattern, n.Name())
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, Join(dir, n.Name()))
+ }
+ }
+ return
+}
+
+// Join joins any number of path elements into a single path, separating
+// them with slashes.
+//
+// This is an alias for path.Join from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Join.
+func Join(elem ...string) string {
+ return path.Join(elem...)
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ return strings.ContainsAny(path, "\\*?[")
+}
diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go
new file mode 100644
index 000000000..c740c4c8c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/packet-manager.go
@@ -0,0 +1,216 @@
+package sftp
+
+import (
+ "encoding"
+ "sort"
+ "sync"
+)
+
+// The goal of the packetManager is to keep the outgoing packets in the same
+// order as the incoming as is required by section 7 of the RFC.
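In other words: requests are tagged with a monotonically increasing order id as they arrive, workers may finish them out of order, and finished responses are held back until every lower order id has been sent. A stripped-down sketch of that invariant (illustrative only; the real implementation below keeps sorted slices of both sides):

    // reorder buffers finished responses until their turn comes up.
    // pending must be initialized, and next starts at the first order
    // id handed out (1, per newOrderID below).
    type reorder struct {
        next    uint32            // order id that must be sent next
        pending map[uint32][]byte // finished responses awaiting their turn
    }

    func (r *reorder) finish(oid uint32, resp []byte, send func([]byte)) {
        r.pending[oid] = resp
        for b, ok := r.pending[r.next]; ok; b, ok = r.pending[r.next] {
            send(b)
            delete(r.pending, r.next)
            r.next++
        }
    }
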
+
+type packetManager struct {
+ requests chan orderedPacket
+ responses chan orderedPacket
+ fini chan struct{}
+ incoming orderedPackets
+ outgoing orderedPackets
+ sender packetSender // connection object
+ working *sync.WaitGroup
+ packetCount uint32
+ // it is not nil if the allocator is enabled
+ alloc *allocator
+}
+
+type packetSender interface {
+ sendPacket(encoding.BinaryMarshaler) error
+}
+
+func newPktMgr(sender packetSender) *packetManager {
+ s := &packetManager{
+ requests: make(chan orderedPacket, SftpServerWorkerCount),
+ responses: make(chan orderedPacket, SftpServerWorkerCount),
+ fini: make(chan struct{}),
+ incoming: make([]orderedPacket, 0, SftpServerWorkerCount),
+ outgoing: make([]orderedPacket, 0, SftpServerWorkerCount),
+ sender: sender,
+ working: &sync.WaitGroup{},
+ }
+ go s.controller()
+ return s
+}
+
+//// packet ordering
+func (s *packetManager) newOrderID() uint32 {
+ s.packetCount++
+ return s.packetCount
+}
+
+// returns the next orderID without incrementing it.
+// This is used before receiving a new packet, with the allocator enabled, to associate
+// the slice allocated for the received packet with the orderID that will be used to mark
+// the allocated slices for reuse once the request is served.
+func (s *packetManager) getNextOrderID() uint32 {
+ return s.packetCount + 1
+}
+
+type orderedRequest struct {
+ requestPacket
+ orderid uint32
+}
+
+func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest {
+ return orderedRequest{requestPacket: p, orderid: s.newOrderID()}
+}
+func (p orderedRequest) orderID() uint32 { return p.orderid }
+func (p orderedRequest) setOrderID(oid uint32) { p.orderid = oid }
+
+type orderedResponse struct {
+ responsePacket
+ orderid uint32
+}
+
+func (s *packetManager) newOrderedResponse(p responsePacket, id uint32,
+) orderedResponse {
+ return orderedResponse{responsePacket: p, orderid: id}
+}
+func (p orderedResponse) orderID() uint32 { return p.orderid }
+func (p orderedResponse) setOrderID(oid uint32) { p.orderid = oid }
+
+type orderedPacket interface {
+ id() uint32
+ orderID() uint32
+}
+type orderedPackets []orderedPacket
+
+func (o orderedPackets) Sort() {
+ sort.Slice(o, func(i, j int) bool {
+ return o[i].orderID() < o[j].orderID()
+ })
+}
+
+//// packet registry
+// register incoming packets to be handled
+func (s *packetManager) incomingPacket(pkt orderedRequest) {
+ s.working.Add(1)
+ s.requests <- pkt
+}
+
+// register outgoing packets as being ready
+func (s *packetManager) readyPacket(pkt orderedResponse) {
+ s.responses <- pkt
+ s.working.Done()
+}
+
+// shut down packetManager controller
+func (s *packetManager) close() {
+ // pause until current packets are processed
+ s.working.Wait()
+ close(s.fini)
+}
+
+// Passed a worker function, returns a channel for incoming packets.
+// Keeps packet responses in the order they are received while
+// maximizing throughput of file transfers.
+func (s *packetManager) workerChan(runWorker func(chan orderedRequest), +) chan orderedRequest { + // multiple workers for faster read/writes + rwChan := make(chan orderedRequest, SftpServerWorkerCount) + for i := 0; i < SftpServerWorkerCount; i++ { + runWorker(rwChan) + } + + // single worker to enforce sequential processing of everything else + cmdChan := make(chan orderedRequest) + runWorker(cmdChan) + + pktChan := make(chan orderedRequest, SftpServerWorkerCount) + go func() { + for pkt := range pktChan { + switch pkt.requestPacket.(type) { + case *sshFxpReadPacket, *sshFxpWritePacket: + s.incomingPacket(pkt) + rwChan <- pkt + continue + case *sshFxpClosePacket: + // wait for reads/writes to finish when file is closed + // incomingPacket() call must occur after this + s.working.Wait() + } + s.incomingPacket(pkt) + // all non-RW use sequential cmdChan + cmdChan <- pkt + } + close(rwChan) + close(cmdChan) + s.close() + }() + + return pktChan +} + +// process packets +func (s *packetManager) controller() { + for { + select { + case pkt := <-s.requests: + debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderID()) + s.incoming = append(s.incoming, pkt) + s.incoming.Sort() + case pkt := <-s.responses: + debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderID()) + s.outgoing = append(s.outgoing, pkt) + s.outgoing.Sort() + case <-s.fini: + return + } + s.maybeSendPackets() + } +} + +// send as many packets as are ready +func (s *packetManager) maybeSendPackets() { + for { + if len(s.outgoing) == 0 || len(s.incoming) == 0 { + debug("break! -- outgoing: %v; incoming: %v", + len(s.outgoing), len(s.incoming)) + break + } + out := s.outgoing[0] + in := s.incoming[0] + // debug("incoming: %v", ids(s.incoming)) + // debug("outgoing: %v", ids(s.outgoing)) + if in.orderID() == out.orderID() { + debug("Sending packet: %v", out.id()) + s.sender.sendPacket(out.(encoding.BinaryMarshaler)) + if s.alloc != nil { + // mark for reuse the slices allocated for this request + s.alloc.ReleasePages(in.orderID()) + } + // pop off heads + copy(s.incoming, s.incoming[1:]) // shift left + s.incoming[len(s.incoming)-1] = nil // clear last + s.incoming = s.incoming[:len(s.incoming)-1] // remove last + copy(s.outgoing, s.outgoing[1:]) // shift left + s.outgoing[len(s.outgoing)-1] = nil // clear last + s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last + } else { + break + } + } +} + +// func oids(o []orderedPacket) []uint32 { +// res := make([]uint32, 0, len(o)) +// for _, v := range o { +// res = append(res, v.orderId()) +// } +// return res +// } +// func ids(o []orderedPacket) []uint32 { +// res := make([]uint32, 0, len(o)) +// for _, v := range o { +// res = append(res, v.id()) +// } +// return res +// } diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go new file mode 100644 index 000000000..f4f905295 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -0,0 +1,135 @@ +package sftp + +import ( + "encoding" + "fmt" +) + +// all incoming packets +type requestPacket interface { + encoding.BinaryUnmarshaler + id() uint32 +} + +type responsePacket interface { + encoding.BinaryMarshaler + id() uint32 +} + +// interfaces to group types +type hasPath interface { + requestPacket + getPath() string +} + +type hasHandle interface { + requestPacket + getHandle() string +} + +type notReadOnly interface { + notReadOnly() +} + +//// define types by adding methods +// hasPath +func (p *sshFxpLstatPacket) getPath() string { return p.Path } +func (p 
*sshFxpStatPacket) getPath() string { return p.Path } +func (p *sshFxpRmdirPacket) getPath() string { return p.Path } +func (p *sshFxpReadlinkPacket) getPath() string { return p.Path } +func (p *sshFxpRealpathPacket) getPath() string { return p.Path } +func (p *sshFxpMkdirPacket) getPath() string { return p.Path } +func (p *sshFxpSetstatPacket) getPath() string { return p.Path } +func (p *sshFxpStatvfsPacket) getPath() string { return p.Path } +func (p *sshFxpRemovePacket) getPath() string { return p.Filename } +func (p *sshFxpRenamePacket) getPath() string { return p.Oldpath } +func (p *sshFxpSymlinkPacket) getPath() string { return p.Targetpath } +func (p *sshFxpOpendirPacket) getPath() string { return p.Path } +func (p *sshFxpOpenPacket) getPath() string { return p.Path } + +func (p *sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath } +func (p *sshFxpExtendedPacketHardlink) getPath() string { return p.Oldpath } + +// getHandle +func (p *sshFxpFstatPacket) getHandle() string { return p.Handle } +func (p *sshFxpFsetstatPacket) getHandle() string { return p.Handle } +func (p *sshFxpReadPacket) getHandle() string { return p.Handle } +func (p *sshFxpWritePacket) getHandle() string { return p.Handle } +func (p *sshFxpReaddirPacket) getHandle() string { return p.Handle } +func (p *sshFxpClosePacket) getHandle() string { return p.Handle } + +// notReadOnly +func (p *sshFxpWritePacket) notReadOnly() {} +func (p *sshFxpSetstatPacket) notReadOnly() {} +func (p *sshFxpFsetstatPacket) notReadOnly() {} +func (p *sshFxpRemovePacket) notReadOnly() {} +func (p *sshFxpMkdirPacket) notReadOnly() {} +func (p *sshFxpRmdirPacket) notReadOnly() {} +func (p *sshFxpRenamePacket) notReadOnly() {} +func (p *sshFxpSymlinkPacket) notReadOnly() {} +func (p *sshFxpExtendedPacketPosixRename) notReadOnly() {} +func (p *sshFxpExtendedPacketHardlink) notReadOnly() {} + +// some packets with ID are missing id() +func (p *sshFxpDataPacket) id() uint32 { return p.ID } +func (p *sshFxpStatusPacket) id() uint32 { return p.ID } +func (p *sshFxpStatResponse) id() uint32 { return p.ID } +func (p *sshFxpNamePacket) id() uint32 { return p.ID } +func (p *sshFxpHandlePacket) id() uint32 { return p.ID } +func (p *StatVFS) id() uint32 { return p.ID } +func (p *sshFxVersionPacket) id() uint32 { return 0 } + +// take raw incoming packet data and build packet objects +func makePacket(p rxPacket) (requestPacket, error) { + var pkt requestPacket + switch p.pktType { + case sshFxpInit: + pkt = &sshFxInitPacket{} + case sshFxpLstat: + pkt = &sshFxpLstatPacket{} + case sshFxpOpen: + pkt = &sshFxpOpenPacket{} + case sshFxpClose: + pkt = &sshFxpClosePacket{} + case sshFxpRead: + pkt = &sshFxpReadPacket{} + case sshFxpWrite: + pkt = &sshFxpWritePacket{} + case sshFxpFstat: + pkt = &sshFxpFstatPacket{} + case sshFxpSetstat: + pkt = &sshFxpSetstatPacket{} + case sshFxpFsetstat: + pkt = &sshFxpFsetstatPacket{} + case sshFxpOpendir: + pkt = &sshFxpOpendirPacket{} + case sshFxpReaddir: + pkt = &sshFxpReaddirPacket{} + case sshFxpRemove: + pkt = &sshFxpRemovePacket{} + case sshFxpMkdir: + pkt = &sshFxpMkdirPacket{} + case sshFxpRmdir: + pkt = &sshFxpRmdirPacket{} + case sshFxpRealpath: + pkt = &sshFxpRealpathPacket{} + case sshFxpStat: + pkt = &sshFxpStatPacket{} + case sshFxpRename: + pkt = &sshFxpRenamePacket{} + case sshFxpReadlink: + pkt = &sshFxpReadlinkPacket{} + case sshFxpSymlink: + pkt = &sshFxpSymlinkPacket{} + case sshFxpExtended: + pkt = &sshFxpExtendedPacket{} + default: + return nil, fmt.Errorf("unhandled packet type: 
%s", p.pktType) + } + if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { + // Return partially unpacked packet to allow callers to return + // error messages appropriately with necessary id() method. + return pkt, err + } + return pkt, nil +} diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go new file mode 100644 index 000000000..4059cf8e0 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet.go @@ -0,0 +1,1276 @@ +package sftp + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "reflect" +) + +var ( + errLongPacket = errors.New("packet too long") + errShortPacket = errors.New("packet too short") + errUnknownExtendedPacket = errors.New("unknown extended packet") +) + +const ( + maxMsgLength = 256 * 1024 + debugDumpTxPacket = false + debugDumpRxPacket = false + debugDumpTxPacketBytes = false + debugDumpRxPacketBytes = false +) + +func marshalUint32(b []byte, v uint32) []byte { + return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +func marshalUint64(b []byte, v uint64) []byte { + return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v)) +} + +func marshalString(b []byte, v string) []byte { + return append(marshalUint32(b, uint32(len(v))), v...) +} + +func marshalFileInfo(b []byte, fi os.FileInfo) []byte { + // attributes variable struct, and also variable per protocol version + // spec version 3 attributes: + // uint32 flags + // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE + // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS + // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED + // string extended_type + // string extended_data + // ... 
more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&sshFileXferAttrSize != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&sshFileXferAttrUIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&sshFileXferAttrPermissions != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&sshFileXferAttrACmodTime != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + return b +} + +func marshalStatus(b []byte, err StatusError) []byte { + b = marshalUint32(b, err.Code) + b = marshalString(b, err.msg) + b = marshalString(b, err.lang) + return b +} + +func marshal(b []byte, v interface{}) []byte { + if v == nil { + return b + } + switch v := v.(type) { + case uint8: + return append(b, v) + case uint32: + return marshalUint32(b, v) + case uint64: + return marshalUint64(b, v) + case string: + return marshalString(b, v) + case os.FileInfo: + return marshalFileInfo(b, v) + default: + switch d := reflect.ValueOf(v); d.Kind() { + case reflect.Struct: + for i, n := 0, d.NumField(); i < n; i++ { + b = marshal(b, d.Field(i).Interface()) + } + return b + case reflect.Slice: + for i, n := 0, d.Len(); i < n; i++ { + b = marshal(b, d.Index(i).Interface()) + } + return b + default: + panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) + } + } +} + +func unmarshalUint32(b []byte) (uint32, []byte) { + v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return v, b[4:] +} + +func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { + var v uint32 + if len(b) < 4 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint32(b) + return v, b, nil +} + +func unmarshalUint64(b []byte) (uint64, []byte) { + h, b := unmarshalUint32(b) + l, b := unmarshalUint32(b) + return uint64(h)<<32 | uint64(l), b +} + +func unmarshalUint64Safe(b []byte) (uint64, []byte, error) { + var v uint64 + if len(b) < 8 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint64(b) + return v, b, nil +} + +func unmarshalString(b []byte) (string, []byte) { + n, b := unmarshalUint32(b) + return string(b[:n]), b[n:] +} + +func unmarshalStringSafe(b []byte) (string, []byte, error) { + n, b, err := unmarshalUint32Safe(b) + if err != nil { + return "", nil, err + } + if int64(n) > int64(len(b)) { + return "", nil, errShortPacket + } + return string(b[:n]), b[n:], nil +} + +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + return unmarshalFileStat(flags, b) +} + +func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { + var fs FileStat + if flags&sshFileXferAttrSize == sshFileXferAttrSize { + fs.Size, b, _ = unmarshalUint64Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.UID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.GID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { + fs.Mode, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { + fs.Atime, b, _ = unmarshalUint32Safe(b) + fs.Mtime, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrExtended == sshFileXferAttrExtended { + var count uint32 + count, b, _ = unmarshalUint32Safe(b) + ext := make([]StatExtended, count) + for i := uint32(0); i < count; i++ { + var typ string + var data string 
+ typ, b, _ = unmarshalStringSafe(b) + data, b, _ = unmarshalStringSafe(b) + ext[i] = StatExtended{ + ExtType: typ, + ExtData: data, + } + } + fs.Extended = ext + } + return &fs, b +} + +func unmarshalStatus(id uint32, data []byte) error { + sid, data := unmarshalUint32(data) + if sid != id { + return &unexpectedIDErr{id, sid} + } + code, data := unmarshalUint32(data) + msg, data, _ := unmarshalStringSafe(data) + lang, _, _ := unmarshalStringSafe(data) + return &StatusError{ + Code: code, + msg: msg, + lang: lang, + } +} + +type packetMarshaler interface { + marshalPacket() (header, payload []byte, err error) +} + +func marshalPacket(m encoding.BinaryMarshaler) (header, payload []byte, err error) { + if m, ok := m.(packetMarshaler); ok { + return m.marshalPacket() + } + + header, err = m.MarshalBinary() + return +} + +// sendPacket marshals m and writes it out with the uint32 length prefix defined by the SFTP protocol draft (draft-ietf-secsh-filexfer). +func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { + header, payload, err := marshalPacket(m) + if err != nil { + return fmt.Errorf("binary marshaller failed: %w", err) + } + + length := len(header) + len(payload) - 4 // subtract the uint32(length) from the start + if debugDumpTxPacketBytes { + debug("send packet: %s %d bytes %x%x", fxp(header[4]), length, header[5:], payload) + } else if debugDumpTxPacket { + debug("send packet: %s %d bytes", fxp(header[4]), length) + } + + binary.BigEndian.PutUint32(header[:4], uint32(length)) + + if _, err := w.Write(header); err != nil { + return fmt.Errorf("failed to send packet: %w", err) + } + + if len(payload) > 0 { + if _, err := w.Write(payload); err != nil { + return fmt.Errorf("failed to send packet payload: %w", err) + } + } + + return nil +} + +func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, error) { + var b []byte + if alloc != nil { + b = alloc.GetPage(orderID) + } else { + b = make([]byte, 4) + } + if _, err := io.ReadFull(r, b[:4]); err != nil { + return 0, nil, err + } + length, _ := unmarshalUint32(b) + if length > maxMsgLength { + debug("recv packet %d bytes too long", length) + return 0, nil, errLongPacket + } + if length == 0 { + debug("recv packet of 0 bytes too short") + return 0, nil, errShortPacket + } + if alloc == nil { + b = make([]byte, length) + } + if _, err := io.ReadFull(r, b[:length]); err != nil { + debug("recv packet %d bytes: err %v", length, err) + return 0, nil, err + } + if debugDumpRxPacketBytes { + debug("recv packet: %s %d bytes %x", fxp(b[0]), length, b[1:length]) + } else if debugDumpRxPacket { + debug("recv packet: %s %d bytes", fxp(b[0]), length) + } + return b[0], b[1:length], nil +} + +type extensionPair struct { + Name string + Data string +} + +func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { + var ep extensionPair + var err error + ep.Name, b, err = unmarshalStringSafe(b) + if err != nil { + return ep, b, err + } + ep.Data, b, err = unmarshalStringSafe(b) + return ep, b, err +} + +// Here starts the definition of packets along with their MarshalBinary +// implementations. +// Manually writing the marshalling logic saves us a lot of time and +// allocations.
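// [Editorial aside, not part of the vendored file] To make the hand-rolled
// marshalling above concrete, here is a minimal, self-contained sketch that
// builds an SSH_FXP_STAT request the same way these helpers do: every packet
// is uint32(length) | byte(type) | payload, where the length prefix excludes
// its own four bytes. The type value 17 follows draft-ietf-secsh-filexfer-02.
//
//	package main
//
//	import (
//		"encoding/binary"
//		"fmt"
//	)
//
//	func main() {
//		const sshFxpStat = 17
//		id, path := uint32(1), "/etc/hosts"
//
//		b := make([]byte, 4)      // reserve room for the uint32 length prefix
//		b = append(b, sshFxpStat) // type byte
//		b = append(b, byte(id>>24), byte(id>>16), byte(id>>8), byte(id)) // uint32 request id
//		n := uint32(len(path))
//		b = append(b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) // string = uint32 length...
//		b = append(b, path...)                                       // ...followed by the bytes
//
//		binary.BigEndian.PutUint32(b[:4], uint32(len(b)-4)) // length excludes itself
//		fmt.Printf("%x\n", b)
//	}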
+ +type sshFxInitPacket struct { + Version uint32 + Extensions []extensionPair +} + +func (p *sshFxInitPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 4, l) + b = append(b, sshFxpInit) + b = marshalUint32(b, p.Version) + + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + + return b, nil +} + +func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { + var err error + if p.Version, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + for len(b) > 0 { + var ep extensionPair + ep, b, err = unmarshalExtensionPair(b) + if err != nil { + return err + } + p.Extensions = append(p.Extensions, ep) + } + return nil +} + +type sshFxVersionPacket struct { + Version uint32 + Extensions []sshExtensionPair +} + +type sshExtensionPair struct { + Name, Data string +} + +func (p *sshFxVersionPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 4, l) + b = append(b, sshFxpVersion) + b = marshalUint32(b, p.Version) + + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + + return b, nil +} + +func marshalIDStringPacket(packetType byte, id uint32, str string) ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(str) + + b := make([]byte, 4, l) + b = append(b, packetType) + b = marshalUint32(b, id) + b = marshalString(b, str) + + return b, nil +} + +func unmarshalIDString(b []byte, id *uint32, str *string) error { + var err error + *id, b, err = unmarshalUint32Safe(b) + if err != nil { + return err + } + *str, _, err = unmarshalStringSafe(b) + return err +} + +type sshFxpReaddirPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpReaddirPacket) id() uint32 { return p.ID } + +func (p *sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpReaddir, p.ID, p.Handle) +} + +func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpOpendirPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpOpendirPacket) id() uint32 { return p.ID } + +func (p *sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpOpendir, p.ID, p.Path) +} + +func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpLstatPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpLstatPacket) id() uint32 { return p.ID } + +func (p *sshFxpLstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpLstat, p.ID, p.Path) +} + +func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpStatPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpStatPacket) id() uint32 { return p.ID } + +func (p *sshFxpStatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpStat, p.ID, p.Path) +} + +func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpFstatPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpFstatPacket) id() uint32 { return p.ID } + +func (p 
*sshFxpFstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpFstat, p.ID, p.Handle) +} + +func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpClosePacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpClosePacket) id() uint32 { return p.ID } + +func (p *sshFxpClosePacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpClose, p.ID, p.Handle) +} + +func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpRemovePacket struct { + ID uint32 + Filename string +} + +func (p *sshFxpRemovePacket) id() uint32 { return p.ID } + +func (p *sshFxpRemovePacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRemove, p.ID, p.Filename) +} + +func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Filename) +} + +type sshFxpRmdirPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpRmdirPacket) id() uint32 { return p.ID } + +func (p *sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRmdir, p.ID, p.Path) +} + +func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpSymlinkPacket struct { + ID uint32 + Targetpath string + Linkpath string +} + +func (p *sshFxpSymlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Targetpath) + + 4 + len(p.Linkpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpSymlink) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Targetpath) + b = marshalString(b, p.Linkpath) + + return b, nil +} + +func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpHardlinkPacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p *sshFxpHardlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpHardlinkPacket) MarshalBinary() ([]byte, error) { + const ext = "hardlink@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + + return b, nil +} + +type sshFxpReadlinkPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpReadlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpReadlink, p.ID, p.Path) +} + +func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpRealpathPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpRealpathPacket) id() uint32 { return p.ID } + +func (p *sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRealpath, p.ID, p.Path) +} + +func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type 
sshFxpNameAttr struct { + Name string + LongName string + Attrs []interface{} +} + +func (p *sshFxpNameAttr) MarshalBinary() ([]byte, error) { + var b []byte + b = marshalString(b, p.Name) + b = marshalString(b, p.LongName) + for _, attr := range p.Attrs { + b = marshal(b, attr) + } + return b, nil +} + +type sshFxpNamePacket struct { + ID uint32 + NameAttrs []*sshFxpNameAttr +} + +func (p *sshFxpNamePacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpName) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, uint32(len(p.NameAttrs))) + + var payload []byte + for _, na := range p.NameAttrs { + ab, err := na.MarshalBinary() + if err != nil { + return nil, nil, err + } + + payload = append(payload, ab...) + } + + return b, payload, nil +} + +func (p *sshFxpNamePacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +type sshFxpOpenPacket struct { + ID uint32 + Path string + Pflags uint32 + Flags uint32 // ignored +} + +func (p *sshFxpOpenPacket) id() uint32 { return p.ID } + +func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Path) + + 4 + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpOpen) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Pflags) + b = marshalUint32(b, p.Flags) + + return b, nil +} + +func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadPacket struct { + ID uint32 + Len uint32 + Offset uint64 + Handle string +} + +func (p *sshFxpReadPacket) id() uint32 { return p.ID } + +func (p *sshFxpReadPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + 8 + 4 // uint64 + uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpRead) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Len) + + return b, nil +} + +func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +// We need to allocate bigger slices with extra capacity to avoid a re-allocation in sshFxpDataPacket.MarshalBinary. +// So we need: uint32(length) + byte(type) + uint32(id) + uint32(data_length) +const dataHeaderLen = 4 + 1 + 4 + 4 + +func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte { + dataLen := p.Len + if dataLen > maxTxPacket { + dataLen = maxTxPacket + } + + if alloc != nil { + // GetPage returns a slice with capacity = maxMsgLength; this is enough to avoid new allocations in + // sshFxpDataPacket.MarshalBinary + return alloc.GetPage(orderID)[:dataLen] + } + + // allocate with extra space for the header + return make([]byte,
dataLen, dataLen+dataHeaderLen) +} + +type sshFxpRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p *sshFxpRenamePacket) id() uint32 { return p.ID } + +func (p *sshFxpRenamePacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpRename) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + + return b, nil +} + +func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpPosixRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p *sshFxpPosixRenamePacket) id() uint32 { return p.ID } + +func (p *sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { + const ext = "posix-rename@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + + return b, nil +} + +type sshFxpWritePacket struct { + ID uint32 + Length uint32 + Offset uint64 + Handle string + Data []byte +} + +func (p *sshFxpWritePacket) id() uint32 { return p.ID } + +func (p *sshFxpWritePacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + 8 + // uint64 + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpWrite) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Length) + + return b, p.Data, nil +} + +func (p *sshFxpWritePacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errShortPacket + } + + p.Data = b[:p.Length] + return nil +} + +type sshFxpMkdirPacket struct { + ID uint32 + Flags uint32 // ignored + Path string +} + +func (p *sshFxpMkdirPacket) id() uint32 { return p.ID } + +func (p *sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Path) + + 4 // uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpMkdir) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + + return b, nil +} + +func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpSetstatPacket struct { + ID uint32 + Flags 
uint32 + Path string + Attrs interface{} +} + +type sshFxpFsetstatPacket struct { + ID uint32 + Flags uint32 + Handle string + Attrs interface{} +} + +func (p *sshFxpSetstatPacket) id() uint32 { return p.ID } +func (p *sshFxpFsetstatPacket) id() uint32 { return p.ID } + +func (p *sshFxpSetstatPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Path) + + 4 // uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpSetstat) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + + payload := marshal(nil, p.Attrs) + + return b, payload, nil +} + +func (p *sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +func (p *sshFxpFsetstatPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + 4 // uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpFsetstat) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint32(b, p.Flags) + + payload := marshal(nil, p.Attrs) + + return b, payload, nil +} + +func (p *sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +type sshFxpHandlePacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpHandlePacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + b := make([]byte, 4, l) + b = append(b, sshFxpHandle) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + + return b, nil +} + +type sshFxpStatusPacket struct { + ID uint32 + StatusError +} + +func (p *sshFxpStatusPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + 4 + len(p.StatusError.msg) + + 4 + len(p.StatusError.lang) + + b := make([]byte, 4, l) + b = append(b, sshFxpStatus) + b = marshalUint32(b, p.ID) + b = marshalStatus(b, p.StatusError) + + return b, nil +} + +type sshFxpDataPacket struct { + ID uint32 + Length uint32 + Data []byte +} + +func (p *sshFxpDataPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpData) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, p.Length) + + return b, p.Data, nil +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// To avoid a new allocation the Data slice must have a capacity >= Length + dataHeaderLen (13) +// +// This is hand-coded rather than just append(header, payload...), +// in order to try and reuse the r.Data backing store in the packet.
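// [Editorial aside, not part of the vendored file] A standalone sketch of the
// buffer-reuse trick the method below performs: when a payload slice already
// carries enough spare capacity for the header, append does not reallocate,
// so the payload can be shifted right and the header written in front of it
// without copying into a fresh buffer. copy is overlap-safe (memmove
// semantics), so shifting within the same backing array is fine.
//
//	func prependInPlace(payload, header []byte) []byte {
//		b := append(payload, make([]byte, len(header))...) // reuses the backing array if cap allows
//		copy(b[len(header):], payload)                     // shift the payload right
//		copy(b, header)                                    // write the header at the front
//		return b
//	}
//
// The vendored implementation follows: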
+func (p *sshFxpDataPacket) MarshalBinary() ([]byte, error) { + b := append(p.Data, make([]byte, dataHeaderLen)...) + copy(b[dataHeaderLen:], p.Data[:p.Length]) + // b[0:4] will be overwritten with the length in sendPacket + b[4] = sshFxpData + binary.BigEndian.PutUint32(b[5:9], p.ID) + binary.BigEndian.PutUint32(b[9:13], p.Length) + return b, nil +} + +func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errShortPacket + } + + p.Data = b[:p.Length] + return nil +} + +type sshFxpStatvfsPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpStatvfsPacket) id() uint32 { return p.ID } + +func (p *sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { + const ext = "statvfs@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Path) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Path) + + return b, nil +} + +// A StatVFS contains statistics about a filesystem. +type StatVFS struct { + ID uint32 + Bsize uint64 /* file system block size */ + Frsize uint64 /* fundamental fs block size */ + Blocks uint64 /* number of blocks (unit f_frsize) */ + Bfree uint64 /* free blocks in file system */ + Bavail uint64 /* free blocks for non-root */ + Files uint64 /* total file inodes */ + Ffree uint64 /* free file inodes */ + Favail uint64 /* free file inodes for non-root */ + Fsid uint64 /* file system id */ + Flag uint64 /* bit mask of f_flag values */ + Namemax uint64 /* maximum filename length */ +} + +// TotalSpace calculates the amount of total space in a filesystem. +func (p *StatVFS) TotalSpace() uint64 { + return p.Frsize * p.Blocks +} + +// FreeSpace calculates the amount of free space in a filesystem. +func (p *StatVFS) FreeSpace() uint64 { + return p.Frsize * p.Bfree +} + +// marshalPacket converts to SSH_FXP_EXTENDED_REPLY packet binary format +func (p *StatVFS) marshalPacket() ([]byte, []byte, error) { + header := []byte{0, 0, 0, 0, sshFxpExtendedReply} + + var buf bytes.Buffer + err := binary.Write(&buf, binary.BigEndian, p) + + return header, buf.Bytes(), err +} + +// MarshalBinary encodes the StatVFS as an SSH_FXP_EXTENDED_REPLY packet.
+func (p *StatVFS) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +type sshFxpFsyncPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpFsyncPacket) id() uint32 { return p.ID } + +func (p *sshFxpFsyncPacket) MarshalBinary() ([]byte, error) { + const ext = "fsync@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Handle) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Handle) + + return b, nil +} + +type sshFxpExtendedPacket struct { + ID uint32 + ExtendedRequest string + SpecificPacket interface { + serverRespondablePacket + readonly() bool + } +} + +func (p *sshFxpExtendedPacket) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacket) readonly() bool { + if p.SpecificPacket == nil { + return true + } + return p.SpecificPacket.readonly() +} + +func (p *sshFxpExtendedPacket) respond(svr *Server) responsePacket { + if p.SpecificPacket == nil { + return statusFromError(p.ID, nil) + } + return p.SpecificPacket.respond(svr) +} + +func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { + var err error + bOrig := b + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { + return err + } + + // specific unmarshalling + switch p.ExtendedRequest { + case "statvfs@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} + case "posix-rename@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} + case "hardlink@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketHardlink{} + default: + return fmt.Errorf("packet type %v: %w", p.SpecificPacket, errUnknownExtendedPacket) + } + + return p.SpecificPacket.UnmarshalBinary(bOrig) +} + +type sshFxpExtendedPacketStatVFS struct { + ID uint32 + ExtendedRequest string + Path string +} + +func (p *sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacketStatVFS) readonly() bool { return true } +func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpExtendedPacketPosixRename struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +func (p *sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacketPosixRename) readonly() bool { return false } +func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { + err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + return statusFromError(p.ID, err) +} + +type sshFxpExtendedPacketHardlink struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +// 
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +func (p *sshFxpExtendedPacketHardlink) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacketHardlink) readonly() bool { return true } +func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { + err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + return statusFromError(p.ID, err) +} diff --git a/vendor/github.com/pkg/sftp/pool.go b/vendor/github.com/pkg/sftp/pool.go new file mode 100644 index 000000000..361262906 --- /dev/null +++ b/vendor/github.com/pkg/sftp/pool.go @@ -0,0 +1,79 @@ +package sftp + +// bufPool provides a pool of byte-slices to be reused in various parts of the package. +// It is safe to use concurrently through a pointer. +type bufPool struct { + ch chan []byte + blen int +} + +func newBufPool(depth, bufLen int) *bufPool { + return &bufPool{ + ch: make(chan []byte, depth), + blen: bufLen, + } +} + +func (p *bufPool) Get() []byte { + if p.blen <= 0 { + panic("bufPool: new buffer creation length must be greater than zero") + } + + for { + select { + case b := <-p.ch: + if cap(b) < p.blen { + // just in case: throw away any buffer with insufficient capacity. + continue + } + + return b[:p.blen] + + default: + return make([]byte, p.blen) + } + } +} + +func (p *bufPool) Put(b []byte) { + if p == nil { + // functional default: no reuse. + return + } + + if cap(b) < p.blen || cap(b) > p.blen*2 { + // DO NOT reuse buffers with insufficient capacity. + // This could cause panics when resizing to p.blen. + + // DO NOT reuse buffers with excessive capacity. + // This could cause memory leaks. + return + } + + select { + case p.ch <- b: + default: + } +} + +type resChanPool chan chan result + +func newResChanPool(depth int) resChanPool { + return make(chan chan result, depth) +} + +func (p resChanPool) Get() chan result { + select { + case ch := <-p: + return ch + default: + return make(chan result, 1) + } +} + +func (p resChanPool) Put(ch chan result) { + select { + case p <- ch: + default: + } +} diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go new file mode 100644 index 000000000..b695528fd --- /dev/null +++ b/vendor/github.com/pkg/sftp/release.go @@ -0,0 +1,5 @@ +// +build !debug + +package sftp + +func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go new file mode 100644 index 000000000..b5c95b4ad --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-attrs.go @@ -0,0 +1,63 @@ +package sftp + +// Methods on the Request object to make working with the Flags bitmasks and +// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write +// request and AttrFlags() and Attributes() when working with SetStat requests. +import "os" + +// FileOpenFlags defines Open and Write Flags. Correlate directly with os.OpenFile flags +// (https://golang.org/pkg/os/#pkg-constants).
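// [Editorial aside, not part of the vendored file] A hedged sketch of how a
// backend might map these booleans back onto os.OpenFile flags; the helper
// name toOsFlags is hypothetical, not part of this package.
//
//	func toOsFlags(f FileOpenFlags) int {
//		flags := os.O_RDONLY
//		switch {
//		case f.Read && f.Write:
//			flags = os.O_RDWR
//		case f.Write:
//			flags = os.O_WRONLY
//		}
//		if f.Append {
//			flags |= os.O_APPEND
//		}
//		if f.Creat {
//			flags |= os.O_CREATE
//		}
//		if f.Trunc {
//			flags |= os.O_TRUNC
//		}
//		if f.Excl {
//			flags |= os.O_EXCL
//		}
//		return flags
//	}
//
// The vendored declaration follows: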
+type FileOpenFlags struct { + Read, Write, Append, Creat, Trunc, Excl bool +} + +func newFileOpenFlags(flags uint32) FileOpenFlags { + return FileOpenFlags{ + Read: flags&sshFxfRead != 0, + Write: flags&sshFxfWrite != 0, + Append: flags&sshFxfAppend != 0, + Creat: flags&sshFxfCreat != 0, + Trunc: flags&sshFxfTrunc != 0, + Excl: flags&sshFxfExcl != 0, + } +} + +// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values +// into a FileOpenFlags struct with booleans set for flags set in the bitmap. +func (r *Request) Pflags() FileOpenFlags { + return newFileOpenFlags(r.Flags) +} + +// FileAttrFlags indicates whether SFTP file attributes were passed. When a flag is +// true the corresponding attribute should be available from the FileStat +// object returned by the Attributes method. Used with SetStat. +type FileAttrFlags struct { + Size, UidGid, Permissions, Acmodtime bool +} + +func newFileAttrFlags(flags uint32) FileAttrFlags { + return FileAttrFlags{ + Size: (flags & sshFileXferAttrSize) != 0, + UidGid: (flags & sshFileXferAttrUIDGID) != 0, + Permissions: (flags & sshFileXferAttrPermissions) != 0, + Acmodtime: (flags & sshFileXferAttrACmodTime) != 0, + } +} + +// AttrFlags returns a FileAttrFlags boolean struct based on the +// bitmap/uint32 file attribute flags from the SFTP packet. +func (r *Request) AttrFlags() FileAttrFlags { + return newFileAttrFlags(r.Flags) +} + +// FileMode returns the Mode SFTP file attribute wrapped as an os.FileMode +func (a FileStat) FileMode() os.FileMode { + return os.FileMode(a.Mode) +} + +// Attributes parses the file attributes byte blob and returns them in a +// FileStat object. +func (r *Request) Attributes() *FileStat { + fs, _ := unmarshalFileStat(r.Flags, r.Attrs) + return fs +} diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go new file mode 100644 index 000000000..6505b5c74 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-errors.go @@ -0,0 +1,54 @@ +package sftp + +type fxerr uint32 + +// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more +// direct control of the errors being sent vs. letting the library work them +// out from the standard os/io errors.
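// [Editorial aside, not part of the vendored file] A hedged sketch of why
// these typed errors exist: a handler can return one of them verbatim and the
// server sends exactly that SSH_FXP_STATUS code, instead of deriving a code
// from a generic os/io error. The receiver type readOnlyHandlers is
// hypothetical.
//
//	func (h readOnlyHandlers) Filewrite(r *Request) (io.WriterAt, error) {
//		// reject all uploads with SSH_FX_PERMISSION_DENIED
//		return nil, ErrSSHFxPermissionDenied
//	}
//
// The vendored status codes follow: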
+const ( + ErrSSHFxOk = fxerr(sshFxOk) + ErrSSHFxEOF = fxerr(sshFxEOF) + ErrSSHFxNoSuchFile = fxerr(sshFxNoSuchFile) + ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied) + ErrSSHFxFailure = fxerr(sshFxFailure) + ErrSSHFxBadMessage = fxerr(sshFxBadMessage) + ErrSSHFxNoConnection = fxerr(sshFxNoConnection) + ErrSSHFxConnectionLost = fxerr(sshFxConnectionLost) + ErrSSHFxOpUnsupported = fxerr(sshFxOPUnsupported) +) + +// Deprecated error types; these are aliases for the new ones. Please use the new ones directly. +const ( + ErrSshFxOk = ErrSSHFxOk + ErrSshFxEof = ErrSSHFxEOF + ErrSshFxNoSuchFile = ErrSSHFxNoSuchFile + ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied + ErrSshFxFailure = ErrSSHFxFailure + ErrSshFxBadMessage = ErrSSHFxBadMessage + ErrSshFxNoConnection = ErrSSHFxNoConnection + ErrSshFxConnectionLost = ErrSSHFxConnectionLost + ErrSshFxOpUnsupported = ErrSSHFxOpUnsupported +) + +func (e fxerr) Error() string { + switch e { + case ErrSSHFxOk: + return "OK" + case ErrSSHFxEOF: + return "EOF" + case ErrSSHFxNoSuchFile: + return "no such file" + case ErrSSHFxPermissionDenied: + return "permission denied" + case ErrSSHFxBadMessage: + return "bad message" + case ErrSSHFxNoConnection: + return "no connection" + case ErrSSHFxConnectionLost: + return "connection lost" + case ErrSSHFxOpUnsupported: + return "operation unsupported" + default: + return "failure" + } +} diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go new file mode 100644 index 000000000..ba22bcd0f --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -0,0 +1,666 @@ +package sftp + +// This serves as an example of how to implement the request server handler as +// well as a dummy backend for testing. It implements an in-memory backend that +// works as a very simple filesystem with a simple flat key-value lookup system. + +import ( + "errors" + "io" + "os" + "path" + "sort" + "strings" + "sync" + "syscall" + "time" +) + +const maxSymlinkFollows = 5 + +var errTooManySymlinks = errors.New("too many symbolic links") + +// InMemHandler returns a Handlers object with the test handlers.
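// [Editorial aside, not part of the vendored file] A hedged sketch of wiring
// this backend into a RequestServer; channel is assumed to be the
// io.ReadWriteCloser of an already-accepted "sftp" subsystem request from
// golang.org/x/crypto/ssh.
//
//	server := NewRequestServer(channel, InMemHandler())
//	if err := server.Serve(); err != nil && err != io.EOF {
//		log.Printf("sftp server exited with error: %v", err)
//	}
//
// The vendored handlers follow: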
+func InMemHandler() Handlers { + root := &root{ + rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true}, + files: make(map[string]*memFile), + } + return Handlers{root, root, root, root} +} + +// Example Handlers +func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { + flags := r.Pflags() + if !flags.Read { + // sanity check + return nil, os.ErrInvalid + } + + return fs.OpenFile(r) +} + +func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { + flags := r.Pflags() + if !flags.Write { + // sanity check + return nil, os.ErrInvalid + } + + return fs.OpenFile(r) +} + +func (fs *root) OpenFile(r *Request) (WriterAtReaderAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + return fs.openfile(r.Filepath, r.Flags) +} + +func (fs *root) putfile(pathname string, file *memFile) error { + pathname, err := fs.canonName(pathname) + if err != nil { + return err + } + + if !strings.HasPrefix(pathname, "/") { + return os.ErrInvalid + } + + if _, err := fs.lfetch(pathname); err != os.ErrNotExist { + return os.ErrExist + } + + file.name = pathname + fs.files[pathname] = file + + return nil +} + +func (fs *root) openfile(pathname string, flags uint32) (*memFile, error) { + pflags := newFileOpenFlags(flags) + + file, err := fs.fetch(pathname) + if err == os.ErrNotExist { + if !pflags.Creat { + return nil, os.ErrNotExist + } + + var count int + // You can create files through dangling symlinks. + link, err := fs.lfetch(pathname) + for err == nil && link.symlink != "" { + if pflags.Excl { + // unless you also passed in O_EXCL + return nil, os.ErrInvalid + } + + if count++; count > maxSymlinkFollows { + return nil, errTooManySymlinks + } + + pathname = link.symlink + link, err = fs.lfetch(pathname) + } + + file := &memFile{ + modtime: time.Now(), + } + + if err := fs.putfile(pathname, file); err != nil { + return nil, err + } + + return file, nil + } + + if err != nil { + return nil, err + } + + if pflags.Creat && pflags.Excl { + return nil, os.ErrExist + } + + if file.IsDir() { + return nil, os.ErrInvalid + } + + if pflags.Trunc { + if err := file.Truncate(0); err != nil { + return nil, err + } + } + + return file, nil +} + +func (fs *root) Filecmd(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + switch r.Method { + case "Setstat": + file, err := fs.openfile(r.Filepath, sshFxfWrite) + if err != nil { + return err + } + + if r.AttrFlags().Size { + return file.Truncate(int64(r.Attributes().Size)) + } + + return nil + + case "Rename": + // SFTP-v2: "It is an error if there already exists a file with the name specified by newpath." + // This varies from the POSIX specification, which allows limited replacement of target files. + if fs.exists(r.Target) { + return os.ErrExist + } + + return fs.rename(r.Filepath, r.Target) + + case "Rmdir": + return fs.rmdir(r.Filepath) + + case "Remove": + // IEEE 1003.1 remove can explicitly unlink files and remove empty directories. + // Here we instead use the semantics of unlink, which implementations may restrict for directories. + return fs.unlink(r.Filepath) + + case "Mkdir": + return fs.mkdir(r.Filepath) + + case "Link": + return fs.link(r.Filepath, r.Target) + + case "Symlink": + // NOTE: r.Filepath is the target, and r.Target is the linkpath.
+ return fs.symlink(r.Filepath, r.Target) + } + + return errors.New("unsupported") +} + +func (fs *root) rename(oldpath, newpath string) error { + file, err := fs.lfetch(oldpath) + if err != nil { + return err + } + + newpath, err = fs.canonName(newpath) + if err != nil { + return err + } + + if !strings.HasPrefix(newpath, "/") { + return os.ErrInvalid + } + + target, err := fs.lfetch(newpath) + if err != os.ErrNotExist { + if target == file { + // IEEE 1003.1: if oldpath and newpath are the same directory entry, + // then return no error, and perform no further action. + return nil + } + + switch { + case file.IsDir(): + // IEEE 1003.1: if oldpath is a directory, and newpath exists, + // then newpath must be a directory, and empty. + // It is to be removed prior to rename. + if err := fs.rmdir(newpath); err != nil { + return err + } + + case target.IsDir(): + // IEEE 1003.1: if oldpath is not a directory, and newpath exists, + // then newpath may not be a directory. + return syscall.EISDIR + } + } + + fs.files[newpath] = file + + if file.IsDir() { + dirprefix := file.name + "/" + + for name, file := range fs.files { + if strings.HasPrefix(name, dirprefix) { + newname := path.Join(newpath, strings.TrimPrefix(name, dirprefix)) + + fs.files[newname] = file + file.name = newname + delete(fs.files, name) + } + } + } + + file.name = newpath + delete(fs.files, oldpath) + + return nil +} + +func (fs *root) PosixRename(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + return fs.rename(r.Filepath, r.Target) +} + +func (fs *root) StatVFS(r *Request) (*StatVFS, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + + return getStatVFSForPath(r.Filepath) +} + +func (fs *root) mkdir(pathname string) error { + dir := &memFile{ + modtime: time.Now(), + isdir: true, + } + + return fs.putfile(pathname, dir) +} + +func (fs *root) rmdir(pathname string) error { + // IEEE 1003.1: If pathname is a symlink, then rmdir should fail with ENOTDIR. + dir, err := fs.lfetch(pathname) + if err != nil { + return err + } + + if !dir.IsDir() { + return syscall.ENOTDIR + } + + // use the dir's internal name, not the pathname we passed in. + // the dir.name is always the canonical name of a directory. + pathname = dir.name + + for name := range fs.files { + if path.Dir(name) == pathname { + return errors.New("directory not empty") + } + } + + delete(fs.files, pathname) + + return nil +} + +func (fs *root) link(oldpath, newpath string) error { + file, err := fs.lfetch(oldpath) + if err != nil { + return err + } + + if file.IsDir() { + return errors.New("hard link not allowed for directory") + } + + return fs.putfile(newpath, file) +} + +// symlink() creates a symbolic link named `linkpath` which contains the string `target`. +// NOTE! This would be called with `symlink(req.Filepath, req.Target)` due to different semantics. +func (fs *root) symlink(target, linkpath string) error { + link := &memFile{ + modtime: time.Now(), + symlink: target, + } + + return fs.putfile(linkpath, link) +} + +func (fs *root) unlink(pathname string) error { + // does not follow symlinks! + file, err := fs.lfetch(pathname) + if err != nil { + return err + } + + if file.IsDir() { + // IEEE 1003.1: implementations may opt out of allowing the unlinking of directories. + // SFTP-v2: SSH_FXP_REMOVE may not remove directories. + return os.ErrInvalid + } + + // DO NOT use the file's internal name.
+ // because of hard links, files cannot have a single canonical name. + delete(fs.files, pathname) + + return nil +} + +type listerat []os.FileInfo + +// Modeled after strings.Reader's ReadAt() implementation +func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) { + var n int + if offset >= int64(len(f)) { + return 0, io.EOF + } + n = copy(ls, f[offset:]) + if n < len(ls) { + return n, io.EOF + } + return n, nil +} + +func (fs *root) Filelist(r *Request) (ListerAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + switch r.Method { + case "List": + files, err := fs.readdir(r.Filepath) + if err != nil { + return nil, err + } + return listerat(files), nil + + case "Stat": + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + return listerat{file}, nil + + case "Readlink": + symlink, err := fs.readlink(r.Filepath) + if err != nil { + return nil, err + } + + // SFTP-v2: The server will respond with a SSH_FXP_NAME packet containing only + // one name and a dummy attributes value. + return listerat{ + &memFile{ + name: symlink, + err: os.ErrNotExist, // prevent accidental use as a reader/writer. + }, + }, nil + } + + return nil, errors.New("unsupported") +} + +func (fs *root) readdir(pathname string) ([]os.FileInfo, error) { + dir, err := fs.fetch(pathname) + if err != nil { + return nil, err + } + + if !dir.IsDir() { + return nil, syscall.ENOTDIR + } + + var files []os.FileInfo + + for name, file := range fs.files { + if path.Dir(name) == dir.name { + files = append(files, file) + } + } + + sort.Slice(files, func(i, j int) bool { return files[i].Name() < files[j].Name() }) + + return files, nil +} + +func (fs *root) readlink(pathname string) (string, error) { + file, err := fs.lfetch(pathname) + if err != nil { + return "", err + } + + if file.symlink == "" { + return "", os.ErrInvalid + } + + return file.symlink, nil +} + +// implements LstatFileLister interface +func (fs *root) Lstat(r *Request) (ListerAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + file, err := fs.lfetch(r.Filepath) + if err != nil { + return nil, err + } + return listerat{file}, nil +} + +// implements RealpathFileLister interface +func (fs *root) Realpath(p string) string { + if fs.startDirectory == "" || fs.startDirectory == "/" { + return cleanPath(p) + } + return cleanPathWithBase(fs.startDirectory, p) +} + +// In-memory file-system-y thing that the Handlers live on +type root struct { + rootFile *memFile + mockErr error + startDirectory string + + mu sync.Mutex + files map[string]*memFile +} + +// Set a mocked error that the next handler call will return. +// Set to nil to reset for no error. +func (fs *root) returnErr(err error) { + fs.mockErr = err +} + +func (fs *root) lfetch(path string) (*memFile, error) { + if path == "/" { + return fs.rootFile, nil + } + + file, ok := fs.files[path] + if file == nil { + if ok { + delete(fs.files, path) + } + + return nil, os.ErrNotExist + } + + return file, nil +} + +// canonName returns the “canonical” name of a file, that is: +// if the directory of the pathname is a symlink, it follows that symlink to the valid directory name. +// This is relatively easy, since `dir.name` will be the only valid canonical path for a directory.
+func (fs *root) canonName(pathname string) (string, error) { + dirname, filename := path.Dir(pathname), path.Base(pathname) + + dir, err := fs.fetch(dirname) + if err != nil { + return "", err + } + + if !dir.IsDir() { + return "", syscall.ENOTDIR + } + + return path.Join(dir.name, filename), nil +} + +func (fs *root) exists(path string) bool { + path, err := fs.canonName(path) + if err != nil { + return false + } + + _, err = fs.lfetch(path) + + return err != os.ErrNotExist +} + +func (fs *root) fetch(path string) (*memFile, error) { + file, err := fs.lfetch(path) + if err != nil { + return nil, err + } + + var count int + for file.symlink != "" { + if count++; count > maxSymlinkFollows { + return nil, errTooManySymlinks + } + + file, err = fs.lfetch(file.symlink) + if err != nil { + return nil, err + } + } + + return file, nil +} + +// Implements os.FileInfo, io.ReaderAt and io.WriterAt interfaces. +// These are the 3 interfaces necessary for the Handlers. +// Implements the optional interface TransferError. +type memFile struct { + name string + modtime time.Time + symlink string + isdir bool + + mu sync.RWMutex + content []byte + err error +} + +// These are helper functions, they must be called while holding the memFile.mu mutex +func (f *memFile) size() int64 { return int64(len(f.content)) } +func (f *memFile) grow(n int64) { f.content = append(f.content, make([]byte, n)...) } + +// Have memFile fulfill os.FileInfo interface +func (f *memFile) Name() string { return path.Base(f.name) } +func (f *memFile) Size() int64 { + f.mu.Lock() + defer f.mu.Unlock() + + return f.size() +} +func (f *memFile) Mode() os.FileMode { + if f.isdir { + return os.FileMode(0755) | os.ModeDir + } + if f.symlink != "" { + return os.FileMode(0777) | os.ModeSymlink + } + return os.FileMode(0644) +} +func (f *memFile) ModTime() time.Time { return f.modtime } +func (f *memFile) IsDir() bool { return f.isdir } +func (f *memFile) Sys() interface{} { + return fakeFileInfoSys() +} + +func (f *memFile) ReadAt(b []byte, off int64) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return 0, f.err + } + + if off < 0 { + return 0, errors.New("memFile.ReadAt: negative offset") + } + + if off >= f.size() { + return 0, io.EOF + } + + n := copy(b, f.content[off:]) + if n < len(b) { + return n, io.EOF + } + + return n, nil +} + +func (f *memFile) WriteAt(b []byte, off int64) (int, error) { + // fmt.Println(string(p), off) + // mimic write delays, should be optional + time.Sleep(time.Microsecond * time.Duration(len(b))) + + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return 0, f.err + } + + grow := int64(len(b)) + off - f.size() + if grow > 0 { + f.grow(grow) + } + + return copy(f.content[off:], b), nil +} + +func (f *memFile) Truncate(size int64) error { + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return f.err + } + + grow := size - f.size() + if grow <= 0 { + f.content = f.content[:size] + } else { + f.grow(grow) + } + + return nil +} + +func (f *memFile) TransferError(err error) { + f.mu.Lock() + defer f.mu.Unlock() + + f.err = err +} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go new file mode 100644 index 000000000..e5dc49bb1 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -0,0 +1,123 @@ +package sftp + +import ( + "io" + "os" +) + +// WriterAtReaderAt defines the interface to return when a file is to +// be opened for reading and writing +type WriterAtReaderAt interface { + 
io.WriterAt + io.ReaderAt +} + +// Interfaces are differentiated based on required returned values. +// All input arguments are to be pulled from Request (the only arg). + +// The Handler interfaces all take the Request object as their only argument. +// All the data you should need to handle the call are in the Request object. +// The request.Method attribute is initially the most important one as it +// determines which Handler gets called. + +// FileReader should return an io.ReaderAt for the filepath +// Note in cases of an error, the error text will be sent to the client. +// Called for Methods: Get +type FileReader interface { + Fileread(*Request) (io.ReaderAt, error) +} + +// FileWriter should return an io.WriterAt for the filepath. +// +// The request server code will call Close() on the returned io.WriterAt +// object if an io.Closer type assertion succeeds. +// Note in cases of an error, the error text will be sent to the client. +// Note when receiving an Append flag it is important to not open files using +// O_APPEND if you plan to use WriteAt, as they conflict. +// Called for Methods: Put, Open +type FileWriter interface { + Filewrite(*Request) (io.WriterAt, error) +} + +// OpenFileWriter is a FileWriter that implements the generic OpenFile method. +// You need to implement this optional interface if you want to be able +// to read and write from/to the same handle. +// Called for Methods: Open +type OpenFileWriter interface { + FileWriter + OpenFile(*Request) (WriterAtReaderAt, error) +} + +// FileCmder should return an error +// Note in cases of an error, the error text will be sent to the client. +// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove
type FileCmder interface { + Filecmd(*Request) error +} + +// PosixRenameFileCmder is a FileCmder that implements the PosixRename method. +// If this interface is implemented PosixRename requests will call it +// otherwise they will be handled in the same way as Rename +type PosixRenameFileCmder interface { + FileCmder + PosixRename(*Request) error +} + +// StatVFSFileCmder is a FileCmder that implements the StatVFS method. +// You need to implement this interface if you want to handle statvfs requests. +// Please also be sure that the statvfs@openssh.com extension is enabled +type StatVFSFileCmder interface { + FileCmder + StatVFS(*Request) (*StatVFS, error) +} + +// FileLister should return an object that fulfils the ListerAt interface +// Note in cases of an error, the error text will be sent to the client. +// Called for Methods: List, Stat, Readlink +type FileLister interface { + Filelist(*Request) (ListerAt, error) +} + +// LstatFileLister is a FileLister that implements the Lstat method. +// If this interface is implemented Lstat requests will call it +// otherwise they will be handled in the same way as Stat +type LstatFileLister interface { + FileLister + Lstat(*Request) (ListerAt, error) +} + +// RealPathFileLister is a FileLister that implements the Realpath method. +// We use "/" as the start directory for relative paths; by implementing this +// interface you can customize the start directory. +// You have to return an absolute POSIX path. +// +// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead. +type RealPathFileLister interface { + FileLister + RealPath(string) string +} + +// NameLookupFileLister is a FileLister that implements the LookupUserName and LookupGroupName methods.
+// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames. +type NameLookupFileLister interface { + FileLister + LookupUserName(string) string + LookupGroupName(string) string +} + +// ListerAt does for file lists what io.ReaderAt does for files. +// ListAt should return the number of entries copied and an io.EOF +// error if at end of list. This is testable by comparing how many you +// copied to how many could be copied (eg. n < len(ls) below). +// The copy() builtin is best for the copying. +// Note in cases of an error, the error text will be sent to the client. +type ListerAt interface { + ListAt([]os.FileInfo, int64) (int, error) +} + +// TransferError is an optional interface that readerAt and writerAt +// can implement to be notified about the error causing Serve() to exit +// with the request still open +type TransferError interface { + TransferError(err error) +} diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go new file mode 100644 index 000000000..2444da593 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-plan9.go @@ -0,0 +1,34 @@ +// +build plan9 + +package sftp + +import ( + "path" + "path/filepath" + "syscall" +) + +func fakeFileInfoSys() interface{} { + return &syscall.Dir{} +} + +func testOsSys(sys interface{}) error { + return nil +} + +func toLocalPath(p string) string { + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp[1:] + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/#s/boot" to "#s/boot" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md new file mode 100644 index 000000000..f887274dc --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-readme.md @@ -0,0 +1,53 @@ +# Request Based SFTP API + +The request based API allows for custom backends in a way similar to the http +package. In order to create a backend you need to implement 4 handler +interfaces: one for reading, one for writing, one for misc commands and one for +listing files. Each has 1 required method and in each case those methods take +the Request as the only parameter and they each return something different. +These 4 interfaces are enough to handle all the SFTP traffic in a simplified +manner. + +The Request structure has 5 public fields which you will deal with. + +- Method (string) - string name of incoming call +- Filepath (string) - POSIX path of file to act on +- Flags (uint32) - 32bit bitmask value of file open/create flags +- Attrs ([]byte) - byte string of file attribute data +- Target (string) - target path for renames and sym-links + +Below are the methods and a brief description of what they need to do. + +### Fileread(*Request) (io.ReaderAt, error) + +Handler for "Get" method and returns an io.ReaderAt for the file which the server +then sends to the client. + +### Filewrite(*Request) (io.WriterAt, error) + +Handler for "Put" method and returns an io.WriterAt for the file which the server +then writes the uploaded file to. The file opening "pflags" are currently +preserved in the Request.Flags field as a 32bit bitmask value. See the [SFTP
spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for +details. + +### Filecmd(*Request) error + +Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods.
Makes the +appropriate changes and returns nil for success or a filesystem-like error +(eg. os.ErrNotExist). The attributes are currently propagated in their raw form +([]byte) and will need to be unmarshalled to be useful. See the respond method +on sshFxpSetstatPacket for an example of how you might want to do this. + +### Filelist(*Request) (ListerAt, error) + +Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs +with the data on the files and returns them in a list (a list of 1 for Stat and +Readlink). + + +## TODO + +- Add support for API users to see trace/debugging info of what is going on +inside the SFTP server. +- Unmarshal the file attributes into a structure on the Request object. diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go new file mode 100644 index 000000000..b7dadd6c1 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -0,0 +1,328 @@ +package sftp + +import ( + "context" + "errors" + "io" + "path" + "path/filepath" + "strconv" + "sync" +) + +var maxTxPacket uint32 = 1 << 15 + +// Handlers contains the 4 SFTP server request handlers. +type Handlers struct { + FileGet FileReader + FilePut FileWriter + FileCmd FileCmder + FileList FileLister +} + +// RequestServer abstracts the sftp protocol with an http request-like protocol +type RequestServer struct { + Handlers Handlers + + *serverConn + pktMgr *packetManager + + startDirectory string + + mu sync.RWMutex + handleCount int + openRequests map[string]*Request +} + +// A RequestServerOption is a function which applies configuration to a RequestServer. +type RequestServerOption func(*RequestServer) + +// WithRSAllocator enables the allocator. +// After processing a packet we keep the allocated slices in memory +// and reuse them for new packets. +// The allocator is experimental +func WithRSAllocator() RequestServerOption { + return func(rs *RequestServer) { + alloc := newAllocator() + rs.pktMgr.alloc = alloc + rs.conn.alloc = alloc + } +} + +// WithStartDirectory sets a start directory to use as the base for relative paths. +// If unset, the default is "/". +func WithStartDirectory(startDirectory string) RequestServerOption { + return func(rs *RequestServer) { + rs.startDirectory = cleanPath(startDirectory) + } +} + +// NewRequestServer creates/allocates/returns a new RequestServer. +// Normally there will be one server per user-session. +func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + rs := &RequestServer{ + Handlers: h, + + serverConn: svrConn, + pktMgr: newPktMgr(svrConn), + + startDirectory: "/", + + openRequests: make(map[string]*Request), + } + + for _, o := range options { + o(rs) + } + return rs +} + +// New Open packet/Request +func (rs *RequestServer) nextRequest(r *Request) string { + rs.mu.Lock() + defer rs.mu.Unlock() + + rs.handleCount++ + + r.handle = strconv.Itoa(rs.handleCount) + rs.openRequests[r.handle] = r + + return r.handle +} + +// Returns Request from openRequests, bool is false if it is missing. +// +// The Requests in openRequests work essentially as open file descriptors that +// you can do different things with. What you are doing with them is denoted by +// the first packet of that type (read/write/etc).
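// [Editorial aside, not part of the vendored file] A hedged sketch of the
// functional-options pattern used by NewRequestServer above; both options are
// defined in this file, while rwc and handlers are assumed to exist.
//
//	rs := NewRequestServer(rwc, handlers,
//		WithStartDirectory("/home/user"), // resolve relative paths against this base
//		WithRSAllocator(),                // reuse packet buffers across requests
//	)
//
// The vendored code continues: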
+func (rs *RequestServer) getRequest(handle string) (*Request, bool) { + rs.mu.RLock() + defer rs.mu.RUnlock() + + r, ok := rs.openRequests[handle] + return r, ok +} + +// Close the Request and clear from openRequests map +func (rs *RequestServer) closeRequest(handle string) error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if r, ok := rs.openRequests[handle]; ok { + delete(rs.openRequests, handle) + return r.close() + } + + return EBADF +} + +// Close the read/write/closer to trigger exiting the main server loop +func (rs *RequestServer) Close() error { return rs.conn.Close() } + +func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { + defer close(pktChan) // shuts down sftpServerWorkers + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + + for { + pktType, pktBytes, err = rs.serverConn.recvPacket(rs.pktMgr.getNextOrderID()) + if err != nil { + // we don't care about releasing allocated pages here, the server will quit and the allocator freed + return err + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + switch { + case errors.Is(err, errUnknownExtendedPacket): + // do nothing + default: + debug("makePacket err: %v", err) + rs.conn.Close() // shuts down recvPacket + return err + } + } + + pktChan <- rs.pktMgr.newOrderedRequest(pkt) + } +} + +// Serve requests for user session +func (rs *RequestServer) Serve() error { + defer func() { + if rs.pktMgr.alloc != nil { + rs.pktMgr.alloc.Free() + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var wg sync.WaitGroup + runWorker := func(ch chan orderedRequest) { + wg.Add(1) + go func() { + defer wg.Done() + if err := rs.packetWorker(ctx, ch); err != nil { + rs.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := rs.pktMgr.workerChan(runWorker) + + err := rs.serveLoop(pktChan) + + wg.Wait() // wait for all workers to exit + + rs.mu.Lock() + defer rs.mu.Unlock() + + // make sure all open requests are properly closed + // (eg. possible on dropped connections, client crashes, etc.) 
+ for handle, req := range rs.openRequests { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + req.transferError(err) + + delete(rs.openRequests, handle) + req.close() + } + + return err +} + +func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error { + for pkt := range pktChan { + orderID := pkt.orderID() + if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok { + if epkt.SpecificPacket != nil { + pkt.requestPacket = epkt.SpecificPacket + } + } + + var rpkt responsePacket + switch pkt := pkt.requestPacket.(type) { + case *sshFxInitPacket: + rpkt = &sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions} + case *sshFxpClosePacket: + handle := pkt.getHandle() + rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) + case *sshFxpRealpathPacket: + var realPath string + if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok { + realPath = realPather.RealPath(pkt.getPath()) + } else { + realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) + } + rpkt = cleanPacketPath(pkt, realPath) + case *sshFxpOpendirPacket: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + handle := rs.nextRequest(request) + rpkt = request.opendir(rs.Handlers, pkt) + if _, ok := rpkt.(*sshFxpHandlePacket); !ok { + // if we return an error we have to remove the handle from the active ones + rs.closeRequest(handle) + } + case *sshFxpOpenPacket: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + handle := rs.nextRequest(request) + rpkt = request.open(rs.Handlers, pkt) + if _, ok := rpkt.(*sshFxpHandlePacket); !ok { + // if we return an error we have to remove the handle from the active ones + rs.closeRequest(handle) + } + case *sshFxpFstatPacket: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.ID, EBADF) + } else { + request = &Request{ + Method: "Stat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case *sshFxpFsetstatPacket: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.ID, EBADF) + } else { + request = &Request{ + Method: "Setstat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case *sshFxpExtendedPacketPosixRename: + request := &Request{ + Method: "PosixRename", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath), + Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + case *sshFxpExtendedPacketStatVFS: + request := &Request{ + Method: "StatVFS", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + case hasHandle: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.id(), EBADF) + } else { + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case hasPath: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + request.close() + default: + rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported) + } + + rs.pktMgr.readyPacket( + rs.pktMgr.newOrderedResponse(rpkt, orderID)) + } + return nil +} + +// clean and return name packet for file +func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath 
string) responsePacket {
+	return &sshFxpNamePacket{
+		ID: pkt.id(),
+		NameAttrs: []*sshFxpNameAttr{
+			{
+				Name:     realPath,
+				LongName: realPath,
+				Attrs:    emptyFileStat,
+			},
+		},
+	}
+}
+
+// Makes sure we have a clean POSIX (/) absolute path to work with.
+func cleanPath(p string) string {
+	return cleanPathWithBase("/", p)
+}
+
+func cleanPathWithBase(base, p string) string {
+	p = filepath.ToSlash(filepath.Clean(p))
+	if !path.IsAbs(p) {
+		return path.Join(base, p)
+	}
+	return p
+}
diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go
new file mode 100644
index 000000000..50b08a38d
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-unix.go
@@ -0,0 +1,27 @@
+// +build !windows,!plan9
+
+package sftp
+
+import (
+	"errors"
+	"syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+	return &syscall.Stat_t{Uid: 65534, Gid: 65534}
+}
+
+func testOsSys(sys interface{}) error {
+	fstat := sys.(*FileStat)
+	if fstat.UID != uint32(65534) {
+		return errors.New("Uid failed to match")
+	}
+	if fstat.GID != uint32(65534) {
+		return errors.New("Gid failed to match")
+	}
+	return nil
+}
+
+func toLocalPath(p string) string {
+	return p
+}
diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go
new file mode 100644
index 000000000..116c27aab
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request.go
@@ -0,0 +1,630 @@
+package sftp
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+// MaxFilelist is the max number of files to return in a readdir batch.
+var MaxFilelist int64 = 100
+
+// state encapsulates the reader/writer/readdir from handlers.
+type state struct {
+	mu sync.RWMutex
+
+	writerAt         io.WriterAt
+	readerAt         io.ReaderAt
+	writerAtReaderAt WriterAtReaderAt
+	listerAt         ListerAt
+	lsoffset         int64
+}
+
+// copy returns a shallow copy of the state.
+// This is broken out to specific fields,
+// because we must not copy the mutex in state.
+func (s *state) copy() state { + s.mu.RLock() + defer s.mu.RUnlock() + + return state{ + writerAt: s.writerAt, + readerAt: s.readerAt, + writerAtReaderAt: s.writerAtReaderAt, + listerAt: s.listerAt, + lsoffset: s.lsoffset, + } +} + +func (s *state) setReaderAt(rd io.ReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.readerAt = rd +} + +func (s *state) getReaderAt() io.ReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt +} + +func (s *state) setWriterAt(rd io.WriterAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAt = rd +} + +func (s *state) getWriterAt() io.WriterAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAt +} + +func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAtReaderAt = rw +} + +func (s *state) getWriterAtReaderAt() WriterAtReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAtReaderAt +} + +func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt, s.writerAt, s.writerAtReaderAt +} + +// Returns current offset for file list +func (s *state) lsNext() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.lsoffset +} + +// Increases next offset +func (s *state) lsInc(offset int64) { + s.mu.Lock() + defer s.mu.Unlock() + + s.lsoffset += offset +} + +// manage file read/write state +func (s *state) setListerAt(la ListerAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.listerAt = la +} + +func (s *state) getListerAt() ListerAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.listerAt +} + +// Request contains the data and state for the incoming service request. +type Request struct { + // Get, Put, Setstat, Stat, Rename, Remove + // Rmdir, Mkdir, List, Readlink, Link, Symlink + Method string + Filepath string + Flags uint32 + Attrs []byte // convert to sub-struct + Target string // for renames and sym-links + handle string + + // reader/writer/readdir from handlers + state + + // context lasts duration of request + ctx context.Context + cancelCtx context.CancelFunc +} + +// NewRequest creates a new Request object. +func NewRequest(method, path string) *Request { + return &Request{ + Method: method, + Filepath: cleanPath(path), + } +} + +// copy returns a shallow copy of existing request. +// This is broken out to specific fields, +// because we have to copy around the mutex in state. +func (r *Request) copy() *Request { + return &Request{ + Method: r.Method, + Filepath: r.Filepath, + Flags: r.Flags, + Attrs: r.Attrs, + Target: r.Target, + handle: r.handle, + + state: r.state.copy(), + + ctx: r.ctx, + cancelCtx: r.cancelCtx, + } +} + +// New Request initialized based on packet data +func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request { + request := &Request{ + Method: requestMethod(pkt), + Filepath: cleanPathWithBase(baseDir, pkt.getPath()), + } + request.ctx, request.cancelCtx = context.WithCancel(ctx) + + switch p := pkt.(type) { + case *sshFxpOpenPacket: + request.Flags = p.Pflags + case *sshFxpSetstatPacket: + request.Flags = p.Flags + request.Attrs = p.Attrs.([]byte) + case *sshFxpRenamePacket: + request.Target = cleanPathWithBase(baseDir, p.Newpath) + case *sshFxpSymlinkPacket: + // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) + // this makes Request.Target the linkpath, and Request.Filepath the target. 
+ request.Target = cleanPathWithBase(baseDir, p.Linkpath) + case *sshFxpExtendedPacketHardlink: + request.Target = cleanPathWithBase(baseDir, p.Newpath) + } + return request +} + +// Context returns the request's context. To change the context, +// use WithContext. +// +// The returned context is always non-nil; it defaults to the +// background context. +// +// For incoming server requests, the context is canceled when the +// request is complete or the client's connection closes. +func (r *Request) Context() context.Context { + if r.ctx != nil { + return r.ctx + } + return context.Background() +} + +// WithContext returns a copy of r with its context changed to ctx. +// The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + if ctx == nil { + panic("nil context") + } + r2 := r.copy() + r2.ctx = ctx + r2.cancelCtx = nil + return r2 +} + +// Close reader/writer if possible +func (r *Request) close() error { + defer func() { + if r.cancelCtx != nil { + r.cancelCtx() + } + }() + + rd, wr, rw := r.getAllReaderWriters() + + var err error + + // Close errors on a Writer are far more likely to be the important one. + // As they can be information that there was a loss of data. + if c, ok := wr.(io.Closer); ok { + if err2 := c.Close(); err == nil { + // update error if it is still nil + err = err2 + } + } + + if c, ok := rw.(io.Closer); ok { + if err2 := c.Close(); err == nil { + // update error if it is still nil + err = err2 + + r.setWriterAtReaderAt(nil) + } + } + + if c, ok := rd.(io.Closer); ok { + if err2 := c.Close(); err == nil { + // update error if it is still nil + err = err2 + } + } + + return err +} + +// Notify transfer error if any +func (r *Request) transferError(err error) { + if err == nil { + return + } + + rd, wr, rw := r.getAllReaderWriters() + + if t, ok := wr.(TransferError); ok { + t.TransferError(err) + } + + if t, ok := rw.(TransferError); ok { + t.TransferError(err) + } + + if t, ok := rd.(TransferError); ok { + t.TransferError(err) + } +} + +// called from worker to handle packet/request +func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { + switch r.Method { + case "Get": + return fileget(handlers.FileGet, r, pkt, alloc, orderID) + case "Put": + return fileput(handlers.FilePut, r, pkt, alloc, orderID) + case "Open": + return fileputget(handlers.FilePut, r, pkt, alloc, orderID) + case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS": + return filecmd(handlers.FileCmd, r, pkt) + case "List": + return filelist(handlers.FileList, r, pkt) + case "Stat", "Lstat", "Readlink": + return filestat(handlers.FileList, r, pkt) + default: + return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method)) + } +} + +// Additional initialization for Open packets +func (r *Request) open(h Handlers, pkt requestPacket) responsePacket { + flags := r.Pflags() + + id := pkt.id() + + switch { + case flags.Write, flags.Append, flags.Creat, flags.Trunc: + if flags.Read { + if openFileWriter, ok := h.FilePut.(OpenFileWriter); ok { + r.Method = "Open" + rw, err := openFileWriter.OpenFile(r) + if err != nil { + return statusFromError(id, err) + } + + r.setWriterAtReaderAt(rw) + + return &sshFxpHandlePacket{ + ID: id, + Handle: r.handle, + } + } + } + + r.Method = "Put" + wr, err := h.FilePut.Filewrite(r) + if err != nil { + return statusFromError(id, err) + } + + r.setWriterAt(wr) + + case flags.Read: + r.Method = "Get" + rd, err := 
h.FileGet.Fileread(r) + if err != nil { + return statusFromError(id, err) + } + + r.setReaderAt(rd) + + default: + return statusFromError(id, errors.New("bad file flags")) + } + + return &sshFxpHandlePacket{ + ID: id, + Handle: r.handle, + } +} + +func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { + r.Method = "List" + la, err := h.FileList.Filelist(r) + if err != nil { + return statusFromError(pkt.id(), wrapPathError(r.Filepath, err)) + } + + r.setListerAt(la) + + return &sshFxpHandlePacket{ + ID: pkt.id(), + Handle: r.handle, + } +} + +// wrap FileReader handler +func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { + rd := r.getReaderAt() + if rd == nil { + return statusFromError(pkt.id(), errors.New("unexpected read packet")) + } + + data, offset, _ := packetData(pkt, alloc, orderID) + + n, err := rd.ReadAt(data, offset) + // only return EOF error if no data left to read + if err != nil && (err != io.EOF || n == 0) { + return statusFromError(pkt.id(), err) + } + + return &sshFxpDataPacket{ + ID: pkt.id(), + Length: uint32(n), + Data: data[:n], + } +} + +// wrap FileWriter handler +func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { + wr := r.getWriterAt() + if wr == nil { + return statusFromError(pkt.id(), errors.New("unexpected write packet")) + } + + data, offset, _ := packetData(pkt, alloc, orderID) + + _, err := wr.WriteAt(data, offset) + return statusFromError(pkt.id(), err) +} + +// wrap OpenFileWriter handler +func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { + rw := r.getWriterAtReaderAt() + if rw == nil { + return statusFromError(pkt.id(), errors.New("unexpected write and read packet")) + } + + switch p := pkt.(type) { + case *sshFxpReadPacket: + data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset) + + n, err := rw.ReadAt(data, offset) + // only return EOF error if no data left to read + if err != nil && (err != io.EOF || n == 0) { + return statusFromError(pkt.id(), err) + } + + return &sshFxpDataPacket{ + ID: pkt.id(), + Length: uint32(n), + Data: data[:n], + } + + case *sshFxpWritePacket: + data, offset := p.Data, int64(p.Offset) + + _, err := rw.WriteAt(data, offset) + return statusFromError(pkt.id(), err) + + default: + return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write")) + } +} + +// file data for additional read/write packets +func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) { + switch p := p.(type) { + case *sshFxpReadPacket: + return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len + case *sshFxpWritePacket: + return p.Data, int64(p.Offset), p.Length + } + return +} + +// wrap FileCmder handler +func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { + switch p := pkt.(type) { + case *sshFxpFsetstatPacket: + r.Flags = p.Flags + r.Attrs = p.Attrs.([]byte) + } + + switch r.Method { + case "PosixRename": + if posixRenamer, ok := h.(PosixRenameFileCmder); ok { + err := posixRenamer.PosixRename(r) + return statusFromError(pkt.id(), err) + } + + // PosixRenameFileCmder not implemented handle this request as a Rename + r.Method = "Rename" + err := h.Filecmd(r) + return statusFromError(pkt.id(), err) + + case "StatVFS": + if statVFSCmdr, ok := h.(StatVFSFileCmder); ok { + stat, err := statVFSCmdr.StatVFS(r) + if err != nil { + return 
statusFromError(pkt.id(), err) + } + stat.ID = pkt.id() + return stat + } + + return statusFromError(pkt.id(), ErrSSHFxOpUnsupported) + } + + err := h.Filecmd(r) + return statusFromError(pkt.id(), err) +} + +// wrap FileLister handler +func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { + lister := r.getListerAt() + if lister == nil { + return statusFromError(pkt.id(), errors.New("unexpected dir packet")) + } + + offset := r.lsNext() + finfo := make([]os.FileInfo, MaxFilelist) + n, err := lister.ListAt(finfo, offset) + r.lsInc(int64(n)) + // ignore EOF as we only return it when there are no results + finfo = finfo[:n] // avoid need for nil tests below + + switch r.Method { + case "List": + if err != nil && (err != io.EOF || n == 0) { + return statusFromError(pkt.id(), err) + } + + nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo)) + + // If the type conversion fails, we get untyped `nil`, + // which is handled by not looking up any names. + idLookup, _ := h.(NameLookupFileLister) + + for _, fi := range finfo { + nameAttrs = append(nameAttrs, &sshFxpNameAttr{ + Name: fi.Name(), + LongName: runLs(idLookup, fi), + Attrs: []interface{}{fi}, + }) + } + + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: nameAttrs, + } + + default: + err = fmt.Errorf("unexpected method: %s", r.Method) + return statusFromError(pkt.id(), err) + } +} + +func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { + var lister ListerAt + var err error + + if r.Method == "Lstat" { + if lstatFileLister, ok := h.(LstatFileLister); ok { + lister, err = lstatFileLister.Lstat(r) + } else { + // LstatFileLister not implemented handle this request as a Stat + r.Method = "Stat" + lister, err = h.Filelist(r) + } + } else { + lister, err = h.Filelist(r) + } + if err != nil { + return statusFromError(pkt.id(), err) + } + finfo := make([]os.FileInfo, 1) + n, err := lister.ListAt(finfo, 0) + finfo = finfo[:n] // avoid need for nil tests below + + switch r.Method { + case "Stat", "Lstat": + if err != nil && err != io.EOF { + return statusFromError(pkt.id(), err) + } + if n == 0 { + err = &os.PathError{ + Op: strings.ToLower(r.Method), + Path: r.Filepath, + Err: syscall.ENOENT, + } + return statusFromError(pkt.id(), err) + } + return &sshFxpStatResponse{ + ID: pkt.id(), + info: finfo[0], + } + case "Readlink": + if err != nil && err != io.EOF { + return statusFromError(pkt.id(), err) + } + if n == 0 { + err = &os.PathError{ + Op: "readlink", + Path: r.Filepath, + Err: syscall.ENOENT, + } + return statusFromError(pkt.id(), err) + } + filename := finfo[0].Name() + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []*sshFxpNameAttr{ + { + Name: filename, + LongName: filename, + Attrs: emptyFileStat, + }, + }, + } + default: + err = fmt.Errorf("unexpected method: %s", r.Method) + return statusFromError(pkt.id(), err) + } +} + +// init attributes of request object from packet data +func requestMethod(p requestPacket) (method string) { + switch p.(type) { + case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket: + // set in open() above + case *sshFxpOpendirPacket, *sshFxpReaddirPacket: + // set in opendir() above + case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: + method = "Setstat" + case *sshFxpRenamePacket: + method = "Rename" + case *sshFxpSymlinkPacket: + method = "Symlink" + case *sshFxpRemovePacket: + method = "Remove" + case *sshFxpStatPacket, *sshFxpFstatPacket: + method = "Stat" + case *sshFxpLstatPacket: + method = "Lstat" + case *sshFxpRmdirPacket: + method = "Rmdir" 
+ case *sshFxpReadlinkPacket: + method = "Readlink" + case *sshFxpMkdirPacket: + method = "Mkdir" + case *sshFxpExtendedPacketHardlink: + method = "Link" + } + return method +} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go new file mode 100644 index 000000000..1f6d3df17 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -0,0 +1,44 @@ +package sftp + +import ( + "path" + "path/filepath" + "syscall" +) + +func fakeFileInfoSys() interface{} { + return syscall.Win32FileAttributeData{} +} + +func testOsSys(sys interface{}) error { + return nil +} + +func toLocalPath(p string) string { + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp + for len(tmp) > 0 && tmp[0] == '\\' { + tmp = tmp[1:] + } + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/C:/Windows" to "C:\\Windows" + return tmp + } + + tmp += "\\" + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes but with extra end slash is absolute, + // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. + // e.g. "/C:" to "C:\\" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go new file mode 100644 index 000000000..529052b44 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server.go @@ -0,0 +1,616 @@ +package sftp + +// sftp server counterpart + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" +) + +const ( + // SftpServerWorkerCount defines the number of workers for the SFTP server + SftpServerWorkerCount = 8 +) + +// Server is an SSH File Transfer Protocol (sftp) server. +// This is intended to provide the sftp subsystem to an ssh server daemon. +// This implementation currently supports most of sftp server protocol version 3, +// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +type Server struct { + *serverConn + debugStream io.Writer + readOnly bool + pktMgr *packetManager + openFiles map[string]*os.File + openFilesLock sync.RWMutex + handleCount int +} + +func (svr *Server) nextHandle(f *os.File) string { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + svr.handleCount++ + handle := strconv.Itoa(svr.handleCount) + svr.openFiles[handle] = f + return handle +} + +func (svr *Server) closeHandle(handle string) error { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + if f, ok := svr.openFiles[handle]; ok { + delete(svr.openFiles, handle) + return f.Close() + } + + return EBADF +} + +func (svr *Server) getHandle(handle string) (*os.File, bool) { + svr.openFilesLock.RLock() + defer svr.openFilesLock.RUnlock() + f, ok := svr.openFiles[handle] + return f, ok +} + +type serverRespondablePacket interface { + encoding.BinaryUnmarshaler + id() uint32 + respond(svr *Server) responsePacket +} + +// NewServer creates a new Server instance around the provided streams, serving +// content from the root of the filesystem. Optionally, ServerOption +// functions may be specified to further configure the Server. +// +// A subsequent call to Serve() is required to begin serving files over SFTP. 
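As a usage sketch (serveSubsystem and channel are illustrative names, not part of the vendored source): any io.ReadWriteCloser works, for example an accepted SSH session channel that requested the "sftp" subsystem, and the options shown below are the ones defined further down in this file.

package example

import (
	"io"
	"os"

	"github.com/pkg/sftp"
)

// serveSubsystem serves one SFTP session over channel, read-only and with
// debug logging; it blocks until the stream ends.
func serveSubsystem(channel io.ReadWriteCloser) error {
	srv, err := sftp.NewServer(
		channel,
		sftp.ReadOnly(),           // refuse any write operation
		sftp.WithDebug(os.Stderr), // send Server debugging output to stderr
	)
	if err != nil {
		return err
	}
	return srv.Serve()
}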
+func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + s := &Server{ + serverConn: svrConn, + debugStream: ioutil.Discard, + pktMgr: newPktMgr(svrConn), + openFiles: make(map[string]*os.File), + } + + for _, o := range options { + if err := o(s); err != nil { + return nil, err + } + } + + return s, nil +} + +// A ServerOption is a function which applies configuration to a Server. +type ServerOption func(*Server) error + +// WithDebug enables Server debugging output to the supplied io.Writer. +func WithDebug(w io.Writer) ServerOption { + return func(s *Server) error { + s.debugStream = w + return nil + } +} + +// ReadOnly configures a Server to serve files in read-only mode. +func ReadOnly() ServerOption { + return func(s *Server) error { + s.readOnly = true + return nil + } +} + +// WithAllocator enable the allocator. +// After processing a packet we keep in memory the allocated slices +// and we reuse them for new packets. +// The allocator is experimental +func WithAllocator() ServerOption { + return func(s *Server) error { + alloc := newAllocator() + s.pktMgr.alloc = alloc + s.conn.alloc = alloc + return nil + } +} + +type rxPacket struct { + pktType fxp + pktBytes []byte +} + +// Up to N parallel servers +func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error { + for pkt := range pktChan { + // readonly checks + readonly := true + switch pkt := pkt.requestPacket.(type) { + case notReadOnly: + readonly = false + case *sshFxpOpenPacket: + readonly = pkt.readonly() + case *sshFxpExtendedPacket: + readonly = pkt.readonly() + } + + // If server is operating read-only and a write operation is requested, + // return permission denied + if !readonly && svr.readOnly { + svr.pktMgr.readyPacket( + svr.pktMgr.newOrderedResponse(statusFromError(pkt.id(), syscall.EPERM), pkt.orderID()), + ) + continue + } + + if err := handlePacket(svr, pkt); err != nil { + return err + } + } + return nil +} + +func handlePacket(s *Server, p orderedRequest) error { + var rpkt responsePacket + orderID := p.orderID() + switch p := p.requestPacket.(type) { + case *sshFxInitPacket: + rpkt = &sshFxVersionPacket{ + Version: sftpProtocolVersion, + Extensions: sftpExtensions, + } + case *sshFxpStatPacket: + // stat the requested file + info, err := os.Stat(toLocalPath(p.Path)) + rpkt = &sshFxpStatResponse{ + ID: p.ID, + info: info, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpLstatPacket: + // stat the requested file + info, err := os.Lstat(toLocalPath(p.Path)) + rpkt = &sshFxpStatResponse{ + ID: p.ID, + info: info, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpFstatPacket: + f, ok := s.getHandle(p.Handle) + var err error = EBADF + var info os.FileInfo + if ok { + info, err = f.Stat() + rpkt = &sshFxpStatResponse{ + ID: p.ID, + info: info, + } + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpMkdirPacket: + // TODO FIXME: ignore flags field + err := os.Mkdir(toLocalPath(p.Path), 0755) + rpkt = statusFromError(p.ID, err) + case *sshFxpRmdirPacket: + err := os.Remove(toLocalPath(p.Path)) + rpkt = statusFromError(p.ID, err) + case *sshFxpRemovePacket: + err := os.Remove(toLocalPath(p.Filename)) + rpkt = statusFromError(p.ID, err) + case *sshFxpRenamePacket: + err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + rpkt = statusFromError(p.ID, err) + case *sshFxpSymlinkPacket: + err := 
os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath)) + rpkt = statusFromError(p.ID, err) + case *sshFxpClosePacket: + rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) + case *sshFxpReadlinkPacket: + f, err := os.Readlink(toLocalPath(p.Path)) + rpkt = &sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []*sshFxpNameAttr{ + { + Name: f, + LongName: f, + Attrs: emptyFileStat, + }, + }, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpRealpathPacket: + f, err := filepath.Abs(toLocalPath(p.Path)) + f = cleanPath(f) + rpkt = &sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []*sshFxpNameAttr{ + { + Name: f, + LongName: f, + Attrs: emptyFileStat, + }, + }, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpOpendirPacket: + p.Path = toLocalPath(p.Path) + + if stat, err := os.Stat(p.Path); err != nil { + rpkt = statusFromError(p.ID, err) + } else if !stat.IsDir() { + rpkt = statusFromError(p.ID, &os.PathError{ + Path: p.Path, Err: syscall.ENOTDIR}) + } else { + rpkt = (&sshFxpOpenPacket{ + ID: p.ID, + Path: p.Path, + Pflags: sshFxfRead, + }).respond(s) + } + case *sshFxpReadPacket: + var err error = EBADF + f, ok := s.getHandle(p.Handle) + if ok { + err = nil + data := p.getDataSlice(s.pktMgr.alloc, orderID) + n, _err := f.ReadAt(data, int64(p.Offset)) + if _err != nil && (_err != io.EOF || n == 0) { + err = _err + } + rpkt = &sshFxpDataPacket{ + ID: p.ID, + Length: uint32(n), + Data: data[:n], + // do not use data[:n:n] here to clamp the capacity, we allocated extra capacity above to avoid reallocations + } + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + + case *sshFxpWritePacket: + f, ok := s.getHandle(p.Handle) + var err error = EBADF + if ok { + _, err = f.WriteAt(p.Data, int64(p.Offset)) + } + rpkt = statusFromError(p.ID, err) + case *sshFxpExtendedPacket: + if p.SpecificPacket == nil { + rpkt = statusFromError(p.ID, ErrSSHFxOpUnsupported) + } else { + rpkt = p.respond(s) + } + case serverRespondablePacket: + rpkt = p.respond(s) + default: + return fmt.Errorf("unexpected packet type %T", p) + } + + s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID)) + return nil +} + +// Serve serves SFTP connections until the streams stop or the SFTP subsystem +// is stopped. 
+func (svr *Server) Serve() error { + defer func() { + if svr.pktMgr.alloc != nil { + svr.pktMgr.alloc.Free() + } + }() + var wg sync.WaitGroup + runWorker := func(ch chan orderedRequest) { + wg.Add(1) + go func() { + defer wg.Done() + if err := svr.sftpServerWorker(ch); err != nil { + svr.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := svr.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) + if err != nil { + // we don't care about releasing allocated pages here, the server will quit and the allocator freed + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + switch { + case errors.Is(err, errUnknownExtendedPacket): + //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { + // debug("failed to send err packet: %v", err) + // svr.conn.Close() // shuts down recvPacket + // break + //} + default: + debug("makePacket err: %v", err) + svr.conn.Close() // shuts down recvPacket + break + } + } + + pktChan <- svr.pktMgr.newOrderedRequest(pkt) + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // close any still-open files + for handle, file := range svr.openFiles { + fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) + file.Close() + } + return err // error from recvPacket +} + +type ider interface { + id() uint32 +} + +// The init packet has no ID, so we just return a zero-value ID +func (p *sshFxInitPacket) id() uint32 { return 0 } + +type sshFxpStatResponse struct { + ID uint32 + info os.FileInfo +} + +func (p *sshFxpStatResponse) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(id) + + b := make([]byte, 4, l) + b = append(b, sshFxpAttrs) + b = marshalUint32(b, p.ID) + + var payload []byte + payload = marshalFileInfo(payload, p.info) + + return b, payload, nil +} + +func (p *sshFxpStatResponse) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +var emptyFileStat = []interface{}{uint32(0)} + +func (p *sshFxpOpenPacket) readonly() bool { + return !p.hasPflags(sshFxfWrite) +} + +func (p *sshFxpOpenPacket) hasPflags(flags ...uint32) bool { + for _, f := range flags { + if p.Pflags&f == 0 { + return false + } + } + return true +} + +func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { + var osFlags int + if p.hasPflags(sshFxfRead, sshFxfWrite) { + osFlags |= os.O_RDWR + } else if p.hasPflags(sshFxfWrite) { + osFlags |= os.O_WRONLY + } else if p.hasPflags(sshFxfRead) { + osFlags |= os.O_RDONLY + } else { + // how are they opening? + return statusFromError(p.ID, syscall.EINVAL) + } + + // Don't use O_APPEND flag as it conflicts with WriteAt. + // The sshFxfAppend flag is a no-op here as the client sends the offsets. 
+ + if p.hasPflags(sshFxfCreat) { + osFlags |= os.O_CREATE + } + if p.hasPflags(sshFxfTrunc) { + osFlags |= os.O_TRUNC + } + if p.hasPflags(sshFxfExcl) { + osFlags |= os.O_EXCL + } + + f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644) + if err != nil { + return statusFromError(p.ID, err) + } + + handle := svr.nextHandle(f) + return &sshFxpHandlePacket{ID: p.ID, Handle: handle} +} + +func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket { + f, ok := svr.getHandle(p.Handle) + if !ok { + return statusFromError(p.ID, EBADF) + } + + dirents, err := f.Readdir(128) + if err != nil { + return statusFromError(p.ID, err) + } + + idLookup := osIDLookup{} + + ret := &sshFxpNamePacket{ID: p.ID} + for _, dirent := range dirents { + ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{ + Name: dirent.Name(), + LongName: runLs(idLookup, dirent), + Attrs: []interface{}{dirent}, + }) + } + return ret +} + +func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + p.Path = toLocalPath(p.Path) + + debug("setstat name \"%s\"", p.Path) + if (p.Flags & sshFileXferAttrSize) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = os.Truncate(p.Path, int64(size)) + } + } + if (p.Flags & sshFileXferAttrPermissions) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = os.Chmod(p.Path, os.FileMode(mode)) + } + } + if (p.Flags & sshFileXferAttrACmodTime) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(p.Path, atimeT, mtimeT) + } + } + if (p.Flags & sshFileXferAttrUIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = os.Chown(p.Path, int(uid), int(gid)) + } + } + + return statusFromError(p.ID, err) +} + +func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket { + f, ok := svr.getHandle(p.Handle) + if !ok { + return statusFromError(p.ID, EBADF) + } + + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("fsetstat name \"%s\"", f.Name()) + if (p.Flags & sshFileXferAttrSize) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = f.Truncate(int64(size)) + } + } + if (p.Flags & sshFileXferAttrPermissions) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = f.Chmod(os.FileMode(mode)) + } + } + if (p.Flags & sshFileXferAttrACmodTime) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(f.Name(), atimeT, mtimeT) + } + } + if (p.Flags & sshFileXferAttrUIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = f.Chown(int(uid), int(gid)) + } + } + + return statusFromError(p.ID, err) +} + +func statusFromError(id uint32, err error) *sshFxpStatusPacket { + ret := 
&sshFxpStatusPacket{ + ID: id, + StatusError: StatusError{ + // sshFXOk = 0 + // sshFXEOF = 1 + // sshFXNoSuchFile = 2 ENOENT + // sshFXPermissionDenied = 3 + // sshFXFailure = 4 + // sshFXBadMessage = 5 + // sshFXNoConnection = 6 + // sshFXConnectionLost = 7 + // sshFXOPUnsupported = 8 + Code: sshFxOk, + }, + } + if err == nil { + return ret + } + + debug("statusFromError: error is %T %#v", err, err) + ret.StatusError.Code = sshFxFailure + ret.StatusError.msg = err.Error() + + if os.IsNotExist(err) { + ret.StatusError.Code = sshFxNoSuchFile + return ret + } + if code, ok := translateSyscallError(err); ok { + ret.StatusError.Code = code + return ret + } + + switch e := err.(type) { + case fxerr: + ret.StatusError.Code = uint32(e) + default: + if e == io.EOF { + ret.StatusError.Code = sshFxEOF + } + } + + return ret +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go new file mode 100644 index 000000000..8c01dac52 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go @@ -0,0 +1,21 @@ +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? + Flag: uint64(stat.Flags), // assuming POSIX? + Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go new file mode 100644 index 000000000..94b6d832c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -0,0 +1,29 @@ +// +build darwin linux + +// fill in statvfs structure with OS specific values +// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) + +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + retPkt, err := getStatVFSForPath(p.Path) + if err != nil { + return statusFromError(p.ID, err) + } + retPkt.ID = p.ID + + return retPkt +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(name, &stat); err != nil { + return nil, err + } + + return statvfsFromStatfst(&stat) +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go new file mode 100644 index 000000000..1d180d47c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -0,0 +1,22 @@ +// +build linux + +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Frsize), + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Flag: uint64(stat.Flags), // assuming POSIX? 
+ Namemax: uint64(stat.Namelen), + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_plan9.go b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go new file mode 100644 index 000000000..e71a27d37 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go @@ -0,0 +1,13 @@ +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + return statusFromError(p.ID, syscall.EPLAN9) +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + return nil, syscall.EPLAN9 +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go new file mode 100644 index 000000000..fbf49068f --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -0,0 +1,15 @@ +// +build !darwin,!linux,!plan9 + +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + return statusFromError(p.ID, syscall.ENOTSUP) +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go new file mode 100644 index 000000000..9a63c39dc --- /dev/null +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -0,0 +1,258 @@ +// Package sftp implements the SSH File Transfer Protocol as described in +// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +package sftp + +import ( + "fmt" +) + +const ( + sshFxpInit = 1 + sshFxpVersion = 2 + sshFxpOpen = 3 + sshFxpClose = 4 + sshFxpRead = 5 + sshFxpWrite = 6 + sshFxpLstat = 7 + sshFxpFstat = 8 + sshFxpSetstat = 9 + sshFxpFsetstat = 10 + sshFxpOpendir = 11 + sshFxpReaddir = 12 + sshFxpRemove = 13 + sshFxpMkdir = 14 + sshFxpRmdir = 15 + sshFxpRealpath = 16 + sshFxpStat = 17 + sshFxpRename = 18 + sshFxpReadlink = 19 + sshFxpSymlink = 20 + sshFxpStatus = 101 + sshFxpHandle = 102 + sshFxpData = 103 + sshFxpName = 104 + sshFxpAttrs = 105 + sshFxpExtended = 200 + sshFxpExtendedReply = 201 +) + +const ( + sshFxOk = 0 + sshFxEOF = 1 + sshFxNoSuchFile = 2 + sshFxPermissionDenied = 3 + sshFxFailure = 4 + sshFxBadMessage = 5 + sshFxNoConnection = 6 + sshFxConnectionLost = 7 + sshFxOPUnsupported = 8 + + // see draft-ietf-secsh-filexfer-13 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + sshFxInvalidHandle = 9 + sshFxNoSuchPath = 10 + sshFxFileAlreadyExists = 11 + sshFxWriteProtect = 12 + sshFxNoMedia = 13 + sshFxNoSpaceOnFilesystem = 14 + sshFxQuotaExceeded = 15 + sshFxUnknownPrincipal = 16 + sshFxLockConflict = 17 + sshFxDirNotEmpty = 18 + sshFxNotADirectory = 19 + sshFxInvalidFilename = 20 + sshFxLinkLoop = 21 + sshFxCannotDelete = 22 + sshFxInvalidParameter = 23 + sshFxFileIsADirectory = 24 + sshFxByteRangeLockConflict = 25 + sshFxByteRangeLockRefused = 26 + sshFxDeletePending = 27 + sshFxFileCorrupt = 28 + sshFxOwnerInvalid = 29 + sshFxGroupInvalid = 30 + sshFxNoMatchingByteRangeLock = 31 +) + +const ( + sshFxfRead = 0x00000001 + sshFxfWrite = 0x00000002 + sshFxfAppend = 0x00000004 + sshFxfCreat = 0x00000008 + sshFxfTrunc = 0x00000010 + sshFxfExcl = 0x00000020 +) + +var ( + // supportedSFTPExtensions defines the supported extensions + supportedSFTPExtensions = []sshExtensionPair{ + {"hardlink@openssh.com", "1"}, + {"posix-rename@openssh.com", "1"}, + {"statvfs@openssh.com", "2"}, + } + sftpExtensions = supportedSFTPExtensions +) + +type fxp uint8 + +func (f fxp) String() string { + switch f { + case sshFxpInit: + return "SSH_FXP_INIT" + case 
sshFxpVersion: + return "SSH_FXP_VERSION" + case sshFxpOpen: + return "SSH_FXP_OPEN" + case sshFxpClose: + return "SSH_FXP_CLOSE" + case sshFxpRead: + return "SSH_FXP_READ" + case sshFxpWrite: + return "SSH_FXP_WRITE" + case sshFxpLstat: + return "SSH_FXP_LSTAT" + case sshFxpFstat: + return "SSH_FXP_FSTAT" + case sshFxpSetstat: + return "SSH_FXP_SETSTAT" + case sshFxpFsetstat: + return "SSH_FXP_FSETSTAT" + case sshFxpOpendir: + return "SSH_FXP_OPENDIR" + case sshFxpReaddir: + return "SSH_FXP_READDIR" + case sshFxpRemove: + return "SSH_FXP_REMOVE" + case sshFxpMkdir: + return "SSH_FXP_MKDIR" + case sshFxpRmdir: + return "SSH_FXP_RMDIR" + case sshFxpRealpath: + return "SSH_FXP_REALPATH" + case sshFxpStat: + return "SSH_FXP_STAT" + case sshFxpRename: + return "SSH_FXP_RENAME" + case sshFxpReadlink: + return "SSH_FXP_READLINK" + case sshFxpSymlink: + return "SSH_FXP_SYMLINK" + case sshFxpStatus: + return "SSH_FXP_STATUS" + case sshFxpHandle: + return "SSH_FXP_HANDLE" + case sshFxpData: + return "SSH_FXP_DATA" + case sshFxpName: + return "SSH_FXP_NAME" + case sshFxpAttrs: + return "SSH_FXP_ATTRS" + case sshFxpExtended: + return "SSH_FXP_EXTENDED" + case sshFxpExtendedReply: + return "SSH_FXP_EXTENDED_REPLY" + default: + return "unknown" + } +} + +type fx uint8 + +func (f fx) String() string { + switch f { + case sshFxOk: + return "SSH_FX_OK" + case sshFxEOF: + return "SSH_FX_EOF" + case sshFxNoSuchFile: + return "SSH_FX_NO_SUCH_FILE" + case sshFxPermissionDenied: + return "SSH_FX_PERMISSION_DENIED" + case sshFxFailure: + return "SSH_FX_FAILURE" + case sshFxBadMessage: + return "SSH_FX_BAD_MESSAGE" + case sshFxNoConnection: + return "SSH_FX_NO_CONNECTION" + case sshFxConnectionLost: + return "SSH_FX_CONNECTION_LOST" + case sshFxOPUnsupported: + return "SSH_FX_OP_UNSUPPORTED" + default: + return "unknown" + } +} + +type unexpectedPacketErr struct { + want, got uint8 +} + +func (u *unexpectedPacketErr) Error() string { + return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) +} + +func unimplementedPacketErr(u uint8) error { + return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) +} + +type unexpectedIDErr struct{ want, got uint32 } + +func (u *unexpectedIDErr) Error() string { + return fmt.Sprintf("sftp: unexpected id: want %d, got %d", u.want, u.got) +} + +func unimplementedSeekWhence(whence int) error { + return fmt.Errorf("sftp: unimplemented seek whence %d", whence) +} + +func unexpectedCount(want, got uint32) error { + return fmt.Errorf("sftp: unexpected count: want %d, got %d", want, got) +} + +type unexpectedVersionErr struct{ want, got uint32 } + +func (u *unexpectedVersionErr) Error() string { + return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) +} + +// A StatusError is returned when an SFTP operation fails, and provides +// additional information about the failure. 
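As an illustration of how calling code might inspect it (a sketch; classify is an illustrative name, and the ErrSSHFx* values are the package's exported fxerr codes; only ErrSSHFxOpUnsupported appears elsewhere in this diff, so the other constant names here are assumptions):

package example

import (
	"errors"
	"fmt"

	"github.com/pkg/sftp"
)

// classify maps an error returned by an SFTP operation to a friendlier
// message by inspecting the status code carried in a *StatusError.
func classify(err error) string {
	var statusErr *sftp.StatusError
	if !errors.As(err, &statusErr) {
		return fmt.Sprintf("not an SFTP status error: %v", err)
	}
	switch statusErr.FxCode() {
	case sftp.ErrSSHFxNoSuchFile:
		return "remote file does not exist"
	case sftp.ErrSSHFxPermissionDenied:
		return "permission denied on the remote side"
	default:
		// Error() formats as, e.g., sftp: "failure" (SSH_FX_FAILURE)
		return statusErr.Error()
	}
}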
+type StatusError struct {
+	Code      uint32
+	msg, lang string
+}
+
+func (s *StatusError) Error() string {
+	return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code))
+}
+
+// FxCode returns the error code typed to match against the exported codes.
+func (s *StatusError) FxCode() fxerr {
+	return fxerr(s.Code)
+}
+
+func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) {
+	for _, supportedExtension := range supportedSFTPExtensions {
+		if supportedExtension.Name == extensionName {
+			return supportedExtension, nil
+		}
+	}
+	return sshExtensionPair{}, fmt.Errorf("unsupported extension: %s", extensionName)
+}
+
+// SetSFTPExtensions allows customizing the supported server extensions.
+// See the variable supportedSFTPExtensions for the supported extensions.
+// This method accepts a list of extension names, for example 'hardlink@openssh.com'.
+// If an invalid extension is given, an error will be returned and nothing will be changed.
+func SetSFTPExtensions(extensions ...string) error {
+	tempExtensions := []sshExtensionPair{}
+	for _, extension := range extensions {
+		sftpExtension, err := getSupportedExtensionByName(extension)
+		if err != nil {
+			return err
+		}
+		tempExtensions = append(tempExtensions, sftpExtension)
+	}
+	sftpExtensions = tempExtensions
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/stat_plan9.go b/vendor/github.com/pkg/sftp/stat_plan9.go
new file mode 100644
index 000000000..761abdf56
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/stat_plan9.go
@@ -0,0 +1,103 @@
+package sftp
+
+import (
+	"os"
+	"syscall"
+)
+
+var EBADF = syscall.NewError("fd out of range or not open")
+
+func wrapPathError(filepath string, err error) error {
+	if errno, ok := err.(syscall.ErrorString); ok {
+		return &os.PathError{Path: filepath, Err: errno}
+	}
+	return err
+}
+
+// translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.ErrorString) uint32 {
+	switch errno {
+	case "":
+		return sshFxOk
+	case syscall.ENOENT:
+		return sshFxNoSuchFile
+	case syscall.EPERM:
+		return sshFxPermissionDenied
+	}
+
+	return sshFxFailure
+}
+
+func translateSyscallError(err error) (uint32, bool) {
+	switch e := err.(type) {
+	case syscall.ErrorString:
+		return translateErrno(e), true
+	case *os.PathError:
+		debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err)
+		if errno, ok := e.Err.(syscall.ErrorString); ok {
+			return translateErrno(errno), true
+		}
+	}
+	return 0, false
+}
+
+// isRegular returns true if the mode describes a regular file.
+func isRegular(mode uint32) bool { + return mode&S_IFMT == syscall.S_IFREG +} + +// toFileMode converts sftp filemode bits to the os.FileMode specification +func toFileMode(mode uint32) os.FileMode { + var fm = os.FileMode(mode & 0777) + + switch mode & S_IFMT { + case syscall.S_IFBLK: + fm |= os.ModeDevice + case syscall.S_IFCHR: + fm |= os.ModeDevice | os.ModeCharDevice + case syscall.S_IFDIR: + fm |= os.ModeDir + case syscall.S_IFIFO: + fm |= os.ModeNamedPipe + case syscall.S_IFLNK: + fm |= os.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fm |= os.ModeSocket + } + + return fm +} + +// fromFileMode converts from the os.FileMode specification to sftp filemode bits +func fromFileMode(mode os.FileMode) uint32 { + ret := uint32(mode & os.ModePerm) + + switch mode & os.ModeType { + case os.ModeDevice | os.ModeCharDevice: + ret |= syscall.S_IFCHR + case os.ModeDevice: + ret |= syscall.S_IFBLK + case os.ModeDir: + ret |= syscall.S_IFDIR + case os.ModeNamedPipe: + ret |= syscall.S_IFIFO + case os.ModeSymlink: + ret |= syscall.S_IFLNK + case 0: + ret |= syscall.S_IFREG + case os.ModeSocket: + ret |= syscall.S_IFSOCK + } + + return ret +} + +// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should +// be able to send these bits to a POSIX server. +const ( + s_ISUID = 04000 + s_ISGID = 02000 + s_ISVTX = 01000 +) diff --git a/vendor/github.com/pkg/sftp/stat_posix.go b/vendor/github.com/pkg/sftp/stat_posix.go new file mode 100644 index 000000000..5b870e23c --- /dev/null +++ b/vendor/github.com/pkg/sftp/stat_posix.go @@ -0,0 +1,124 @@ +//go:build !plan9 +// +build !plan9 + +package sftp + +import ( + "os" + "syscall" +) + +const EBADF = syscall.EBADF + +func wrapPathError(filepath string, err error) error { + if errno, ok := err.(syscall.Errno); ok { + return &os.PathError{Path: filepath, Err: errno} + } + return err +} + +// translateErrno translates a syscall error number to a SFTP error code. +func translateErrno(errno syscall.Errno) uint32 { + switch errno { + case 0: + return sshFxOk + case syscall.ENOENT: + return sshFxNoSuchFile + case syscall.EACCES, syscall.EPERM: + return sshFxPermissionDenied + } + + return sshFxFailure +} + +func translateSyscallError(err error) (uint32, bool) { + switch e := err.(type) { + case syscall.Errno: + return translateErrno(e), true + case *os.PathError: + debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) + if errno, ok := e.Err.(syscall.Errno); ok { + return translateErrno(errno), true + } + } + return 0, false +} + +// isRegular returns true if the mode describes a regular file. 
+func isRegular(mode uint32) bool {
+	return mode&S_IFMT == syscall.S_IFREG
+}
+
+// toFileMode converts sftp filemode bits to the os.FileMode specification
+func toFileMode(mode uint32) os.FileMode {
+	var fm = os.FileMode(mode & 0777)
+
+	switch mode & S_IFMT {
+	case syscall.S_IFBLK:
+		fm |= os.ModeDevice
+	case syscall.S_IFCHR:
+		fm |= os.ModeDevice | os.ModeCharDevice
+	case syscall.S_IFDIR:
+		fm |= os.ModeDir
+	case syscall.S_IFIFO:
+		fm |= os.ModeNamedPipe
+	case syscall.S_IFLNK:
+		fm |= os.ModeSymlink
+	case syscall.S_IFREG:
+		// nothing to do
+	case syscall.S_IFSOCK:
+		fm |= os.ModeSocket
+	}
+
+	if mode&syscall.S_ISUID != 0 {
+		fm |= os.ModeSetuid
+	}
+	if mode&syscall.S_ISGID != 0 {
+		fm |= os.ModeSetgid
+	}
+	if mode&syscall.S_ISVTX != 0 {
+		fm |= os.ModeSticky
+	}
+
+	return fm
+}
+
+// fromFileMode converts from the os.FileMode specification to sftp filemode bits
+func fromFileMode(mode os.FileMode) uint32 {
+	ret := uint32(mode & os.ModePerm)
+
+	switch mode & os.ModeType {
+	case os.ModeDevice | os.ModeCharDevice:
+		ret |= syscall.S_IFCHR
+	case os.ModeDevice:
+		ret |= syscall.S_IFBLK
+	case os.ModeDir:
+		ret |= syscall.S_IFDIR
+	case os.ModeNamedPipe:
+		ret |= syscall.S_IFIFO
+	case os.ModeSymlink:
+		ret |= syscall.S_IFLNK
+	case 0:
+		ret |= syscall.S_IFREG
+	case os.ModeSocket:
+		ret |= syscall.S_IFSOCK
+	}
+
+	if mode&os.ModeSetuid != 0 {
+		ret |= syscall.S_ISUID
+	}
+	if mode&os.ModeSetgid != 0 {
+		ret |= syscall.S_ISGID
+	}
+	if mode&os.ModeSticky != 0 {
+		ret |= syscall.S_ISVTX
+	}
+
+	return ret
+}
+
+const (
+	s_ISUID = syscall.S_ISUID
+	s_ISGID = syscall.S_ISGID
+	s_ISVTX = syscall.S_ISVTX
+)
diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go
new file mode 100644
index 000000000..d40457776
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/syscall_fixed.go
@@ -0,0 +1,9 @@
+// +build plan9 windows js,wasm
+
+// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of
+// 0xf000. None of the other S_IFxyz values include the "1" (in 0x1f000),
+// which prevents them from matching the bitmask.
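A quick arithmetic check of the comment above, as a standalone sketch: the 0x18000 value is a hypothetical mode with the stray 0x10000 bit set, and 0x8000 is syscall.S_IFREG on these platforms.

package main

import "fmt"

func main() {
	const mode = 0x18000 // hypothetical wire mode: S_IFREG (0x8000) plus the stray 0x10000 bit

	// With Go's 0x1f000 mask the stray bit survives, so the type test fails:
	fmt.Println(mode&0x1f000 == 0x8000) // false

	// With the corrected 0xf000 mask only the real type bits remain:
	fmt.Println(mode&0xf000 == 0x8000) // true
}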
+ +package sftp + +const S_IFMT = 0xf000 diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go new file mode 100644 index 000000000..4c2b240cf --- /dev/null +++ b/vendor/github.com/pkg/sftp/syscall_good.go @@ -0,0 +1,8 @@ +// +build !plan9,!windows +// +build !js !wasm + +package sftp + +import "syscall" + +const S_IFMT = syscall.S_IFMT diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index cda3fdd35..bc62161d6 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519" import ( "crypto/subtle" - "fmt" + "errors" + "strconv" "golang.org/x/crypto/curve25519/internal/field" ) @@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) { func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { var in [32]byte if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) + return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") } if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) + return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") } copy(in[:], scalar) if &point[0] == &Basepoint[0] { @@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { copy(base[:], point) ScalarMult(dst, &in, &base) if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") + return nil, errors.New("bad input point: low order point") } } return dst[:], nil diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index dbc79d583..3c4d18a15 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -772,7 +772,7 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, } func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) { - if algorithm == "" || algorithm == s.pub.Type() { + if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) { return s.Sign(rand, data) } @@ -791,6 +791,33 @@ func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algo var _ ssh.AlgorithmSigner = &agentKeyringSigner{} +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in certs.go. +var certKeyAlgoNames = map[string]string{ + ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA, + ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256, + ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512, + ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA, + ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256, + ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384, + ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521, + ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256, + ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519, + ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. 
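For instance, using values from the certKeyAlgoNames map above (an illustrative aside, not part of the patch):

// underlyingAlgo(ssh.CertAlgoED25519v01) // returns ssh.KeyAlgoED25519 ("ssh-ed25519")
// underlyingAlgo(ssh.KeyAlgoRSASHA256)   // not a certificate algorithm: returned unchanged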
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index dbc79d583..3c4d18a15 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -772,7 +772,7 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
 }
 
 func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
-	if algorithm == "" || algorithm == s.pub.Type() {
+	if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) {
 		return s.Sign(rand, data)
 	}
 
@@ -791,6 +791,33 @@ func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algo
 
 var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
 
+// certKeyAlgoNames is a mapping from known certificate algorithm names to the
+// corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in certs.go.
+var certKeyAlgoNames = map[string]string{
+	ssh.CertAlgoRSAv01:        ssh.KeyAlgoRSA,
+	ssh.CertAlgoRSASHA256v01:  ssh.KeyAlgoRSASHA256,
+	ssh.CertAlgoRSASHA512v01:  ssh.KeyAlgoRSASHA512,
+	ssh.CertAlgoDSAv01:        ssh.KeyAlgoDSA,
+	ssh.CertAlgoECDSA256v01:   ssh.KeyAlgoECDSA256,
+	ssh.CertAlgoECDSA384v01:   ssh.KeyAlgoECDSA384,
+	ssh.CertAlgoECDSA521v01:   ssh.KeyAlgoECDSA521,
+	ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256,
+	ssh.CertAlgoED25519v01:    ssh.KeyAlgoED25519,
+	ssh.CertAlgoSKED25519v01:  ssh.KeyAlgoSKED25519,
+}
+
+// underlyingAlgo returns the signature algorithm associated with algo (which is
+// an advertised or negotiated public key or host key algorithm). These are
+// usually the same, except for certificate algorithms.
+func underlyingAlgo(algo string) string {
+	if a, ok := certKeyAlgoNames[algo]; ok {
+		return a
+	}
+	return algo
+}
+
 // Calls an extension method. It is up to the agent implementation as to whether or not
 // any particular extension is supported and may always return an error. Because the
 // type of the response is up to the implementation, this returns the bytes of the
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index a69e22491..4600c2077 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -460,6 +460,8 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
 
 // certKeyAlgoNames is a mapping from known certificate algorithm names to the
 // corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in agent/client.go.
 var certKeyAlgoNames = map[string]string{
 	CertAlgoRSAv01:        KeyAlgoRSA,
 	CertAlgoRSASHA256v01:  KeyAlgoRSASHA256,
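The one-line SignWithAlgorithm change above is what the new map exists for. For a certificate-backed key, s.pub.Type() reports the certificate algorithm name (for example ssh-ed25519-cert-v01@openssh.com) while the requested signature algorithm is the underlying one (ssh-ed25519), so the old equality check missed the fast path and fell through to the flags-based request, which in the upstream code only understands the RSA SHA-2 flags. A standalone sketch of the lookup, using the exported ssh constants; the vendored map itself is unexported, and certToKeyAlgo and underlying here are illustrative names:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// certToKeyAlgo mirrors a subset of the vendored certKeyAlgoNames map.
var certToKeyAlgo = map[string]string{
	ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256,
	ssh.CertAlgoED25519v01:   ssh.KeyAlgoED25519,
}

// underlying resolves a (possibly certificate) algorithm name to the
// signature algorithm that actually goes on the wire.
func underlying(algo string) string {
	if a, ok := certToKeyAlgo[algo]; ok {
		return a
	}
	return algo // plain key algorithms map to themselves
}

func main() {
	fmt.Println(underlying(ssh.CertAlgoED25519v01)) // ssh-ed25519
	fmt.Println(underlying(ssh.KeyAlgoRSASHA256))   // rsa-sha2-256
}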
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2a1beb611..e4d1330fe 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -91,7 +91,7 @@ github.com/containernetworking/cni/pkg/version
 # github.com/containernetworking/plugins v1.1.1
 ## explicit
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab
+# github.com/containers/buildah v1.27.0
 ## explicit
 github.com/containers/buildah
 github.com/containers/buildah/bind
@@ -114,7 +114,7 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.49.1-0.20220729221035-246800047d46
+# github.com/containers/common v0.49.2-0.20220809074359-b0ea008ba661
 ## explicit
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
@@ -157,6 +157,7 @@ github.com/containers/common/pkg/secrets/filedriver
 github.com/containers/common/pkg/secrets/passdriver
 github.com/containers/common/pkg/secrets/shelldriver
 github.com/containers/common/pkg/signal
+github.com/containers/common/pkg/ssh
 github.com/containers/common/pkg/subscriptions
 github.com/containers/common/pkg/supplemented
 github.com/containers/common/pkg/sysinfo
@@ -403,9 +404,6 @@ github.com/docker/go-units
 # github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
 ## explicit
 github.com/docker/libnetwork/ipamutils
-# github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4
-## explicit
-github.com/dtylman/scp
 # github.com/felixge/httpsnoop v1.0.1
 github.com/felixge/httpsnoop
 # github.com/fsnotify/fsnotify v1.5.4
@@ -488,6 +486,8 @@ github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/pgzip v1.2.5
 github.com/klauspost/pgzip
+# github.com/kr/fs v0.1.0
+github.com/kr/fs
 # github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e
 github.com/letsencrypt/boulder/core
 github.com/letsencrypt/boulder/core/proto
@@ -630,6 +630,9 @@ github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
 # github.com/pkg/errors v0.9.1
 github.com/pkg/errors
+# github.com/pkg/sftp v1.13.5
+github.com/pkg/sftp
+github.com/pkg/sftp/internal/encoding/ssh/filexfer
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
 # github.com/proglottis/gpgme v0.1.3
@@ -745,8 +748,7 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
-## explicit
+# golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
 golang.org/x/crypto/chacha20