26 files changed, 287 insertions, 67 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 33162e49f..70c3cb3da 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -13,6 +13,8 @@ env:
     ####
     #### Global variables used for all tasks
     ####
+    # Name of the ultimate destination branch for this build
+    DEST_BRANCH: "master"
     # Overrides default location (/tmp/cirrus) for repo clone
     GOPATH: "/var/tmp/go"
     GOSRC: "/var/tmp/go/src/github.com/containers/libpod"
@@ -29,9 +31,9 @@ env:
     ####
     #### Cache-image names to test with
     ###
-    FEDORA_CACHE_IMAGE_NAME: "fedora-30-libpod-5744029755506688"
-    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-29-libpod-5744029755506688"
-    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-5744029755506688"
+    FEDORA_CACHE_IMAGE_NAME: "fedora-30-libpod-5925244995371008"
+    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-29-libpod-5925244995371008"
+    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-5925244995371008"
     ####
     #### Variables for composing new cache-images (used in PR testing) from
@@ -118,7 +120,7 @@ gating_task:
     pipefail_enabledscript: 'if /bin/false | /bin/true; then echo "pipefail fault" && exit 72; fi'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
 # This task runs `make vendor` followed by ./hack/tree_status.sh to check
@@ -150,7 +152,7 @@ vendor_task:
         - 'cd ${GOSRC} && ./hack/tree_status.sh |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh |& ${TIMESTAMP}'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
 # This task runs `make varlink_api_generate` followed by ./hack/tree_status.sh to check
@@ -182,7 +184,7 @@ varlink_api_task:
         - 'cd ${GOSRC} && ./hack/tree_status.sh |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
 build_each_commit_task:
@@ -193,7 +195,7 @@ build_each_commit_task:
         - "varlink_api"
     # $CIRRUS_BASE_BRANCH is only set when testing a PR
-    only_if: $CIRRUS_BRANCH != 'master' &&
+    only_if: $CIRRUS_BRANCH != $DEST_BRANCH &&
              $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
     gce_instance:
@@ -213,7 +215,7 @@ build_each_commit_task:
         - 'make build-all-new-commits GIT_BASE_BRANCH=origin/$CIRRUS_BASE_BRANCH |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
 build_without_cgo_task:
@@ -224,7 +226,7 @@ build_without_cgo_task:
         - "varlink_api"
     # $CIRRUS_BASE_BRANCH is only set when testing a PR
-    only_if: $CIRRUS_BRANCH != 'master' &&
+    only_if: $CIRRUS_BRANCH != $DEST_BRANCH &&
             $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
     gce_instance:
@@ -243,7 +245,7 @@ build_without_cgo_task:
         - 'make build-no-cgo'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
 # Update metadata on VM images referenced by this repository state
@@ -341,7 +343,7 @@ testing_task:
         $SCRIPT_BASE/cache_release_archive.sh |& ${TIMESTAMP}
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
     always: &standardlogs
         ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
@@ -376,7 +378,7 @@ special_testing_rootless_task:
     system_test_script: '$SCRIPT_BASE/system_test.sh |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
     always:
         <<: *standardlogs
@@ -402,7 +404,7 @@ special_testing_in_podman_task:
     integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
     always:
         <<: *standardlogs
@@ -428,14 +430,14 @@ special_testing_cross_task:
     cache_release_archive_script: '$SCRIPT_BASE/cache_release_archive.sh |& ${TIMESTAMP}'
     on_failure:
-        failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
 # Test building of new cache-images for future PR testing, in this PR.
 test_build_cache_images_task:
     only_if: >-
-        $CIRRUS_BRANCH != 'master' &&
+        $CIRRUS_BRANCH != $DEST_BRANCH &&
         $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' &&
         $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*SYSTEM\s*TEST\s*\*\*\*.*'
@@ -470,7 +472,7 @@ test_build_cache_images_task:
 verify_test_built_images_task:
     only_if: >-
-        $CIRRUS_BRANCH != 'master' &&
+        $CIRRUS_BRANCH != $DEST_BRANCH &&
         $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' &&
         $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*SYSTEM\s*TEST\s*\*\*\*.*'
@@ -505,7 +507,9 @@ verify_test_built_images_task:
 # Post message to IRC if everything passed PR testing
 success_task:
-    only_if: $CIRRUS_BRANCH != 'master'
+    # This task is a required-pass in github settings,
+    # it blocks PRs from merging if a depends_on task fails
+    only_if: $CIRRUS_BRANCH != $DEST_BRANCH
     # ignores any dependent task conditions, include everything except 'release'
     depends_on: &alltasks
@@ -555,3 +559,6 @@ release_task:
         GCPROJECT: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
     uncache_release_archives_script: '$SCRIPT_BASE/uncache_release_archives.sh |& ${TIMESTAMP}'
+
+    on_failure:
+        failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh'
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -20,6 +20,7 @@ SHAREDIR_CONTAINERS ?= ${PREFIX}/share/containers
 ETCDIR ?= /etc
 TMPFILESDIR ?= ${PREFIX}/lib/tmpfiles.d
 SYSTEMDDIR ?= ${PREFIX}/lib/systemd/system
+BUILDFLAGS ?=
 BUILDTAGS ?= \
 	$(shell hack/apparmor_tag.sh) \
 	$(shell hack/btrfs_installed_tag.sh) \
@@ -147,10 +148,10 @@ test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
 	$(GO) build -ldflags '$(LDFLAGS)' -o $@ $(PROJECT)/test/goecho
 podman: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman
-	$(GO) build -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
+	$(GO) build $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
 podman-remote: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote environment
-	$(GO) build -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/$@ $(PROJECT)/cmd/podman
+	$(GO) build $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/$@ $(PROJECT)/cmd/podman
"$(BUILDTAGS) remoteclient" -o bin/$@ $(PROJECT)/cmd/podman podman-remote-darwin: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote OSX environment CGO_ENABLED=0 GOOS=darwin $(GO) build -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@ $(PROJECT)/cmd/podman diff --git a/cmd/podman/common.go b/cmd/podman/common.go index 96a1c2244..15f753d55 100644 --- a/cmd/podman/common.go +++ b/cmd/podman/common.go @@ -244,23 +244,23 @@ func getCreateFlags(c *cliconfig.PodmanCommand) { "help", false, "", ) createFlags.String( - "healthcheck-command", "", + "health-cmd", "", "set a healthcheck command for the container ('none' disables the existing healthcheck)", ) createFlags.String( - "healthcheck-interval", cliconfig.DefaultHealthCheckInterval, + "health-interval", cliconfig.DefaultHealthCheckInterval, "set an interval for the healthchecks (a value of disable results in no automatic timer setup)", ) createFlags.Uint( - "healthcheck-retries", cliconfig.DefaultHealthCheckRetries, + "health-retries", cliconfig.DefaultHealthCheckRetries, "the number of retries allowed before a healthcheck is considered to be unhealthy", ) createFlags.String( - "healthcheck-start-period", cliconfig.DefaultHealthCheckStartPeriod, + "health-start-period", cliconfig.DefaultHealthCheckStartPeriod, "the initialization time needed for a container to bootstrap", ) createFlags.String( - "healthcheck-timeout", cliconfig.DefaultHealthCheckTimeout, + "health-timeout", cliconfig.DefaultHealthCheckTimeout, "the maximum time allowed to complete the healthcheck before an interval is considered failed", ) createFlags.StringP( diff --git a/cmd/podman/create.go b/cmd/podman/create.go index 93141a800..262cdffe4 100644 --- a/cmd/podman/create.go +++ b/cmd/podman/create.go @@ -40,7 +40,7 @@ func init() { getCreateFlags(&createCommand.PodmanCommand) flags := createCommand.Flags() flags.SetInterspersed(false) - + flags.SetNormalizeFunc(aliasFlags) } func createCmd(c *cliconfig.CreateValues) error { diff --git a/cmd/podman/run.go b/cmd/podman/run.go index 76ab3d944..4836c99dc 100644 --- a/cmd/podman/run.go +++ b/cmd/podman/run.go @@ -34,6 +34,7 @@ func init() { runCommand.SetUsageTemplate(UsageTemplate()) flags := runCommand.Flags() flags.SetInterspersed(false) + flags.SetNormalizeFunc(aliasFlags) flags.Bool("sig-proxy", true, "Proxy received signals to the process") getCreateFlags(&runCommand.PodmanCommand) markFlagHiddenForRemoteClient("authfile", flags) diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index be1a731cc..fd319e215 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -26,7 +26,6 @@ import ( "github.com/docker/docker/pkg/signal" "github.com/docker/go-connections/nat" "github.com/docker/go-units" - "github.com/google/shlex" "github.com/opencontainers/selinux/go-selinux/label" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" @@ -116,6 +115,30 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
diff --git a/cmd/podman/shared/intermediate.go b/cmd/podman/shared/intermediate.go
index 855f84086..8337dc647 100644
--- a/cmd/podman/shared/intermediate.go
+++ b/cmd/podman/shared/intermediate.go
@@ -399,11 +399,11 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes
 	m["gidmap"] = newCRStringSlice(c, "gidmap")
 	m["group-add"] = newCRStringSlice(c, "group-add")
 	m["help"] = newCRBool(c, "help")
-	m["healthcheck-command"] = newCRString(c, "healthcheck-command")
-	m["healthcheck-interval"] = newCRString(c, "healthcheck-interval")
-	m["healthcheck-retries"] = newCRUint(c, "healthcheck-retries")
-	m["healthcheck-start-period"] = newCRString(c, "healthcheck-start-period")
-	m["healthcheck-timeout"] = newCRString(c, "healthcheck-timeout")
+	m["healthcheck-command"] = newCRString(c, "health-cmd")
+	m["healthcheck-interval"] = newCRString(c, "health-interval")
+	m["healthcheck-retries"] = newCRUint(c, "health-retries")
+	m["healthcheck-start-period"] = newCRString(c, "health-start-period")
+	m["healthcheck-timeout"] = newCRString(c, "health-timeout")
 	m["hostname"] = newCRString(c, "hostname")
 	m["http-proxy"] = newCRBool(c, "http-proxy")
 	m["image-volume"] = newCRString(c, "image-volume")
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index 0790f673a..c0ddaba4e 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -47,3 +47,19 @@ func markFlagHidden(flags *pflag.FlagSet, flag string) {
 		logrus.Errorf("unable to mark flag '%s' as hidden: %q", flag, err)
 	}
 }
+
+func aliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	switch name {
+	case "healthcheck-command":
+		name = "health-cmd"
+	case "healthcheck-interval":
+		name = "health-interval"
+	case "healthcheck-retries":
+		name = "health-retries"
+	case "healthcheck-start-period":
+		name = "health-start-period"
+	case "healthcheck-timeout":
+		name = "health-timeout"
+	}
+	return pflag.NormalizedName(name)
+}
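The `aliasFlags` normalize function above is what keeps the old `--healthcheck-*` spellings working after the rename. A small sketch of the same `pflag.SetNormalizeFunc` mechanism, with a made-up flag set and values for illustration (assumes `github.com/spf13/pflag` is available):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("run", pflag.ExitOnError)
	flags.String("health-cmd", "", "healthcheck command")

	// In the spirit of aliasFlags above: the old --healthcheck-command
	// spelling is rewritten to --health-cmd before lookup, so both
	// spellings resolve to the same flag.
	flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
		if name == "healthcheck-command" {
			name = "health-cmd"
		}
		return pflag.NormalizedName(name)
	})

	if err := flags.Parse([]string{"--healthcheck-command", "curl -f http://localhost/"}); err != nil {
		panic(err)
	}
	value, _ := flags.GetString("health-cmd")
	fmt.Println(value) // curl -f http://localhost/
}
```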
boolean_options="$boolean_options --detach -d - --no-healthcheck --rm --sig-proxy=false " diff --git a/contrib/cirrus/check_image.sh b/contrib/cirrus/check_image.sh index 67e807d61..690a38119 100755 --- a/contrib/cirrus/check_image.sh +++ b/contrib/cirrus/check_image.sh @@ -36,4 +36,10 @@ do "$(systemctl list-unit-files --no-legend $REQ_UNIT)" = "$REQ_UNIT enabled" || let "RET+=1" done +# Exits zero if any unit matching pattern is running +UNIT_STATUS=$(systemctl is-active $EVIL_UNITS; echo $?) +item_test "No interfering background units are active:" \ + "$UNIT_STATUS" -ne "0" || let "RET+=1" + +echo "Total failed tests: $RET" exit $RET diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index b2fcaa749..a9da3f4ce 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -45,6 +45,7 @@ fi # Defaults when not running under CI export CI="${CI:-false}" CIRRUS_CI="${CIRRUS_CI:-false}" +DEST_BRANCH="${DEST_BRANCH:-master}" CONTINUOUS_INTEGRATION="${CONTINUOUS_INTEGRATION:-false}" CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-libpod} CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-unknown$(date +%s)} # difficult to reliably discover @@ -77,6 +78,9 @@ ROOTLESS_ENV_RE='(CIRRUS_.+)|(ROOTLESS_.+)|(.+_IMAGE.*)|(.+_BASE)|(.*DIRPATH)|(. # Unsafe env. vars for display SECRET_ENV_RE='(IRCID)|(ACCOUNT)|(^GC[EP]..+)|(SSH)' +# Names of systemd units which should never be running +EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean" + SPECIALMODE="${SPECIALMODE:-none}" TEST_REMOTE_CLIENT="${TEST_REMOTE_CLIENT:-false}" export CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman} @@ -349,6 +353,20 @@ remove_packaged_podman_files(){ done } +systemd_banish(){ + echo "Disabling periodic services that could destabilize testing:" + set +e # Not all of these exist on every platform + for unit in $EVIL_UNITS + do + ooe.sh sudo systemctl stop $unit + ooe.sh sudo systemctl disable $unit + ooe.sh sudo systemctl disable $unit.timer + ooe.sh sudo systemctl mask $unit + ooe.sh sudo systemctl mask $unit.timer + done + set -e +} + _finalize(){ set +e # Don't fail at the very end set +e # make errors non-fatal diff --git a/contrib/cirrus/notice_master_failure.sh b/contrib/cirrus/notice_branch_failure.sh index 1fc15cdf9..423231dfd 100755 --- a/contrib/cirrus/notice_master_failure.sh +++ b/contrib/cirrus/notice_branch_failure.sh @@ -9,10 +9,10 @@ ETX="$(echo -n -e '\x03')" RED="${ETX}4" NOR="$(echo -n -e '\x0f')" -if [[ "$CIRRUS_BRANCH" =~ "master" ]] +if [[ "$CIRRUS_BRANCH" = "$DEST_BRANCH" ]] then BURL="https://cirrus-ci.com/build/$CIRRUS_BUILD_ID" - ircmsg "${RED}[Action Recommended]: ${NOR}Post-merge testing ${RED}$CIRRUS_BRANCH failed${NOR} in $CIRRUS_TASK_NAME on $(OS_RELEASE_ID)-$(OS_RELEASE_VER): $BURL. Please investigate, and re-run if appropriate." + ircmsg "${RED}[Action Recommended]: ${NOR}Post-merge testing on ${RED}$CIRRUS_BRANCH failed${NOR} in $CIRRUS_TASK_NAME on $(OS_RELEASE_ID)-$(OS_RELEASE_VER): $BURL. Please investigate, and re-run if appropriate." 
 fi

 # This script assumed to be executed on failure
diff --git a/contrib/cirrus/packer/fedora_base-setup.sh b/contrib/cirrus/packer/fedora_base-setup.sh
index a425b2b57..788a54c34 100644
--- a/contrib/cirrus/packer/fedora_base-setup.sh
+++ b/contrib/cirrus/packer/fedora_base-setup.sh
@@ -27,6 +27,9 @@ ooe.sh systemctl enable rngd
 echo "Setting cloud-init service to start after google-network-daemon.service"
 cp -v $GOSRC/$PACKER_BASE/cloud-init/fedora/cloud-init.service /etc/systemd/system/
+# Ensure there are no disruptive periodic services enabled by default in image
+systemd_banish
+
 rh_finalize
 echo "SUCCESS!"
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
index eb95db907..1e25a1a3c 100644
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ b/contrib/cirrus/packer/fedora_setup.sh
@@ -76,6 +76,9 @@ ooe.sh sudo dnf install -y \
     xz \
     zip
+# Ensure there are no disruptive periodic services enabled by default in image
+systemd_banish
+
 sudo /tmp/libpod/hack/install_catatonit.sh
 rh_finalize
diff --git a/contrib/cirrus/packer/ubuntu_setup.sh b/contrib/cirrus/packer/ubuntu_setup.sh
index 6209f2f89..dba191ad2 100644
--- a/contrib/cirrus/packer/ubuntu_setup.sh
+++ b/contrib/cirrus/packer/ubuntu_setup.sh
@@ -100,6 +100,9 @@ ooe.sh sudo update-grub
 sudo /tmp/libpod/hack/install_catatonit.sh
 ooe.sh sudo make -C /tmp/libpod install.libseccomp.sudo
+# Ensure there are no disruptive periodic services enabled by default in image
+systemd_banish
+
 ubuntu_finalize
 echo "SUCCESS!"
diff --git a/dependencies/analyses/README.md b/dependencies/analyses/README.md
new file mode 100644
index 000000000..a440a0ebd
--- /dev/null
+++ b/dependencies/analyses/README.md
@@ -0,0 +1,88 @@
+# A set of scripts and instructions that help to analyze and debloat go-lang dependencies
+
+Note that all scripts mentioned below follow the [KISS principle](https://en.wikipedia.org/wiki/KISS_principle) on purpose.
+The scripts are meant to be used in combination to aid in understanding the packages' dependencies and how they contribute to the size of the compiled binary.
+
+## Size of packages
+
+To analyze the size of all go packages used during the build process, pass the `-work -a` build flags to `go build`.
+The `-a` flag forces go to rebuild all packages even if they are already up-to-date (e.g., in the build cache), while the `-work` flag instructs go to print the temporary work directory used for compiling the packages.
+The path to the temporary work directory of `go-build` must be passed to `go-archive-analysis.sh` by setting it as an environment variable.
+The analysis script will then read and parse the build data and print a sorted table of the package size in bytes followed by the package name.
+
+Running such an analysis on libpod may look as follows:
+
+```
+# 1) Build the podman binary with `-work -a`.
+[libpod]$ BUILDFLAGS="-work -a" make podman
+[...]
+WORK=/tmp/go-build794287815
+
+# 2) Set the work directory as an environment variable and call the analysis script
+[libpod]$ WORK=/tmp/go-build794287815 ./dependencies/analyses/go-archive-analysis.sh | head -n10
+17M github.com/containers/libpod/cmd/podman/cliconfig
+13M github.com/containers/libpod/vendor/github.com/DataDog/zstd
+10M github.com/containers/libpod/vendor/k8s.io/api/core/v1
+3.7M net/http
+3.7M github.com/containers/libpod/libpod
+3.2M runtime
+2.7M github.com/containers/libpod/vendor/github.com/gogo/protobuf/proto
+2.5M github.com/containers/libpod/vendor/k8s.io/apimachinery/pkg/apis/meta/v1
+2.3M github.com/containers/libpod/vendor/github.com/vishvananda/netlink
+2.1M github.com/containers/libpod/cmd/podman/varlink
+```
+
+The output of the `go-archive-analysis.sh` script is a sorted table with the size in bytes followed by the package.
+The size denotes the size of the compiled package (i.e., the `.a` file).
+
+
+## Size of symbols in binary
+
+Once the binary is compiled, we can run another set of analyses on it.
+The `nm-symbols-analysis.sh` script is a wrapper around `go tool nm` and prints a table with the size in bytes followed by the symbol's name.
+To avoid information overload, the script prints only symbols from the text/code segment.
+
+Running such an analysis on libpod may look as follows:
+
+```
+# 1) Compile the binary
+[libpod]$ make podman
+[...]
+
+# 2) Run the script with the binary as an argument
+[libpod]$ ./dependencies/analyses/nm-symbols-analysis.sh ./bin/podman | grep "containers/libpod/libpod" | head -n10
+299 github.com/containers/libpod/libpod.(*BoltState).AddContainer
+658 github.com/containers/libpod/libpod.(*BoltState).AddContainerToPod
+2120 github.com/containers/libpod/libpod.(*BoltState).AddPod
+3773 github.com/containers/libpod/libpod.(*BoltState).AddPod.func1
+965 github.com/containers/libpod/libpod.(*BoltState).AddVolume
+1651 github.com/containers/libpod/libpod.(*BoltState).AddVolume.func1
+558 github.com/containers/libpod/libpod.(*BoltState).AllContainers
+282 github.com/containers/libpod/libpod.(*BoltState).AllContainers.func1
+1121 github.com/containers/libpod/libpod.(*BoltState).AllContainers.func1.1
+558 github.com/containers/libpod/libpod.(*BoltState).AllPods
+```
+
+Running the script can help identify sources of bloat and reveal potential candidates (e.g., entire packages, types, or functions) for refactoring.
+
+
+## Dependency Tree
+
+Use the `dependency-tree.sh` script to figure out which package includes which packages.
+The output of the script has the format `package: dependency_1, dependency_2, ...`.
+Each line is followed by a blank line to make it easier to read.
+The script generates two files:
+
+ - `direct-tree.txt` - listing direct dependencies
+ - `transitive-tree.txt` - listing direct and transitive dependencies
+
+Running such a dependency-tree analysis may look as follows:
+
+
+```
+[libpod]$ ./dependencies/analyses/dependency-tree.sh github.com/containers/libpod
+[libpod]$ grep "^github.com/containers/libpod/pkg/registries" direct-tree.txt
+github.com/containers/libpod/pkg/registries: github.com/containers/libpod/vendor/github.com/containers/image/pkg/sysregistriesv2, github.com/containers/libpod/vendor/github.com/containers/image/types, github.com/containers/libpod/pkg/rootless, github.com/containers/libpod/vendor/github.com/docker/distribution/reference, github.com/containers/libpod/vendor/github.com/pkg/errors, os, path/filepath, strings
+```
+
+As shown above, the script's output can then be used to query for specific packages (e.g., with `grep`).
diff --git a/dependencies/analyses/dependency-tree.sh b/dependencies/analyses/dependency-tree.sh
new file mode 100755
index 000000000..84085a50d
--- /dev/null
+++ b/dependencies/analyses/dependency-tree.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/bash
+
+if test "$#" -ne 1; then
+    echo "invalid arguments: usage: $0 path to package"
+    exit 1
+fi
+
+go list $1/... \
+    | xargs -d '\n' go list -f '{{ .ImportPath }}: {{ join .Imports ", " }}' \
+    | awk '{ printf "%s\n\n", $0 }' \
+    > direct-tree.tmp.$$ && mv -f direct-tree.tmp.$$ direct-tree.txt
+
+
+go list $1/... \
+    | xargs -d '\n' go list -f '{{ .ImportPath }}: {{ join .Deps ", " }}' \
+    | awk '{ printf "%s\n\n", $0 }' \
+    > transitive-tree.tmp.$$ && mv -f transitive-tree.tmp.$$ transitive-tree.txt
diff --git a/dependencies/analyses/go-archive-analysis.sh b/dependencies/analyses/go-archive-analysis.sh
new file mode 100755
index 000000000..f10145dad
--- /dev/null
+++ b/dependencies/analyses/go-archive-analysis.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/bash
+
+if [ -z "$WORK" ]
+then
+    echo "WORK environment variable must be set"
+    exit 1
+fi
+
+grep --no-filename packagefile $WORK/**/importcfg \
+    | awk '{ split($2, data, "="); printf "%s ", data[1]; system("du -sh " data[2]) }' \
+    | awk '{ printf "%s %s\n", $2, $1 }' \
+    | sort -u | sort -rh
diff --git a/dependencies/analyses/nm-symbols-analysis.sh b/dependencies/analyses/nm-symbols-analysis.sh
new file mode 100755
index 000000000..361b746e4
--- /dev/null
+++ b/dependencies/analyses/nm-symbols-analysis.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/bash
+
+if test "$#" -ne 1; then
+    echo "invalid arguments: usage: $0 path/to/binary"
+    exit 1
+fi
+
+go tool nm -size "$1" \
+    | awk 'NF==4 && $3=="t" {printf "%s\t\t%s\n", $2, $4}'
diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md
index 87e18dbb9..89f146670 100644
--- a/docs/podman-create.1.md
+++ b/docs/podman-create.1.md
@@ -272,26 +272,29 @@ The following example maps uids 0-2000 in the container to the uids 30000-31999
 Add additional groups to run as
-**--healthcheck-command**=*command*
+**--health-cmd**=*"command"* | *'["command", "arg1", ...]'*
 Set or alter a healthcheck command for a container. The command is a command to be executed inside your
 container that determines your container health.  The command is required for other healthcheck options
 to be applied.  A value of `none` disables existing healthchecks.
-**--healthcheck-interval**=*interval*
+Multiple options can be passed in the form of a JSON array; otherwise, the command will be interpreted
+as an argument to `/bin/sh -c`.
+
+**--health-interval**=*interval*
 Set an interval for the healthchecks (a value of `disable` results in no automatic timer setup) (default "30s")
-**--healthcheck-retries**=*retries*
+**--health-retries**=*retries*
 The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is `3`.
-**--healthcheck-start-period**=*period*
+**--health-start-period**=*period*
 The initialization time needed for a container to bootstrap. The value can be expressed in time format like
 `2m3s`. The default value is `0s`
-**--healthcheck-timeout**=*timeout*
+**--health-timeout**=*timeout*
 The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the
 value can be expressed in a time format such as `1m22s`. The default value is `30s`.
diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md
index 95499edd6..ebf774b24 100644
--- a/docs/podman-run.1.md
+++ b/docs/podman-run.1.md
@@ -279,26 +279,29 @@ The example maps gids 0-2000 in the container to the gids 30000-31999 on the hos
 Add additional groups to run as
-**--healthcheck-command**=*command*
+**--health-cmd**=*"command"* | *'["command", "arg1", ...]'*
 Set or alter a healthcheck command for a container. The command is a command to be executed inside your
 container that determines your container health.  The command is required for other healthcheck options
 to be applied.  A value of `none` disables existing healthchecks.
-**--healthcheck-interval**=*interval*
+Multiple options can be passed in the form of a JSON array; otherwise, the command will be interpreted
+as an argument to `/bin/sh -c`.
+
+**--health-interval**=*interval*
 Set an interval for the healthchecks (a value of `disable` results in no automatic timer setup) (default "30s")
-**--healthcheck-retries**=*retries*
+**--health-retries**=*retries*
 The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is `3`.
-**--healthcheck-start-period**=*period*
+**--health-start-period**=*period*
 The initialization time needed for a container to bootstrap. The value can be expressed in time format like
 `2m3s`. The default value is `0s`
-**--healthcheck-timeout**=*timeout*
+**--health-timeout**=*timeout*
 The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the
 value can be expressed in a time format such as `1m22s`. The default value is `30s`.
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index f4ea6c694..8ed2b12e1 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -107,16 +107,25 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
 		capture       bytes.Buffer
 		inStartPeriod bool
 	)
-	hcStatus, err := checkHealthCheckCanBeRun(c)
-	if err != nil {
-		return hcStatus, err
-	}
 	hcCommand := c.HealthCheckConfig().Test
-	if len(hcCommand) > 0 && hcCommand[0] == "CMD-SHELL" {
-		newCommand = []string{"sh", "-c", strings.Join(hcCommand[1:], " ")}
-	} else {
+	if len(hcCommand) < 1 {
+		return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+	}
+	switch hcCommand[0] {
+	case "", "NONE":
+		return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+	case "CMD":
+		newCommand = hcCommand[1:]
+	case "CMD-SHELL":
+		// TODO: SHELL command from image not available in Container - use Docker default
+		newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
+	default:
+		// command supplied on command line - pass as-is
 		newCommand = hcCommand
 	}
+	if len(newCommand) < 1 || newCommand[0] == "" {
+		return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
+	}
 	captureBuffer := bufio.NewWriter(&capture)
 	hcw := hcWriteCloser{
 		captureBuffer,
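For reference, the reworked `runHealthCheck` above distinguishes the Docker-style `CMD`, `CMD-SHELL`, and `NONE` markers in the healthcheck `Test` slice. A self-contained restatement of that mapping as a sketch; the helper name is hypothetical and the error handling is simplified:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// resolveHealthCheckCmd turns the Test field of a healthcheck config into the
// argv that would be exec'd in the container. It is an illustrative sketch,
// not the libpod implementation.
func resolveHealthCheckCmd(test []string) ([]string, error) {
	if len(test) < 1 {
		return nil, errors.New("no defined healthcheck")
	}
	switch test[0] {
	case "", "NONE":
		return nil, errors.New("no defined healthcheck")
	case "CMD":
		return test[1:], nil
	case "CMD-SHELL":
		// Docker's default shell, since the image SHELL is not tracked here.
		return []string{"/bin/sh", "-c", strings.Join(test[1:], " ")}, nil
	default:
		// already a plain argv supplied on the command line
		return test, nil
	}
}

func main() {
	for _, test := range [][]string{
		{"CMD", "curl", "-f", "http://localhost/"},
		{"CMD-SHELL", "ls /foo || exit 1"},
		{"NONE"},
	} {
		cmd, err := resolveHealthCheckCmd(test)
		fmt.Println(cmd, err)
	}
}
```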
"-dt", "--name", "hc", "--healthcheck-start-period", "2m", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL ls /foo || exit 1\"", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-start-period", "2m", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) @@ -124,7 +124,7 @@ var _ = Describe("Podman healthcheck run", func() { }) It("podman healthcheck failed checks must reach retries before unhealthy ", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL ls /foo || exit 1\"", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) @@ -145,7 +145,7 @@ var _ = Describe("Podman healthcheck run", func() { }) It("podman healthcheck good check results in healthy even in start-period", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-start-period", "2m", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"||\" \"exit\" \"1\"", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-start-period", "2m", "--health-retries", "2", "--health-cmd", "ls || exit 1", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) @@ -158,7 +158,7 @@ var _ = Describe("Podman healthcheck run", func() { }) It("podman healthcheck single healthy result changes failed to healthy", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--healthcheck-retries", "2", "--healthcheck-command", "\"CMD-SHELL\" \"ls\" \"/foo\" \"||\" \"exit\" \"1\"", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go index 8c7830204..e35c84f5b 100644 --- a/test/e2e/run_test.go +++ b/test/e2e/run_test.go @@ -750,21 +750,21 @@ USER mail` }) It("podman run with bad healthcheck retries", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--healthcheck-command", "foo", "--healthcheck-retries", "0", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-retries", "0", ALPINE, "top"}) session.Wait() Expect(session.ExitCode()).ToNot(Equal(0)) Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-retries must be greater than 0")) }) It("podman run with bad healthcheck timeout", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--healthcheck-command", "foo", "--healthcheck-timeout", "0s", ALPINE, "top"}) + session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-timeout", "0s", ALPINE, "top"}) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).ToNot(Equal(0)) Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-timeout must be at least 1 second")) }) It("podman run with bad healthcheck start-period", func() { - session := podmanTest.Podman([]string{"run", "-dt", "--healthcheck-command", "foo", "--healthcheck-start-period", "-1s", ALPINE, "top"}) + session := 
+		session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-start-period", "-1s", ALPINE, "top"})
 		session.WaitWithDefaultTimeout()
 		Expect(session.ExitCode()).ToNot(Equal(0))
 		Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-start-period must be 0 seconds or greater"))
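The three failing cases above pin down the expected validation messages for `--health-retries`, `--health-timeout`, and `--health-start-period`. A hypothetical sketch of checks that would produce those exact errors; this is not podman's actual validation code:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// validateHealthCheck is an assumed helper matching the error strings the
// e2e tests expect; the real validation lives elsewhere in podman.
func validateHealthCheck(retries uint, timeout, startPeriod time.Duration) error {
	if retries < 1 {
		return errors.New("healthcheck-retries must be greater than 0")
	}
	if timeout < time.Second {
		return errors.New("healthcheck-timeout must be at least 1 second")
	}
	if startPeriod < 0 {
		return errors.New("healthcheck-start-period must be 0 seconds or greater")
	}
	return nil
}

func main() {
	fmt.Println(validateHealthCheck(0, 30*time.Second, 0))  // rejects 0 retries
	fmt.Println(validateHealthCheck(3, 0, 0))               // rejects 0s timeout
	fmt.Println(validateHealthCheck(3, 30*time.Second, -1)) // rejects negative start-period
}
```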