author    | OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com> | 2019-06-17 21:38:25 +0200
committer | GitHub <noreply@github.com> | 2019-06-17 21:38:25 +0200
commit    | 058c93f8505e14cfcc718b84b66766d7ea18975d (patch)
tree      | 26035631cd69d019a797ab798f65b3772e2b4229 /.cirrus.yml
parent    | bce4a93575234a99e5e252482a0c15690b13e91a (diff)
parent    | 4e9f5e5f2bd6a812581af0efada7219ce8eebf50 (diff)
Merge pull request #3308 from cevich/always_collect_logs
Cirrus: Simplify log collection commands
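The simplification leans on a standard YAML feature: a mapping defined once under an anchor can be merged into any number of other mappings with the "<<" merge key, so the log-collection steps no longer have to be copy-pasted into every task. A minimal sketch of that mechanism, reusing two of the commands this PR replaces (the task and anchor names below are illustrative, not taken from the repository):

    # Illustrative YAML only: define the log-collection steps once, under an anchor...
    collect_logs: &logs
        df_script: 'df -lhTx tmpfs'      # filesystem types and sizes
        journal_script: 'journalctl -b'  # journal since last boot

    # ...then merge the same mapping into any number of tasks without repeating it.
    task_one:
        <<: *logs
    task_two:
        <<: *logs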
Diffstat (limited to '.cirrus.yml')
-rw-r--r-- | .cirrus.yml | 55
1 file changed, 18 insertions(+), 37 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 848dc2b6d..4b3b6f626 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -21,12 +21,10 @@ env:
     CIRRUS_SHELL: "/bin/bash"
     # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
     SCRIPT_BASE: "./contrib/cirrus"
-    # Command to prefix every output line with a timestamp
+    CIRRUS_CLONE_DEPTH: 50
+    # Command to prefix output lines with timing information
     # (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
     TIMESTAMP: "awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk"
-    # Command to log critical filesystems, types, and sizes.
-    DFCMD: "df -lhTx tmpfs"
-    CIRRUS_CLONE_DEPTH: 50

 ####
 #### Cache-image names to test with
@@ -280,18 +278,15 @@ testing_task:
     setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
     unit_test_script: '$SCRIPT_BASE/unit_test.sh |& ${TIMESTAMP}'
     integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
-    ginkgo_node_logs_script: 'cat $CIRRUS_WORKING_DIR/test/e2e/ginkgo-node-*.log || echo "Ginkgo node logs not found"'
-    df_script: '${DFCMD}'
-    audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log'
-    journalctl_b_script: 'journalctl -b'

     on_failure:
         failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
-        # Job has already failed, don't fail again and miss collecting data
-        failed_ginkgo_node_logs_script: 'cat $CIRRUS_WORKING_DIR/test/e2e/ginkgo-node-*.log || echo "Ginkgo node logs not found"'
-        failed_df_script: '${DFCMD}'
-        failed_audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log || echo "Uh oh, cat audit.log failed"'
-        failed_journalctl_b_script: 'journalctl -b || echo "Uh oh, journalctl -b failed"'
+
+    always: &standardlogs
+        ginkgo_node_logs_script: '$SCRIPT_BASE/logcollector.sh ginkgo'
+        df_script: '$SCRIPT_BASE/logcollector.sh df'
+        audit_log_script: '$SCRIPT_BASE/logcollector.sh audit'
+        journal_script: '$SCRIPT_BASE/logcollector.sh journal'

 # This task executes tests under unique environments/conditions
@@ -316,16 +311,13 @@ special_testing_rootless_task:
     setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
     integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
-    df_script: '${DFCMD}'
-    audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log'
-    journalctl_b_script: 'journalctl -b'

     on_failure:
         failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
-        # Job has already failed, don't fail again and miss collecting data
-        failed_df_script: '${DFCMD}'
-        failed_audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log || echo "Uh oh, cat audit.log failed"'
-        failed_journalctl_b_script: 'journalctl -b || echo "Uh oh, journalctl -b failed"'
+
+    always:
+        <<: *standardlogs

 special_testing_in_podman_task:
@@ -344,16 +336,12 @@ special_testing_in_podman_task:
     setup_environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
     integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
-    df_script: '${DFCMD}'
-    audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log'
-    journalctl_b_script: 'journalctl -b'

     on_failure:
         failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
-        # Job has already failed, don't fail again and miss collecting data
-        failed_df_script: '${DFCMD}'
-        failed_audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log || echo "Uh oh, cat audit.log failed"'
-        failed_journalctl_b_script: 'journalctl -b || echo "Uh oh, journalctl -b failed"'
+
+    always:
+        <<: *standardlogs

 # Test building of new cache-images for future PR testing, in this PR.
@@ -416,16 +404,9 @@ verify_test_built_images_task:
     environment_script: '$SCRIPT_BASE/setup_environment.sh |& ${TIMESTAMP}'
     integration_test_script: '$SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}'
-    ginkgo_node_logs_script: 'cat $CIRRUS_WORKING_DIR/test/e2e/ginkgo-node-*.log || echo "Ginkgo node logs not found"'
-    df_script: '${DFCMD}'
-    audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log'
-    journalctl_b_script: 'journalctl -b'
-    on_failure:
-        # Job has already failed, don't fail again and miss collecting data
-        failed_ginkgo_node_logs_script: 'cat $CIRRUS_WORKING_DIR/test/e2e/ginkgo-node-*.log || echo "Ginkgo node logs not found"'
-        failed_df_script: '${DFCMD}'
-        failed_audit_log_script: 'cat /var/log/audit/audit.log || cat /var/log/kern.log || echo "Uh oh, cat audit.log failed"'
-        failed_journalctl_b_script: 'journalctl -b || echo "Uh oh, journalctl -b failed"'
+
+    always:
+        <<: *standardlogs

 # Build new cache-images for future PR testing, but only after a PR merge.