Skip to content

Commit

Permalink
Initial check-result implementation
Browse files Browse the repository at this point in the history
Failing 'check' should affect test result by default.
respect_checks option needs to be implemented.
  • Loading branch information
martinhoyer committed Oct 4, 2024
1 parent 14d60dc commit 41bb466
Show file tree
Hide file tree
Showing 5 changed files with 165 additions and 17 deletions.
75 changes: 75 additions & 0 deletions tests/execute/result/check_results.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
#!/bin/bash
# vim: dict+=/usr/share/beakerlib/dictionary.vim cpt=.,w,b,u,t,i,k
. /usr/share/beakerlib/beakerlib.sh || exit 1

# Verify that test-check results (the dmesg check) affect the overall test
# outcome, and that an explicit `result: pass` lets a test ignore a failing
# check.
#
# NOTE: every `tmt run --scratch` below recreates the run directory, so
# results.yaml only ever holds the results of the most recent run.  The
# yaml content is therefore verified right after each run — verifying
# `.[0]`/`.[1]`/`.[2]` in a separate phase after all three runs would only
# find the single result of the last run.

rlJournalStart
    rlPhaseStartSetup
        rlRun "run=\$(mktemp -d)" 0 "Create run directory"
        rlRun "pushd check_results"
        rlRun "set -o pipefail"
        # Results file produced by the executed plan.
        results_file="${run}/default/plan/execute/results.yaml"
    rlPhaseEnd

    rlPhaseStartTest "Passing checks keep the test result intact"
        rlRun -s "tmt run --id \${run} --scratch provision --how local test --name /test/check-pass execute -vv report -vv 2>&1 >/dev/null" 0
        rlAssertGrep "pass /test/check-pass" $rlRun_LOG
        rlAssertGrep "pass dmesg (before-test check)" $rlRun_LOG
        rlAssertGrep "pass dmesg (after-test check)" $rlRun_LOG

        # The only result of this scratch run is /test/check-pass.
        rlAssertExists "$results_file"
        rlRun -s "yq e '.[0]' $results_file"
        rlAssertGrep "name: /test/check-pass" $rlRun_LOG
        rlAssertGrep "result: pass" $rlRun_LOG
        rlAssertGrep "check:" $rlRun_LOG
        rlAssertGrep "- name: dmesg" $rlRun_LOG
        rlAssertGrep " result: pass" $rlRun_LOG
        rlAssertGrep " event: before-test" $rlRun_LOG
        rlAssertGrep " event: after-test" $rlRun_LOG
    rlPhaseEnd

    rlPhaseStartTest "Failing check fails the test by default"
        # The failing after-test check must turn the test (and the run,
        # hence exit code 1) into a failure.
        rlRun -s "tmt run --id \${run} --scratch provision --how local test --name /test/check-fail execute -vv report -vv 2>&1 >/dev/null" 1
        rlAssertGrep "fail /test/check-fail" $rlRun_LOG
        rlAssertGrep "pass dmesg (before-test check)" $rlRun_LOG
        rlAssertGrep "fail dmesg (after-test check)" $rlRun_LOG
        rlAssertGrep "check failed" $rlRun_LOG

        rlAssertExists "$results_file"
        rlRun -s "yq e '.[0]' $results_file"
        rlAssertGrep "name: /test/check-fail" $rlRun_LOG
        rlAssertGrep "result: fail" $rlRun_LOG
        rlAssertGrep "note: check failed" $rlRun_LOG
        rlAssertGrep "check:" $rlRun_LOG
        rlAssertGrep "- name: dmesg" $rlRun_LOG
        rlAssertGrep " result: pass" $rlRun_LOG
        rlAssertGrep " event: before-test" $rlRun_LOG
        rlAssertGrep " result: fail" $rlRun_LOG
        rlAssertGrep " event: after-test" $rlRun_LOG
    rlPhaseEnd

    rlPhaseStartTest "Failing check is ignored when the test says so"
        # /test/check-ignore forces `result: pass`, so the run succeeds
        # even though the after-test dmesg check fails.
        rlRun -s "tmt run --id \${run} --scratch provision --how local test --name /test/check-ignore execute -vv report -vv 2>&1 >/dev/null" 0
        rlAssertGrep "pass /test/check-ignore" $rlRun_LOG
        rlAssertGrep "pass dmesg (before-test check)" $rlRun_LOG
        rlAssertGrep "fail dmesg (after-test check)" $rlRun_LOG
        rlAssertNotGrep "check failed" $rlRun_LOG

        rlAssertExists "$results_file"
        rlRun -s "yq e '.[0]' $results_file"
        rlAssertGrep "name: /test/check-ignore" $rlRun_LOG
        rlAssertGrep "result: pass" $rlRun_LOG
        rlAssertNotGrep "note: check failed" $rlRun_LOG
        rlAssertGrep "check:" $rlRun_LOG
        rlAssertGrep "- name: dmesg" $rlRun_LOG
        rlAssertGrep " result: pass" $rlRun_LOG
        rlAssertGrep " event: before-test" $rlRun_LOG
        rlAssertGrep " result: fail" $rlRun_LOG
        rlAssertGrep " event: after-test" $rlRun_LOG
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun "popd"
        rlRun "rm -r ${run}" 0 "Remove run directory"
    rlPhaseEnd
rlJournalEnd
31 changes: 31 additions & 0 deletions tests/execute/result/check_results/main.fmf
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Test data for the check-results test: three variants of the same shell
# test, differing only in whether the dmesg check fails and whether that
# failure is ignored via explicit result interpretation.
summary: Tests for check results behavior
description: Verify that check results, including after-test checks, are correctly handled

# Both before-test and after-test dmesg checks pass; test result stays "pass".
/test/check-pass:
    summary: Test with passing checks
    test: echo "Test passed"
    framework: shell
    duration: 1m
    check:
      - how: dmesg

# Writing "Call Trace:" into the kernel ring buffer makes the after-test
# dmesg check fail, which should fail the test by default.
# NOTE(review): writing to /dev/kmsg needs root — presumably the local
# provision runs privileged; confirm.
/test/check-fail:
    summary: Test with failing dmesg check
    test: |
        echo "Test passed"
        echo "Call Trace:" >> /dev/kmsg
    framework: shell
    duration: 1m
    check:
      - how: dmesg

# Same failing dmesg check, but `result: pass` forces the final outcome
# to "pass", so the failing check must be ignored.
/test/check-ignore:
    summary: Test with failing dmesg check but ignored
    test: |
        echo "Test passed"
        echo "Call Trace:" >> /dev/kmsg
    framework: shell
    duration: 1m
    result: pass
    check:
      - how: dmesg
3 changes: 3 additions & 0 deletions tests/execute/result/main.fmf
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,6 @@
/special:
summary: Test special characters generated to tmt-report-results.yaml
test: ./special.sh
/check_results:
summary: Test behavior of check results, including after-test checks
test: ./check_results.sh
62 changes: 50 additions & 12 deletions tmt/result.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,11 @@ class Result(BaseResult):
serialize=lambda path: None if path is None else str(path),
unserialize=lambda value: None if value is None else Path(value)
)
respect_checks: bool = field(
default=True,
serialize=lambda value: value,
unserialize=lambda value: value
)

@classmethod
def from_test_invocation(
Expand All @@ -270,7 +275,8 @@ def from_test_invocation(
result: ResultOutcome,
note: Optional[str] = None,
ids: Optional[ResultIds] = None,
log: Optional[list[Path]] = None) -> 'Result':
log: Optional[list[Path]] = None,
respect_checks: bool = True) -> 'Result':
"""
Create a result from a test invocation.
Expand All @@ -287,6 +293,7 @@ def from_test_invocation(
:param ids: additional test IDs. They will be added to IDs extracted
from the test.
:param log: optional list of test logs.
:param respect_checks: whether to respect or ignore check results.
"""

# Saving identifiable information for each test case so we can match them
Expand Down Expand Up @@ -318,7 +325,12 @@ def from_test_invocation(
ids=ids,
log=log or [],
guest=ResultGuestData.from_test_invocation(invocation=invocation),
data_path=invocation.relative_test_data_path)
data_path=invocation.relative_test_data_path,
respect_checks=respect_checks,
check=invocation.check_results)

for check in _result.check:
print(f" - Name: {check.name}, Result: {check.result}, Event: {check.event}")

return _result.interpret_result(invocation.test.result)

Expand All @@ -333,7 +345,41 @@ def interpret_result(self, interpret: ResultInterpret) -> 'Result':
:returns: :py:class:`Result` instance containing the updated result.
"""

if interpret in (ResultInterpret.RESPECT, ResultInterpret.CUSTOM):
if interpret == ResultInterpret.CUSTOM:
return self

# Check for failed checks
checks_failed = any(check.result == ResultOutcome.FAIL for check in self.check)

if interpret == ResultInterpret.RESPECT:
if self.respect_checks and checks_failed:
self.result = ResultOutcome.FAIL
if self.note:
self.note += ', check failed'
else:
self.note = 'check failed'
return self

# Handle XFAIL
if interpret == ResultInterpret.XFAIL:
if self.result == ResultOutcome.PASS and checks_failed:
self.result = ResultOutcome.FAIL
if self.note:
self.note += ', check failed'
else:
self.note = 'check failed'
elif self.result == ResultOutcome.FAIL:
self.result = ResultOutcome.PASS
if self.note:
self.note += ', expected failure'
else:
self.note = 'expected failure'
elif self.result == ResultOutcome.PASS:
self.result = ResultOutcome.FAIL
if self.note:
self.note += ', unexpected pass'
else:
self.note = 'unexpected pass'
return self

# Extend existing note or set a new one
Expand All @@ -347,15 +393,7 @@ def interpret_result(self, interpret: ResultInterpret) -> 'Result':
raise tmt.utils.SpecificationError(
f"Test result note '{self.note}' must be a string.")

if interpret == ResultInterpret.XFAIL:
# Swap just fail<-->pass, keep the rest as is (info, warn,
# error)
self.result = {
ResultOutcome.FAIL: ResultOutcome.PASS,
ResultOutcome.PASS: ResultOutcome.FAIL
}.get(self.result, self.result)

elif ResultInterpret.is_result_outcome(interpret):
if ResultInterpret.is_result_outcome(interpret):
self.result = ResultOutcome(interpret.value)

else:
Expand Down
11 changes: 6 additions & 5 deletions tmt/steps/execute/internal.py
Original file line number Diff line number Diff line change
Expand Up @@ -453,17 +453,18 @@ def _save_process(
# losing logs if the guest becomes later unresponsive.
guest.pull(source=self.step.plan.data_directory)

# Extract test results and store them in the invocation. Note
# that these results will be overwritten with a fresh set of
# results after a successful reboot in the middle of a test.
invocation.results = self.extract_results(invocation, logger)

# Run after-test checks before extracting results
invocation.check_results += self.run_checks_after_test(
invocation=invocation,
environment=environment,
logger=logger
)

# Extract test results and store them in the invocation. Note
# that these results will be overwritten with a fresh set of
# results after a successful reboot in the middle of a test.
invocation.results = self.extract_results(invocation, logger)

if invocation.is_guest_healthy:
# Fetch #2: after-test checks might have produced remote files as well,
# we need to fetch them too.
Expand Down

0 comments on commit 41bb466

Please sign in to comment.