spack test: fix stand-alone test suite status reporting (#37602)

* Fix reporting of packageless specs as having no tests

* Add test_test_output_multiple_specs with update to simple-standalone-test (and tests)

* Refactored test status summary; added more tests or checks
This commit is contained in:
Tamara Dahlgren 2023-05-17 07:03:21 -07:00 committed by GitHub
parent 05232034f5
commit 86b9ce1c88
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 198 additions and 43 deletions

View file

@ -215,6 +215,31 @@ def print_message(logger: LogType, msg: str, verbose: bool = False):
tty.info(msg, format="g") tty.info(msg, format="g")
def overall_status(current_status: "TestStatus", substatuses: List["TestStatus"]) -> "TestStatus":
    """Roll the sub-part status values up into a single overall status.

    Args:
        current_status: current overall status, assumed to default to PASSED
        substatuses: status of each test part or overall status of each test spec

    Returns:
        test status encompassing the main test and all subtests
    """
    # An already-terminal overall status (skipped/no-tests/failed) wins outright.
    if current_status in (TestStatus.SKIPPED, TestStatus.NO_TESTS, TestStatus.FAILED):
        return current_status

    # A single failing sub-part makes the whole test a failure.
    if any(sub == TestStatus.FAILED for sub in substatuses):
        return TestStatus.FAILED

    # If there was at least one sub-part and every one was skipped, the
    # overall result is skipped; otherwise keep the caller's status.
    if substatuses and all(sub == TestStatus.SKIPPED for sub in substatuses):
        return TestStatus.SKIPPED

    return current_status
class PackageTest: class PackageTest:
"""The class that manages stand-alone (post-install) package tests.""" """The class that manages stand-alone (post-install) package tests."""
@ -308,14 +333,12 @@ def status(self, name: str, status: "TestStatus", msg: Optional[str] = None):
# to start with the same name) may not have PASSED. This extra # to start with the same name) may not have PASSED. This extra
# check is used to ensure the containing test part is not claiming # check is used to ensure the containing test part is not claiming
# to have passed when at least one subpart failed. # to have passed when at least one subpart failed.
if status == TestStatus.PASSED: substatuses = []
for pname, substatus in self.test_parts.items(): for pname, substatus in self.test_parts.items():
if pname != part_name and pname.startswith(part_name): if pname != part_name and pname.startswith(part_name):
if substatus == TestStatus.FAILED: substatuses.append(substatus)
print(f"{substatus}: {part_name}{extra}") if substatuses:
self.test_parts[part_name] = substatus status = overall_status(status, substatuses)
self.counts[substatus] += 1
return
print(f"{status}: {part_name}{extra}") print(f"{status}: {part_name}{extra}")
self.test_parts[part_name] = status self.test_parts[part_name] = status
@ -420,6 +443,25 @@ def summarize(self):
lines.append(f"{totals:=^80}") lines.append(f"{totals:=^80}")
return lines return lines
def write_tested_status(self):
    """Write the overall status to the tested file.

    If there are any test part failures, then the tests failed. If all test
    parts are skipped, then the tests were skipped. If any tests passed
    then the tests passed; otherwise, no tests were executed.
    """
    # Default when no part passed, failed, or was skipped.
    status = TestStatus.NO_TESTS
    if self.counts[TestStatus.FAILED] > 0:
        status = TestStatus.FAILED
    else:
        skipped = self.counts[TestStatus.SKIPPED]
        # Skipped overall only when *every* recorded part was skipped.
        if skipped and self.parts() == skipped:
            status = TestStatus.SKIPPED
        elif self.counts[TestStatus.PASSED] > 0:
            status = TestStatus.PASSED

    # Persist the numeric enum value so it can be re-read by test_status().
    _add_msg_to_file(self.tested_file, f"{status.value}")
@contextlib.contextmanager @contextlib.contextmanager
def test_part(pkg: Pb, test_name: str, purpose: str, work_dir: str = ".", verbose: bool = False): def test_part(pkg: Pb, test_name: str, purpose: str, work_dir: str = ".", verbose: bool = False):
@ -654,8 +696,9 @@ def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool
try: try:
tests = test_functions(spec.package_class) tests = test_functions(spec.package_class)
except spack.repo.UnknownPackageError: except spack.repo.UnknownPackageError:
# some virtuals don't have a package # Some virtuals don't have a package so we don't want to report
tests = [] # them as not having tests when that isn't appropriate.
continue
if len(tests) == 0: if len(tests) == 0:
tester.status(spec.name, TestStatus.NO_TESTS) tester.status(spec.name, TestStatus.NO_TESTS)
@ -682,7 +725,7 @@ def process_test_parts(pkg: Pb, test_specs: List[spack.spec.Spec], verbose: bool
finally: finally:
if tester.ran_tests(): if tester.ran_tests():
fs.touch(tester.tested_file) tester.write_tested_status()
# log one more test message to provide a completion timestamp # log one more test message to provide a completion timestamp
# for CDash reporting # for CDash reporting
@ -889,20 +932,15 @@ def __call__(self, *args, **kwargs):
if remove_directory: if remove_directory:
shutil.rmtree(test_dir) shutil.rmtree(test_dir)
tested = os.path.exists(self.tested_file_for_spec(spec)) status = self.test_status(spec, externals)
if tested:
status = TestStatus.PASSED
else:
self.ensure_stage()
if spec.external and not externals:
status = TestStatus.SKIPPED
elif not spec.installed:
status = TestStatus.SKIPPED
else:
status = TestStatus.NO_TESTS
self.counts[status] += 1 self.counts[status] += 1
self.write_test_result(spec, status) self.write_test_result(spec, status)
except SkipTest:
status = TestStatus.SKIPPED
self.counts[status] += 1
self.write_test_result(spec, TestStatus.SKIPPED)
except BaseException as exc: except BaseException as exc:
status = TestStatus.FAILED status = TestStatus.FAILED
self.counts[status] += 1 self.counts[status] += 1
@ -939,6 +977,31 @@ def __call__(self, *args, **kwargs):
if failures: if failures:
raise TestSuiteFailure(failures) raise TestSuiteFailure(failures)
def test_status(self, spec: spack.spec.Spec, externals: bool) -> "TestStatus":
    """Determine the overall test results status for the spec.

    Args:
        spec: instance of the spec under test
        externals: ``True`` if externals are to be tested, else ``False``

    Returns:
        the spec's overall test status

    Note: the original annotation/docstring advertised an ``Optional``
    result, but every path returns a ``TestStatus``, so the signature is
    narrowed accordingly (backward compatible for all callers).
    """
    tests_status_file = self.tested_file_for_spec(spec)
    if not os.path.exists(tests_status_file):
        # No recorded status: derive one from the spec's state instead.
        self.ensure_stage()
        if spec.external and not externals:
            status = TestStatus.SKIPPED
        elif not spec.installed:
            status = TestStatus.SKIPPED
        else:
            status = TestStatus.NO_TESTS
        return status

    with open(tests_status_file, "r") as f:
        # File holds the integer enum value written by write_tested_status();
        # an empty file means no tests were actually executed.
        value = (f.read()).strip("\n")
        return TestStatus(int(value)) if value else TestStatus.NO_TESTS
def ensure_stage(self): def ensure_stage(self):
"""Ensure the test suite stage directory exists.""" """Ensure the test suite stage directory exists."""
if not os.path.exists(self.stage): if not os.path.exists(self.stage):

View file

@ -319,3 +319,17 @@ def test_report_filename_for_cdash(install_mockery_mutable_config, mock_fetch):
spack.cmd.common.arguments.sanitize_reporter_options(args) spack.cmd.common.arguments.sanitize_reporter_options(args)
filename = spack.cmd.test.report_filename(args, suite) filename = spack.cmd.test.report_filename(args, suite)
assert filename != "https://blahblah/submit.php?project=debugging" assert filename != "https://blahblah/submit.php?project=debugging"
def test_test_output_multiple_specs(
    mock_test_stage, mock_packages, mock_archive, mock_fetch, install_mockery_mutable_config
):
    """Ensure proper reporting for suite with skipped, failing, and passed tests."""
    specs_to_install = ("test-error", "simple-standalone-test@0.9", "simple-standalone-test@1.0")
    install(*specs_to_install)

    output = spack_test("run", "test-error", "simple-standalone-test", fail_on_error=False)

    # Note that a spec with passing *and* skipped tests is still considered
    # to have passed at this level. If you want to see the spec-specific
    # part result summaries, you'll have to look at the "test-out.txt" files
    # for each spec.
    assert "1 failed, 2 passed of 3 specs" in output

View file

@ -1399,17 +1399,24 @@ def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, r
assert out == "" assert out == ""
def test_print_install_test_log_missing( def test_print_install_test_log_failures(
tmpdir, install_mockery, mock_packages, ensure_debug, capfd tmpdir, install_mockery, mock_packages, ensure_debug, capfd
): ):
"""Confirm expected error on attempt to print missing test log file.""" """Confirm expected outputs when there are test failures."""
name = "trivial-install-test-package" name = "trivial-install-test-package"
s = spack.spec.Spec(name).concretized() s = spack.spec.Spec(name).concretized()
pkg = s.package pkg = s.package
# Missing test log is an error
pkg.run_tests = True pkg.run_tests = True
pkg.tester.test_log_file = str(tmpdir.join("test-log.txt")) pkg.tester.test_log_file = str(tmpdir.join("test-log.txt"))
pkg.tester.add_failure(AssertionError("test"), "test-failure") pkg.tester.add_failure(AssertionError("test"), "test-failure")
spack.installer.print_install_test_log(pkg) spack.installer.print_install_test_log(pkg)
err = capfd.readouterr()[1] err = capfd.readouterr()[1]
assert "no test log file" in err assert "no test log file" in err
# Having test log results in path being output
fs.touch(pkg.tester.test_log_file)
spack.installer.print_install_test_log(pkg)
out = capfd.readouterr()[0]
assert "See test results at" in out

View file

@ -12,6 +12,7 @@
import spack.install_test import spack.install_test
import spack.spec import spack.spec
from spack.install_test import TestStatus
from spack.util.executable import which from spack.util.executable import which
@ -20,7 +21,7 @@ def _true(*args, **kwargs):
return True return True
def ensure_results(filename, expected): def ensure_results(filename, expected, present=True):
assert os.path.exists(filename) assert os.path.exists(filename)
with open(filename, "r") as fd: with open(filename, "r") as fd:
lines = fd.readlines() lines = fd.readlines()
@ -29,7 +30,10 @@ def ensure_results(filename, expected):
if expected in line: if expected in line:
have = True have = True
break break
assert have if present:
assert have, f"Expected '{expected}' in the file"
else:
assert not have, f"Expected '{expected}' NOT to be in the file"
def test_test_log_name(mock_packages, config): def test_test_log_name(mock_packages, config):
@ -78,8 +82,8 @@ def test_write_test_result(mock_packages, mock_test_stage):
assert spec.name in msg assert spec.name in msg
def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage): def test_test_not_installed(mock_packages, install_mockery, mock_test_stage):
"""Attempt to perform stand-alone test for uninstalled package.""" """Attempt to perform stand-alone test for not_installed package."""
spec = spack.spec.Spec("trivial-smoke-test").concretized() spec = spack.spec.Spec("trivial-smoke-test").concretized()
test_suite = spack.install_test.TestSuite([spec]) test_suite = spack.install_test.TestSuite([spec])
@ -91,10 +95,7 @@ def test_test_uninstalled(mock_packages, install_mockery, mock_test_stage):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"arguments,status,msg", "arguments,status,msg",
[ [({}, TestStatus.SKIPPED, "Skipped"), ({"externals": True}, TestStatus.NO_TESTS, "No tests")],
({}, spack.install_test.TestStatus.SKIPPED, "Skipped"),
({"externals": True}, spack.install_test.TestStatus.NO_TESTS, "No tests"),
],
) )
def test_test_external( def test_test_external(
mock_packages, install_mockery, mock_test_stage, monkeypatch, arguments, status, msg mock_packages, install_mockery, mock_test_stage, monkeypatch, arguments, status, msg
@ -156,6 +157,7 @@ def test_test_spec_passes(mock_packages, install_mockery, mock_test_stage, monke
ensure_results(test_suite.results_file, "PASSED") ensure_results(test_suite.results_file, "PASSED")
ensure_results(test_suite.log_file_for_spec(spec), "simple stand-alone") ensure_results(test_suite.log_file_for_spec(spec), "simple stand-alone")
ensure_results(test_suite.log_file_for_spec(spec), "standalone-ifc", present=False)
def test_get_test_suite(): def test_get_test_suite():
@ -212,8 +214,10 @@ def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, ca
spec = spack.spec.Spec("simple-standalone-test").concretized() spec = spack.spec.Spec("simple-standalone-test").concretized()
fns = spack.install_test.test_functions(spec.package, add_virtuals=True) fns = spack.install_test.test_functions(spec.package, add_virtuals=True)
out = capsys.readouterr() out = capsys.readouterr()
assert len(fns) == 1, "Expected only one test function" assert len(fns) == 2, "Expected two test functions"
assert "does not appear to have a package file" in out[1] for f in fns:
assert f[1].__name__ in ["test_echo", "test_skip"]
assert "virtual does not appear to have a package file" in out[1]
# TODO: This test should go away when compilers as dependencies is supported # TODO: This test should go away when compilers as dependencies is supported
@ -301,7 +305,7 @@ def test_test_part_fail(tmpdir, install_mockery_mutable_config, mock_fetch, mock
for part_name, status in pkg.tester.test_parts.items(): for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name) assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED assert status == TestStatus.FAILED
def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_stage): def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_stage):
@ -317,7 +321,7 @@ def test_test_part_pass(install_mockery_mutable_config, mock_fetch, mock_test_st
for part_name, status in pkg.tester.test_parts.items(): for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name) assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.PASSED assert status == TestStatus.PASSED
def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_stage): def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_stage):
@ -331,7 +335,7 @@ def test_test_part_skip(install_mockery_mutable_config, mock_fetch, mock_test_st
for part_name, status in pkg.tester.test_parts.items(): for part_name, status in pkg.tester.test_parts.items():
assert part_name.endswith(name) assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.SKIPPED assert status == TestStatus.SKIPPED
def test_test_part_missing_exe_fail_fast( def test_test_part_missing_exe_fail_fast(
@ -354,7 +358,7 @@ def test_test_part_missing_exe_fail_fast(
assert len(test_parts) == 1 assert len(test_parts) == 1
for part_name, status in test_parts.items(): for part_name, status in test_parts.items():
assert part_name.endswith(name) assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED assert status == TestStatus.FAILED
def test_test_part_missing_exe( def test_test_part_missing_exe(
@ -375,7 +379,66 @@ def test_test_part_missing_exe(
assert len(test_parts) == 1 assert len(test_parts) == 1
for part_name, status in test_parts.items(): for part_name, status in test_parts.items():
assert part_name.endswith(name) assert part_name.endswith(name)
assert status == spack.install_test.TestStatus.FAILED assert status == TestStatus.FAILED
# TODO (embedded test parts): Update this once embedded test part tracking
# TODO (embedded test parts): properly handles the nested context managers.
@pytest.mark.parametrize(
    "current,substatuses,expected",
    [
        (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.PASSED),
        (TestStatus.FAILED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.FAILED),
        (TestStatus.SKIPPED, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.SKIPPED),
        (TestStatus.NO_TESTS, [TestStatus.PASSED, TestStatus.PASSED], TestStatus.NO_TESTS),
        (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.SKIPPED], TestStatus.PASSED),
        (TestStatus.PASSED, [TestStatus.PASSED, TestStatus.FAILED], TestStatus.FAILED),
        (TestStatus.PASSED, [TestStatus.SKIPPED, TestStatus.SKIPPED], TestStatus.SKIPPED),
    ],
)
def test_embedded_test_part_status(
    install_mockery_mutable_config, mock_fetch, mock_test_stage, current, substatuses, expected
):
    """Check to ensure the status of the enclosing test part reflects summary of embedded parts."""
    spec = spack.spec.Spec("trivial-smoke-test").concretized()
    pkg = spec.package
    base_name = "test_example"
    part_name = f"{pkg.__class__.__name__}::{base_name}"

    # Seed the enclosing part plus one embedded sub-part per substatus.
    pkg.tester.test_parts[part_name] = current
    for idx, sub in enumerate(substatuses):
        pkg.tester.test_parts[f"{part_name}_{idx}"] = sub

    # Recording the enclosing part's status should fold in the sub-parts.
    pkg.tester.status(base_name, current)
    assert pkg.tester.test_parts[part_name] == expected
@pytest.mark.parametrize(
    "statuses,expected",
    [
        ([TestStatus.PASSED, TestStatus.PASSED], TestStatus.PASSED),
        ([TestStatus.PASSED, TestStatus.SKIPPED], TestStatus.PASSED),
        ([TestStatus.PASSED, TestStatus.FAILED], TestStatus.FAILED),
        ([TestStatus.SKIPPED, TestStatus.SKIPPED], TestStatus.SKIPPED),
        ([], TestStatus.NO_TESTS),
    ],
)
def test_write_tested_status(
    tmpdir, install_mockery_mutable_config, mock_fetch, mock_test_stage, statuses, expected
):
    """Confirm the tested file records the expected overall status for the parts."""
    spec = spack.spec.Spec("trivial-smoke-test").concretized()
    pkg = spec.package

    # Register one fake test part per status and bump the matching counter.
    for idx, part_status in enumerate(statuses):
        pkg.tester.test_parts[f"test_{idx}"] = part_status
        pkg.tester.counts[part_status] += 1

    pkg.tester.tested_file = tmpdir.join("test-log.txt")
    pkg.tester.write_tested_status()

    # The file holds the integer enum value of the overall status.
    with open(pkg.tester.tested_file, "r") as f:
        written = int(f.read().strip("\n"))
    assert TestStatus(written) == expected
def test_check_special_outputs(tmpdir): def test_check_special_outputs(tmpdir):

View file

@ -7,16 +7,24 @@
class SimpleStandaloneTest(Package): class SimpleStandaloneTest(Package):
"""This package has a simple stand-alone test features.""" """This package has simple stand-alone test features."""
homepage = "http://www.example.com/simple_test" homepage = "http://www.example.com/simple_test"
url = "http://www.unit-test-should-replace-this-url/simple_test-1.0.tar.gz" url = "http://www.unit-test-should-replace-this-url/simple_test-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef") version("1.0", md5="123456789abcdef0123456789abcdefg")
version("0.9", md5="0123456789abcdef0123456789abcdef")
provides("standalone-test") provides("standalone-ifc")
def test_echo(self): def test_echo(self):
"""simple stand-alone test""" """simple stand-alone test"""
echo = which("echo") echo = which("echo")
echo("testing echo", output=str.split, error=str.split) echo("testing echo", output=str.split, error=str.split)
def test_skip(self):
    """simple skip test"""
    # Skip on v1.0 and later; presumably this mock exists to exercise
    # SKIPPED-status reporting for newer versions — confirm against the
    # stand-alone test suite tests that install @0.9 and @1.0.
    if self.spec.satisfies("@1.0:"):
        raise SkipTest("This test is not available from v1.0 on")
    print("Ran test_skip")