summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Steinhardt <ps@pks.im>2025-01-02 11:42:34 +0100
committerEli Schwartz <eschwartz93@gmail.com>2025-02-27 15:25:57 -0500
commit7fd17ab12854ff5339f3309156cd9b705f517fd0 (patch)
tree2ed88495e7663b43468308f1e4249e386b14856d
parent4526a75e25c8f4e908e6c142cf6658bac0acb5e3 (diff)
downloadmeson-7fd17ab12854ff5339f3309156cd9b705f517fd0.tar.gz
mtest: introduce ignored tests
When running tests in interactive mode then the standard file streams will remain connected to the executing terminal so that the user can interact with the tests. This has the consequence that Meson itself does not have access to those streams anymore, which is problematic for any of the test types that require parsing, like for example with the TAP protocol. This means that Meson is essentially flying blind in those cases because the test result cannot be determined by parsing the exit code of the test, but can only reliably be derived from the parsed output. One obvious solution to this problem would be to splice the test output so that both Meson and the user's terminal have access to it. But when running in interactive mode it is quite likely that the test itself will actually be driven from the command line, and the chance is high that the resulting data on stdout cannot be parsed properly anymore. This is for example the case in the Git project, where interactive mode is typically used to drop the user into a shell or invoke a debugger. So continuing to treat the output as properly formatted output that can be parsed is likely a dead end in many use cases. Instead, we introduce a new "IGNORED" test result: when executing tests in interactive mode, and when the test type indicates that it requires parsing, we will not try to parse the test at all but mark the test result as ignored instead. Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Patrick Steinhardt <ps@pks.im>
-rw-r--r--mesonbuild/mtest.py21
1 file changed, 18 insertions, 3 deletions
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 8e490d5ff..5a5d25758 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -266,6 +266,7 @@ class TestResult(enum.Enum):
EXPECTEDFAIL = 'EXPECTEDFAIL'
UNEXPECTEDPASS = 'UNEXPECTEDPASS'
ERROR = 'ERROR'
+ IGNORED = 'IGNORED'
@staticmethod
def maxlen() -> int:
@@ -287,7 +288,7 @@ class TestResult(enum.Enum):
def colorize(self, s: str) -> mlog.AnsiDecorator:
if self.is_bad():
decorator = mlog.red
- elif self in (TestResult.SKIP, TestResult.EXPECTEDFAIL):
+ elif self in (TestResult.SKIP, TestResult.IGNORED, TestResult.EXPECTEDFAIL):
decorator = mlog.yellow
elif self.is_finished():
decorator = mlog.green
@@ -857,7 +858,8 @@ class JunitBuilder(TestLogger):
{TestResult.INTERRUPT, TestResult.ERROR})),
failures=str(sum(1 for r in test.results if r.result in
{TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
- skipped=str(sum(1 for r in test.results if r.result is TestResult.SKIP)),
+ skipped=str(sum(1 for r in test.results if r.result in
+ {TestResult.SKIP, TestResult.IGNORED})),
time=str(test.duration),
)
@@ -867,6 +869,9 @@ class JunitBuilder(TestLogger):
testcase = et.SubElement(suite, 'testcase', name=str(subtest), classname=suitename)
if subtest.result is TestResult.SKIP:
et.SubElement(testcase, 'skipped')
+ elif subtest.result is TestResult.IGNORED:
+ skip = et.SubElement(testcase, 'skipped')
+ skip.text = 'Test output was not parsed.'
elif subtest.result is TestResult.ERROR:
et.SubElement(testcase, 'error')
elif subtest.result is TestResult.FAIL:
@@ -902,6 +907,10 @@ class JunitBuilder(TestLogger):
if test.res is TestResult.SKIP:
et.SubElement(testcase, 'skipped')
suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
+ elif test.res is TestResult.IGNORED:
+ skip = et.SubElement(testcase, 'skipped')
+ skip.text = 'Test output was not parsed.'
+ suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
elif test.res is TestResult.ERROR:
et.SubElement(testcase, 'error')
suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
@@ -1001,7 +1010,7 @@ class TestRun:
if self.results:
# running or succeeded
passed = sum(x.result.is_ok() for x in self.results)
- ran = sum(x.result is not TestResult.SKIP for x in self.results)
+ ran = sum(x.result not in {TestResult.SKIP, TestResult.IGNORED} for x in self.results)
if passed == ran:
return f'{passed} subtests passed'
else:
@@ -1021,6 +1030,8 @@ class TestRun:
def _complete(self) -> None:
if self.res == TestResult.RUNNING:
self.res = TestResult.OK
+ if self.needs_parsing and self.console_mode is ConsoleUser.INTERACTIVE:
+ self.res = TestResult.IGNORED
assert isinstance(self.res, TestResult)
if self.should_fail and self.res in (TestResult.OK, TestResult.FAIL):
self.res = TestResult.UNEXPECTEDPASS if self.res is TestResult.OK else TestResult.EXPECTEDFAIL
@@ -1638,6 +1649,7 @@ class TestHarness:
self.unexpectedpass_count = 0
self.success_count = 0
self.skip_count = 0
+ self.ignored_count = 0
self.timeout_count = 0
self.test_count = 0
self.name_max_len = 0
@@ -1781,6 +1793,8 @@ class TestHarness:
self.timeout_count += 1
elif result.res is TestResult.SKIP:
self.skip_count += 1
+ elif result.res is TestResult.IGNORED:
+ self.ignored_count += 1
elif result.res is TestResult.OK:
self.success_count += 1
elif result.res in {TestResult.FAIL, TestResult.ERROR, TestResult.INTERRUPT}:
@@ -1845,6 +1859,7 @@ class TestHarness:
'Fail: ': self.fail_count,
'Unexpected Pass: ': self.unexpectedpass_count,
'Skipped: ': self.skip_count,
+ 'Ignored: ': self.ignored_count,
'Timeout: ': self.timeout_count,
}