tests: Split test results into running and verification
The test base class already assumes that test cases consist of a run
stage and a verification stage. Reflect this in the results class to
make it possible to detect cases where a run was successful, but didn't
verify.

Change-Id: I31ef393e496671221c5408aca41649cd8dda74ca
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-by: Curtis Dunham <curtis.dunham@arm.com>
This commit is contained in: parent ddfc4c4593, commit 96b74fd31b
3 changed files with 33 additions and 23 deletions
@@ -120,19 +120,16 @@ def print_test(target, source, env):
     assert len(result) == 1
     result = result[0]
 
-    run = result.results[0]
-    assert run.name == "gem5"
-
     formatter = None
-    if not run:
-        status = color_message(termcap.Red, "FAILED!")
-        formatter = results.Text()
-    elif run.skipped():
+    if result.skipped():
         status = color_message(termcap.Cyan, "skipped.")
+    elif result.changed():
+        status = color_message(termcap.Yellow, "CHANGED!")
+        formatter = results.Text()
     elif result:
         status = color_message(termcap.Green, "passed.")
     else:
-        status = color_message(termcap.Yellow, "CHANGED!")
+        status = color_message(termcap.Red, "FAILED!")
         formatter = results.Text()
 
     if formatter:
@@ -164,10 +161,7 @@ def update_test(target, source, env):
     assert len(result) == 1
     result = result[0]
 
-    run = result.results[0]
-    assert run.name == "gem5"
-
-    if run.skipped():
+    if result.skipped():
         print "*** %s: %s: Test skipped, not updating." % (
             source[0].dir, color_message(termcap.Yellow, "WARNING"), )
         return 0
@@ -175,7 +169,7 @@ def update_test(target, source, env):
         print "*** %s: %s: Test successful, not updating." % (
             source[0].dir, color_message(termcap.Green, "skipped"), )
         return 0
-    elif not run.success():
+    elif result.failed_run():
         print "*** %s: %s: Test failed, not updating." % (
             source[0].dir, color_message(termcap.Red, "ERROR"), )
         return 1
@@ -105,27 +105,41 @@ class UnitResult(object):
 class TestResult(object):
     """Results for from a single test consisting of one or more units."""
 
-    def __init__(self, name, results=[]):
+    def __init__(self, name, run_results=[], verify_results=[]):
         self.name = name
-        self.results = results
+        self.results = run_results + verify_results
+        self.run_results = run_results
+        self.verify_results = verify_results
 
     def success(self):
-        return all([ r.success() for r in self.results])
+        return self.success_run() and self.success_verify()
 
-    def skipped(self):
-        return all([ r.skipped() for r in self.results])
+    def success_run(self):
+        return all([ r.success() for r in self.run_results ])
 
-    def changed(self):
-        return self.results[0].success() and self.failed()
+    def success_verify(self):
+        return all([ r.success() for r in self.verify_results ])
 
     def failed(self):
-        return any([ not r for r in self.results])
+        return self.failed_run() or self.failed_verify()
+
+    def failed_run(self):
+        return any([ not r for r in self.run_results ])
+
+    def failed_verify(self):
+        return any([ not r for r in self.verify_results ])
+
+    def skipped(self):
+        return all([ r.skipped() for r in self.run_results ])
+
+    def changed(self):
+        return self.success_run() and self.failed_verify()
 
     def runtime(self):
         return sum([ r.runtime for r in self.results ])
 
     def __nonzero__(self):
-        return all([r for r in self.results])
+        return all([ r for r in self.results ])
 
 class ResultFormatter(object):
     __metaclass__ = ABCMeta
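
For illustration, a minimal sketch of what this split buys: a caller can
now tell a verification failure apart from a run failure. StubUnit below
is a hypothetical stand-in for a real unit result and only mimics the
success()/skipped()/__nonzero__ interface the methods above rely on:

    # Hypothetical stub; real unit results expose the same interface.
    class StubUnit(object):
        def __init__(self, ok):
            self.ok = ok
            self.runtime = 0.0
        def success(self):
            return self.ok
        def skipped(self):
            return False
        def __nonzero__(self):
            return self.ok

    # Run stage succeeded, verification stage did not:
    result = TestResult("example",
                        run_results=[ StubUnit(True) ],
                        verify_results=[ StubUnit(False) ])

    assert result.changed()         # ran fine, but didn't verify
    assert not result.failed_run()  # the run stage itself was OK
    assert result.failed()          # still an overall failure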
@@ -213,7 +213,9 @@ class Test(object):
             for u in self.verify_units()
             ]
 
-        return TestResult(self.test_name, run_results + verify_results)
+        return TestResult(self.test_name,
+                          run_results=run_results,
+                          verify_results=verify_results)
 
     def __str__(self):
         return self.test_name
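
Taken together, a consumer of Test.run() can branch on the run and
verification outcomes separately. A minimal sketch of that pattern,
mirroring the print_test logic above (report_status is a hypothetical
helper standing in for the SConscript's colored status printing):

    def report_status(result):
        if result.skipped():
            return "skipped"
        elif result.changed():
            return "CHANGED"   # run succeeded, verification did not
        elif result:
            return "passed"
        else:
            return "FAILED"    # the run stage itself failed

With the result constructed in the previous sketch, report_status()
returns "CHANGED" rather than a plain "FAILED".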