Fix up regression execution to better handle tests that end abnormally.

E.g., mark aborts due to assertion failures as failed tests,
but mark tests that get killed by the user as needing to be rerun, etc.
commit 4f1855484c
parent 5cf0605766
Author: Steve Reinhardt
Date:   2009-03-07 16:58:51 -08:00

3 changed files with 103 additions and 55 deletions

File 1 of 3:

@@ -29,10 +29,18 @@
 import os
 from os.path import isdir, isfile, join as joinpath
 
-homedir = os.environ['HOME']
-confdir = os.environ.get('M5_CONFIG', joinpath(homedir, '.m5'))
+confdir = os.environ.get('M5_CONFIG')
+if not confdir:
+    # HOME is not set when running regressions, due to use of scons
+    # Execute() function.
+    homedir = os.environ.get('HOME')
+    if homedir and isdir(joinpath(homedir, '.m5')):
+        confdir = joinpath(homedir, '.m5')
 
 def get(name):
-    if not isdir(confdir):
+    if not confdir:
         return None
     conffile = joinpath(confdir, name)
     if not isfile(conffile):
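For illustration, the new lookup order can be exercised in isolation; find_confdir below is a hypothetical stand-in for the patched module-level code, not part of the commit:

    from os.path import isdir, join as joinpath

    def find_confdir(environ):
        # M5_CONFIG wins; otherwise fall back to ~/.m5, but only if HOME
        # is set (it is not under scons Execute()) and the dir exists.
        confdir = environ.get('M5_CONFIG')
        if not confdir:
            homedir = environ.get('HOME')
            if homedir and isdir(joinpath(homedir, '.m5')):
                confdir = joinpath(homedir, '.m5')
        return confdir

    assert find_confdir({}) is None                        # no more KeyError
    assert find_confdir({'M5_CONFIG': '/tmp/m5'}) == '/tmp/m5'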

File 2 of 3:

@@ -29,7 +29,7 @@
 # Authors: Steve Reinhardt
 #          Kevin Lim
 
-import os
+import os, signal
 import sys
 import glob
 from SCons.Script.SConscript import SConsEnvironment
@@ -44,57 +44,113 @@ env.Tests = {}
 def contents(node):
     return file(str(node)).read()
 
-def check_test(target, source, env):
+# functions to parse return value from scons Execute()... not the same
+# as wait() etc., so python built-in os funcs don't work.
+def signaled(status):
+    return (status & 0x80) != 0;
+
+def signum(status):
+    return (status & 0x7f);
+
+# List of signals that indicate that we should retry the test rather
+# than consider it failed.
+retry_signals = (signal.SIGTERM, signal.SIGKILL, signal.SIGINT,
+                 signal.SIGQUIT, signal.SIGHUP)
+
+# regular expressions of lines to ignore when diffing outputs
+output_ignore_regexes = (
+    '^command line:',           # for stdout file
+    '^M5 compiled ',            # for stderr file
+    '^M5 started ',             # for stderr file
+    '^M5 executing on ',        # for stderr file
+    '^Simulation complete at',  # for stderr file
+    '^Listening for',           # for stderr file
+    'listening for remote gdb', # for stderr file
+    )
+
+output_ignore_args = ' '.join(["-I '"+s+"'" for s in output_ignore_regexes])
+output_ignore_args += ' --exclude=stats.txt --exclude=outdiff'
+
+def run_test(target, source, env):
     """Check output from running test.
 
     Targets are as follows:
-    target[0] : outdiff
-    target[1] : statsdiff
-    target[2] : status
+    target[0] : status
+
+    Sources are:
+    source[0] : M5 binary
+    source[1] : tests/run.py script
+    source[2] : reference stats file
 
     """
     # make sure target files are all gone
     for t in target:
         if os.path.exists(t.abspath):
             Execute(Delete(t.abspath))
 
-    # Run diff on output & ref directories to find differences.
-    # Exclude the stats file since we will use diff-out on that.
-    Execute(env.subst('diff -ubr ${SOURCES[0].dir} ${SOURCES[1].dir} ' +
-                      '-I "^command line:" ' +            # for stdout file
-                      '-I "^M5 compiled " ' +             # for stderr file
-                      '-I "^M5 started " ' +              # for stderr file
-                      '-I "^M5 executing on " ' +         # for stderr file
-                      '-I "^Simulation complete at" ' +   # for stderr file
-                      '-I "^Listening for" ' +            # for stderr file
-                      '-I "listening for remote gdb" ' +  # for stderr file
-                      '--exclude=stats.txt --exclude=SCCS ' +
-                      '--exclude=${TARGETS[0].file} ' +
-                      '> ${TARGETS[0]}', target=target, source=source), None)
-    print "===== Output differences ====="
-    print contents(target[0])
-
-    # Run diff-out on stats.txt file
-    status = Execute(env.subst('$DIFFOUT $SOURCES > ${TARGETS[1]}',
-                               target=target, source=source),
-                     strfunction=None)
-    print "===== Statistics differences ====="
-    print contents(target[1])
-
-    # Generate status file contents based on exit status of diff-out
+    tgt_dir = os.path.dirname(str(target[0]))
+
+    # Base command for running test.  We mess around with indirectly
+    # referring to files via SOURCES and TARGETS so that scons can mess
+    # with paths all it wants to and we still get the right files.
+    cmd = '${SOURCES[0]} -d %s -re ${SOURCES[1]} %s' % (tgt_dir, tgt_dir)
+
+    # Prefix test run with batch job submission command if appropriate.
+    # Batch command also supports timeout arg (in seconds, not minutes).
+    timeout = 15 * 60 # used to be a param, probably should be again
+    if env['BATCH']:
+        cmd = '%s -t %d %s' % (env['BATCH_CMD'], timeout, cmd)
+
+    status = Execute(env.subst(cmd, target=target, source=source))
+    if status == 0:
+        # M5 terminated normally.
+        # Run diff on output & ref directories to find differences.
+        # Exclude the stats file since we will use diff-out on that.
+        outdiff = os.path.join(tgt_dir, 'outdiff')
+        diffcmd = 'diff -ubr %s ${SOURCES[2].dir} %s > %s' \
+                  % (output_ignore_args, tgt_dir, outdiff)
+        Execute(env.subst(diffcmd, target=target, source=source))
+        print "===== Output differences ====="
+        print contents(outdiff)
+
+        # Run diff-out on stats.txt file
+        statsdiff = os.path.join(tgt_dir, 'statsdiff')
+        diffcmd = '$DIFFOUT ${SOURCES[2]} %s > %s' \
+                  % (os.path.join(tgt_dir, 'stats.txt'), statsdiff)
+        diffcmd = env.subst(diffcmd, target=target, source=source)
+        status = Execute(diffcmd, strfunction=None)
+        print "===== Statistics differences ====="
+        print contents(statsdiff)
+    else: # m5 exit status != 0
+        # M5 did not terminate properly, so no need to check the output
+        if signaled(status) and signum(status) in retry_signals:
+            # Consider the test incomplete; don't create a 'status' output.
+            # Hand the return status to scons and let scons decide what
+            # to do about it (typically terminate unless run with -k).
+            print 'M5 terminated with signal', signum(status)
+            return status
+        # complete but failed execution (call to exit() with non-zero
+        # status, SIGABORT due to assertion failure, etc.)... fall through
+        # and generate FAILED status as if output comparison had failed
+        print 'M5 exited with non-zero status', status
+
+    # Generate status file contents based on exit status of m5 or diff-out
     if status == 0:
         status_str = "passed."
     else:
         status_str = "FAILED!"
-    f = file(str(target[2]), 'w')
-    print >>f, env.subst('${TARGETS[2].dir}', target=target, source=source), \
-        status_str
+    f = file(str(target[0]), 'w')
+    print >>f, tgt_dir, status_str
     f.close()
     # done
     return 0
 
-def check_test_string(target, source, env):
-    return env.subst("Comparing outputs in ${TARGETS[0].dir}.",
+def run_test_string(target, source, env):
+    return env.subst("Running test in ${TARGETS[0].dir}.",
                      target=target, source=source)
 
-testAction = env.Action(check_test, check_test_string)
+testAction = env.Action(run_test, run_test_string)
 
 def print_test(target, source, env):
     print '***** ' + contents(source[0])
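One small note on the refactor above: the join expression collapses the per-regex -I flags that were previously spelled out inline in the diff command. A quick check with a two-element subset (illustrative only):

    regexes = ('^command line:', '^M5 compiled ')
    args = ' '.join(["-I '"+s+"'" for s in regexes])
    assert args == "-I '^command line:' -I '^M5 compiled '"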
@@ -174,24 +230,8 @@ def test_builder(env, ref_dir):
     new_stats = tgt('stats.txt')
     status_file = tgt('status')
 
-    # Base command for running test.  We mess around with indirectly
-    # referring to files via SOURCES and TARGETS so that scons can
-    # mess with paths all it wants to and we still get the right
-    # files.
-    cmd = '${SOURCES[0]} -d $TARGET.dir -re ${SOURCES[1]} %s' % tgt_dir
-
-    # Prefix test run with batch job submission command if appropriate.
-    # Batch command also supports timeout arg (in seconds, not minutes).
-    timeout = 15 * 60 # used to be a param, probably should be again
-    if env['BATCH']:
-        cmd = '%s -t %d %s' % (env['BATCH_CMD'], timeout, cmd)
-
-    env.Command([tgt('simout'), tgt('simerr'), new_stats],
-                [env.M5Binary, 'run.py'], cmd)
-
-    # order of targets is important... see check_test
-    env.Command([tgt('outdiff'), tgt('statsdiff'), status_file],
-                [ref_stats, new_stats],
+    env.Command([status_file],
+                [env.M5Binary, 'run.py', ref_stats],
                 testAction)
 
     # phony target to echo status
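The masks in signaled() and signum() assume that scons's Execute() hands back a shell-style exit status, where 128+N indicates death by signal N. A small worked example of the resulting policy (standard Linux signal numbers; the status values are illustrative):

    import signal

    def signaled(status):
        return (status & 0x80) != 0

    def signum(status):
        return status & 0x7f

    retry_signals = (signal.SIGTERM, signal.SIGKILL, signal.SIGINT,
                     signal.SIGQUIT, signal.SIGHUP)

    # Test killed by the user with Ctrl-C: status 128 + SIGINT(2) = 130.
    # Signaled, and in the retry list -> leave no status file, rerun later.
    status = 128 + signal.SIGINT
    assert signaled(status) and signum(status) in retry_signals

    # Assertion failure: abort() dies on SIGABRT(6), status 134.
    # Signaled, but not a retry signal -> falls through to "FAILED!".
    status = 128 + signal.SIGABRT
    assert signaled(status) and signum(status) not in retry_signals

    # Plain exit(1): no signal bit set -> also "FAILED!".
    assert not signaled(1)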

File 3 of 3:

@@ -34,7 +34,7 @@ import m5
 m5.disableAllListeners()
 
 # single "path" arg encodes everything we need to know about test
-(category, name, isa, opsys, config) = sys.argv[1].split('/')
+(category, name, isa, opsys, config) = sys.argv[1].split('/')[-5:]
 
 # find path to directory containing this file
 tests_root = os.path.dirname(__file__)
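The new [-5:] slice matters because run_test now passes a full target directory path rather than a bare five-part test name; taking the last five components recovers the same tuple either way. A sketch with an illustrative path (not taken from the commit):

    path = 'build/ALPHA_SE/tests/opt/quick/00.hello/alpha/linux/simple-timing'
    (category, name, isa, opsys, config) = path.split('/')[-5:]
    assert (category, name, isa, opsys, config) == \
           ('quick', '00.hello', 'alpha', 'linux', 'simple-timing')
    # The old unsliced split('/') would yield nine fields here and raise
    # "ValueError: too many values to unpack".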