
Fix test harness for Linux

Mostly just upgraded tools/test.py to the latest version from V8, but also
fixed the before and after hooks to preserve the test/tmp directory so that
running tests manually usually works.
Ryan Dahl, commit 97255c2651, v0.7.4-release
5 changed files:

  test/internet/testcfg.py   (12 lines changed)
  test/message/testcfg.py    (7 lines changed)
  test/pummel/testcfg.py     (44 lines changed)
  test/simple/testcfg.py     (27 lines changed)
  tools/test.py              (135 lines changed)
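
The fix the commit message describes boils down to wiping and recreating test/tmp around every
test. A minimal standalone sketch of that hook logic (illustration only; the helper name
reset_tmpdir is not part of the patch, and the real code lives in the testcfg.py hunks below):

    import os
    from os.path import join, dirname
    from shutil import rmtree

    def reset_tmpdir(config_root):
      # test/tmp sits next to the suite directory, e.g. dirname(test/simple) + '/tmp'
      tmpdir = join(dirname(config_root), 'tmp')
      try:
        rmtree(tmpdir)     # delete the whole tmp dir
      except OSError:
        pass               # nothing to delete on the first run
      os.mkdir(tmpdir)     # make it again, empty

In the patch the same delete-then-recreate logic is duplicated into both BeforeRun and AfterRun,
so the directory is clean before a test starts and left empty once it finishes.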

test/internet/testcfg.py (12 lines changed)

@@ -35,10 +35,10 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
 FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
 
-class SimpleTestCase(test.TestCase):
+class InternetTestCase(test.TestCase):
 
   def __init__(self, path, file, mode, context, config):
-    super(SimpleTestCase, self).__init__(context, path)
+    super(InternetTestCase, self).__init__(context, path)
     self.file = file
     self.config = config
     self.mode = mode
@@ -68,10 +68,10 @@ class SimpleTestCase(test.TestCase):
     return open(self.file).read()
 
-class SimpleTestConfiguration(test.TestConfiguration):
+class InternetTestConfiguration(test.TestConfiguration):
 
   def __init__(self, context, root):
-    super(SimpleTestConfiguration, self).__init__(context, root)
+    super(InternetTestConfiguration, self).__init__(context, root)
 
   def Ls(self, path):
     def SelectTest(name):
@@ -91,7 +91,7 @@ class SimpleTestConfiguration(test.TestConfiguration):
     for test in all_tests:
       if self.Contains(path, test):
         file_path = join(self.root, reduce(join, test[1:], "") + ".js")
-        result.append(SimpleTestCase(test, file_path, mode, self.context, self))
+        result.append(InternetTestCase(test, file_path, mode, self.context, self))
     return result
 
   def GetBuildRequirements(self):
@@ -105,4 +105,4 @@ class SimpleTestConfiguration(test.TestConfiguration):
 
 def GetConfiguration(context, root):
-  return SimpleTestConfiguration(context, root)
+  return InternetTestConfiguration(context, root)

test/message/testcfg.py (7 lines changed)

@@ -35,7 +35,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
 class MessageTestCase(test.TestCase):
 
   def __init__(self, path, file, expected, mode, context, config):
-    super(MessageTestCase, self).__init__(context, path)
+    super(MessageTestCase, self).__init__(context, path, mode)
     self.file = file
     self.expected = expected
     self.config = config
@@ -105,10 +105,7 @@ class MessageTestConfiguration(test.TestConfiguration):
     return []
 
   def ListTests(self, current_path, path, mode):
-    mjsunit = [current_path + [t] for t in self.Ls(self.root)]
-    #regress = [current_path + ['regress', t] for t in self.Ls(join(self.root, 'regress'))]
-    #bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
-    all_tests = mjsunit #+ regress + bugs
+    all_tests = [current_path + [t] for t in self.Ls(self.root)]
     result = []
     for test in all_tests:
       if self.Contains(path, test):

test/pummel/testcfg.py (44 lines changed)

@@ -27,6 +27,10 @@
 import test
 import os
+import shutil
+from shutil import rmtree
+from os import mkdir
+from glob import glob
 from os.path import join, dirname, exists
 import re
@@ -35,13 +39,32 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
 FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
 
-class SimpleTestCase(test.TestCase):
+class PummelTestCase(test.TestCase):
 
   def __init__(self, path, file, mode, context, config):
-    super(SimpleTestCase, self).__init__(context, path)
+    super(PummelTestCase, self).__init__(context, path, mode)
     self.file = file
     self.config = config
     self.mode = mode
+    self.tmpdir = join(dirname(self.config.root), 'tmp')
+
+  def AfterRun(self, result):
+    # delete the whole tmp dir
+    try:
+      rmtree(self.tmpdir)
+    except:
+      pass
+    # make it again.
+    mkdir(self.tmpdir)
+
+  def BeforeRun(self):
+    # delete the whole tmp dir
+    try:
+      rmtree(self.tmpdir)
+    except:
+      pass
+    # make it again.
+    mkdir(self.tmpdir)
 
   def GetLabel(self):
     return "%s %s" % (self.mode, self.GetName())
@@ -68,10 +91,10 @@ class SimpleTestCase(test.TestCase):
     return open(self.file).read()
 
-class SimpleTestConfiguration(test.TestConfiguration):
+class PummelTestConfiguration(test.TestConfiguration):
 
   def __init__(self, context, root):
-    super(SimpleTestConfiguration, self).__init__(context, root)
+    super(PummelTestConfiguration, self).__init__(context, root)
 
   def Ls(self, path):
     def SelectTest(name):
@@ -79,19 +102,12 @@ class SimpleTestConfiguration(test.TestConfiguration):
     return [f[:-3] for f in os.listdir(path) if SelectTest(f)]
 
   def ListTests(self, current_path, path, mode):
-    simple = [current_path + [t] for t in self.Ls(self.root)]
-    #simple = [current_path + ['simple', t] for t in self.Ls(join(self.root, 'simple'))]
-    #pummel = [current_path + ['pummel', t] for t in self.Ls(join(self.root, 'pummel'))]
-    #internet = [current_path + ['internet', t] for t in self.Ls(join(self.root, 'internet'))]
-    #regress = [current_path + ['regress', t] for t in self.Ls(join(self.root, 'regress'))]
-    #bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
-    #tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
-    all_tests = simple # + regress + bugs + tools
+    all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
     result = []
     for test in all_tests:
       if self.Contains(path, test):
         file_path = join(self.root, reduce(join, test[1:], "") + ".js")
-        result.append(SimpleTestCase(test, file_path, mode, self.context, self))
+        result.append(PummelTestCase(test, file_path, mode, self.context, self))
     return result
 
   def GetBuildRequirements(self):
@@ -105,4 +121,4 @@ class SimpleTestConfiguration(test.TestConfiguration):
 
 def GetConfiguration(context, root):
-  return SimpleTestConfiguration(context, root)
+  return PummelTestConfiguration(context, root)

test/simple/testcfg.py (27 lines changed)

@@ -30,6 +30,7 @@ import os
 import shutil
 from shutil import rmtree
 from os import mkdir
+from glob import glob
 from os.path import join, dirname, exists
 import re
@@ -41,22 +42,29 @@ FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
 class SimpleTestCase(test.TestCase):
 
   def __init__(self, path, file, mode, context, config):
-    super(SimpleTestCase, self).__init__(context, path)
+    super(SimpleTestCase, self).__init__(context, path, mode)
     self.file = file
     self.config = config
     self.mode = mode
+    self.tmpdir = join(dirname(self.config.root), 'tmp')
 
-  def tearDown(self):
+  def AfterRun(self, result):
     # delete the whole tmp dir
     try:
-      rmtree(join(dirname(self.config.root), 'tmp'))
+      rmtree(self.tmpdir)
     except:
       pass
+    # make it again.
+    mkdir(self.tmpdir)
 
-  def setUp(self):
+  def BeforeRun(self):
+    # delete the whole tmp dir
     try:
-      mkdir(join(dirname(self.config.root), 'tmp'))
+      rmtree(self.tmpdir)
     except:
       pass
+    # make it again.
+    mkdir(self.tmpdir)
 
   def GetLabel(self):
     return "%s %s" % (self.mode, self.GetName())
@@ -94,14 +102,7 @@ class SimpleTestConfiguration(test.TestConfiguration):
     return [f[:-3] for f in os.listdir(path) if SelectTest(f)]
 
   def ListTests(self, current_path, path, mode):
-    simple = [current_path + [t] for t in self.Ls(self.root)]
-    #simple = [current_path + ['simple', t] for t in self.Ls(join(self.root, 'simple'))]
-    #pummel = [current_path + ['pummel', t] for t in self.Ls(join(self.root, 'pummel'))]
-    #internet = [current_path + ['internet', t] for t in self.Ls(join(self.root, 'internet'))]
-    #regress = [current_path + ['regress', t] for t in self.Ls(join(self.root, 'regress'))]
-    #bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
-    #tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
-    all_tests = simple # + regress + bugs + tools
+    all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
     result = []
     for test in all_tests:
       if self.Contains(path, test):
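
One detail worth noting from the self.tmpdir assignment above: the scratch directory is resolved
relative to the suite's config.root, so every suite shares a single test/tmp. A small sketch with
an assumed checkout path (the path is illustrative, not from the patch):

    from os.path import join, dirname

    root = '/path/to/node/test/simple'    # assumed value of config.root
    print join(dirname(root), 'tmp')      # -> /path/to/node/test/tmp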

tools/test.py (135 lines changed)

@@ -326,15 +326,16 @@ class CommandOutput(object):
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
+    self.failed = None
 
 
 class TestCase(object):
 
-  def __init__(self, context, path):
+  def __init__(self, context, path, mode):
     self.path = path
     self.context = context
-    self.failed = None
     self.duration = None
+    self.mode = mode
 
   def IsNegative(self):
     return False
@@ -343,9 +344,9 @@ class TestCase(object):
     return cmp(other.duration, self.duration)
 
   def DidFail(self, output):
-    if self.failed is None:
-      self.failed = self.IsFailureOutput(output)
-    return self.failed
+    if output.failed is None:
+      output.failed = self.IsFailureOutput(output)
+    return output.failed
 
   def IsFailureOutput(self, output):
     return output.exit_code != 0
@@ -355,38 +356,55 @@ class TestCase(object):
   def RunCommand(self, command):
     full_command = self.context.processor(command)
-    output = Execute(full_command, self.context, self.context.timeout)
-    return TestOutput(self, full_command, output)
+    output = Execute(full_command,
+                     self.context,
+                     self.context.GetTimeout(self.mode))
+    self.Cleanup()
+    return TestOutput(self,
+                      full_command,
+                      output,
+                      self.context.store_unexpected_output)
+
+  def BeforeRun(self):
+    pass
+
+  def AfterRun(self, result):
+    pass
 
   def Run(self):
-    self.setUp()
-    result = self.RunCommand(self.GetCommand())
-    self.tearDown()
+    self.BeforeRun()
+    try:
+      result = self.RunCommand(self.GetCommand())
+    finally:
+      self.AfterRun(result)
     return result
 
-  def setUp(self):
-    return
-
-  def tearDown(self):
+  def Cleanup(self):
     return
 
 
 class TestOutput(object):
 
-  def __init__(self, test, command, output):
+  def __init__(self, test, command, output, store_unexpected_output):
     self.test = test
     self.command = command
     self.output = output
+    self.store_unexpected_output = store_unexpected_output
+
+  def UnexpectedOutput(self):
+    if self.HasCrashed():
+      outcome = CRASH
+    elif self.HasTimedOut():
+      outcome = TIMEOUT
+    elif self.HasFailed():
+      outcome = FAIL
+    else:
+      outcome = PASS
+    return not outcome in self.test.outcomes
+
+  def HasPreciousOutput(self):
+    return self.UnexpectedOutput() and self.store_unexpected_output
 
   def HasCrashed(self):
     if utils.IsWindows():
       return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
@@ -480,6 +498,13 @@ def PrintError(str):
   sys.stderr.write('\n')
 
 
+def CheckedUnlink(name):
+  try:
+    os.unlink(name)
+  except OSError, e:
+    PrintError("os.unlink() " + str(e))
+
+
 def Execute(args, context, timeout=None):
   (fd_out, outname) = tempfile.mkstemp()
   (fd_err, errname) = tempfile.mkstemp()
@@ -494,11 +519,6 @@ def Execute(args, context, timeout=None):
   os.close(fd_err)
   output = file(outname).read()
   errors = file(errname).read()
-  def CheckedUnlink(name):
-    try:
-      os.unlink(name)
-    except OSError, e:
-      PrintError("os.unlink() " + str(e))
   CheckedUnlink(outname)
   CheckedUnlink(errname)
   return CommandOutput(exit_code, timed_out, output, errors)
@@ -547,6 +567,11 @@ class TestSuite(object):
     return self.name
 
 
+# Use this to run several variants of the tests, e.g.:
+# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
+VARIANT_FLAGS = [[]]
+
+
 class TestRepository(TestSuite):
 
   def __init__(self, path):
@@ -573,8 +598,12 @@ class TestRepository(TestSuite):
   def GetBuildRequirements(self, path, context):
     return self.GetConfiguration(context).GetBuildRequirements()
 
-  def ListTests(self, current_path, path, context, mode):
-    return self.GetConfiguration(context).ListTests(current_path, path, mode)
+  def AddTestsToList(self, result, current_path, path, context, mode):
+    for v in VARIANT_FLAGS:
+      tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
+      for t in tests: t.variant_flags = v
+      result += tests
 
   def GetTestStatus(self, context, sections, defs):
     self.GetConfiguration(context).GetTestStatus(sections, defs)
@@ -601,7 +630,7 @@ class LiteralTestSuite(TestSuite):
       test_name = test.GetName()
       if not name or name.match(test_name):
         full_path = current_path + [test_name]
-        result += test.ListTests(full_path, path, context, mode)
+        test.AddTestsToList(result, full_path, path, context, mode)
     return result
 
   def GetTestStatus(self, context, sections, defs):
@@ -609,12 +638,20 @@ class LiteralTestSuite(TestSuite):
       test.GetTestStatus(context, sections, defs)
 
 
-SUFFIX = {'debug': '_g', 'release': ''}
+SUFFIX = {
+    'debug'   : '_g',
+    'release' : '' }
+FLAGS = {
+    'debug'   : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
+    'release' : []}
+TIMEOUT_SCALEFACTOR = {
+    'debug'   : 4,
+    'release' : 1 }
 
 
 class Context(object):
 
-  def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs):
+  def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
     self.workspace = workspace
     self.buildspace = buildspace
     self.verbose = verbose
@@ -622,20 +659,28 @@ class Context(object):
     self.timeout = timeout
     self.processor = processor
     self.suppress_dialogs = suppress_dialogs
+    self.store_unexpected_output = store_unexpected_output
 
   def GetVm(self, mode):
     if mode == 'debug':
       name = 'build/debug/node_g'
     else:
       name = 'build/default/node'
     if utils.IsWindows() and not name.endswith('.exe'):
       name = name + '.exe'
     return name
 
-def RunTestCases(all_cases, progress, tasks):
-  def DoSkip(case):
-    return SKIP in c.outcomes or SLOW in c.outcomes
-  cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+  def GetVmCommand(self, testcase, mode):
+    return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
+
+  def GetVmFlags(self, testcase, mode):
+    return testcase.variant_flags + FLAGS[mode]
+
+  def GetTimeout(self, mode):
+    return self.timeout * TIMEOUT_SCALEFACTOR[mode]
+
+
+def RunTestCases(cases_to_run, progress, tasks):
   progress = PROGRESS_INDICATORS[progress](cases_to_run)
   return progress.Run(tasks)
@@ -1088,6 +1133,8 @@ def BuildOptions():
       choices=PROGRESS_INDICATORS.keys(), default="mono")
   result.add_option("--no-build", help="Don't build requirements",
       default=True, action="store_true")
+  result.add_option("--build-only", help="Only build requirements, don't run the tests",
+      default=False, action="store_true")
   result.add_option("--report", help="Print a summary of the tests to be run",
       default=False, action="store_true")
   result.add_option("-s", "--suite", help="A test suite",
@@ -1096,6 +1143,8 @@ def BuildOptions():
       default=60, type="int")
   result.add_option("--arch", help='The architecture to run tests for',
       default='none')
+  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
+      default=False, action="store_true")
   result.add_option("--simulator", help="Run tests with architecture simulator",
       default='none')
   result.add_option("--special-command", default=None)
@@ -1113,7 +1162,13 @@ def BuildOptions():
       dest="suppress_dialogs", default=True, action="store_true")
   result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
       dest="suppress_dialogs", action="store_false")
-  result.add_option("--shell", help="Path to V8 shell", default="shell");
+  result.add_option("--shell", help="Path to V8 shell", default="shell")
+  result.add_option("--store-unexpected-output",
+      help="Store the temporary JS files from tests that fails",
+      dest="store_unexpected_output", default=True, action="store_true")
+  result.add_option("--no-store-unexpected-output",
+      help="Deletes the temporary JS files from tests that fails",
+      dest="store_unexpected_output", action="store_false")
   return result
@@ -1140,6 +1195,9 @@ def ProcessOptions(options):
   # was found, set the arch to the guess.
   if options.arch == 'none':
     options.arch = ARCH_GUESS
   options.scons_flags.append("arch=" + options.arch)
+  if options.snapshot:
+    options.scons_flags.append("snapshot=on")
   return True
@@ -1247,11 +1305,13 @@ def Main():
   shell = abspath(options.shell)
   buildspace = dirname(shell)
 
   context = Context(workspace, buildspace, VERBOSE,
                     shell,
                     options.timeout,
                     GetSpecialCommandProcessor(options.special_command),
-                    options.suppress_dialogs)
+                    options.suppress_dialogs,
+                    options.store_unexpected_output)
 
   # First build the required targets
   if not options.no_build:
     reqs = [ ]
@@ -1264,6 +1324,10 @@ def Main():
       if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
         return 1
 
+  # Just return if we are only building the targets for running the tests.
+  if options.build_only:
+    return 0
+
   # Get status for tests
   sections = [ ]
   defs = { }
@@ -1317,13 +1381,16 @@ def Main():
     PrintReport(all_cases)
 
   result = None
-  if len(all_cases) == 0:
+  def DoSkip(case):
+    return SKIP in case.outcomes or SLOW in case.outcomes
+  cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+  if len(cases_to_run) == 0:
     print "No tests to run."
     return 0
   else:
     try:
       start = time.time()
-      if RunTestCases(all_cases, options.progress, options.j):
+      if RunTestCases(cases_to_run, options.progress, options.j):
        result = 0
      else:
        result = 1
@@ -1337,7 +1404,7 @@ def Main():
     # test output.
     print
     sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
-    timed_tests = [ t.case for t in all_cases if not t.case.duration is None ]
+    timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
     timed_tests.sort(lambda a, b: a.CompareTime(b))
     index = 1
     for entry in timed_tests[:20]:
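
For a sense of what the new timeout plumbing does at run time, here is a tiny sketch of the
scaling rule (the get_timeout helper is illustrative only; the factors and the 60-second base
come from the TIMEOUT_SCALEFACTOR table and the --timeout default shown above):

    TIMEOUT_SCALEFACTOR = {'debug': 4, 'release': 1}

    def get_timeout(base, mode):
      # debug builds run slower, so they get four times the base timeout
      return base * TIMEOUT_SCALEFACTOR[mode]

    assert get_timeout(60, 'debug') == 240   # debug: 60s * 4
    assert get_timeout(60, 'release') == 60  # release: unscaled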
