[Git][ghc/ghc][wip/T17987] 2 commits: Expect T4267 to pass
Ben Gamari
gitlab at gitlab.haskell.org
Mon Mar 30 21:09:48 UTC 2020
Ben Gamari pushed to branch wip/T17987 at Glasgow Haskell Compiler / GHC
Commits:
f024b6e3 by Sylvain Henry at 2020-03-30T12:48:39+02:00
Expect T4267 to pass
Since 54250f2d8de910b094070c1b48f086030df634b1 we expected T4267 to
fail, but it passes on CI.
- - - - -
f501d200 by Ben Gamari at 2020-03-30T17:09:41-04:00
testsuite: Refactor representation of expected test outcomes
This turns the expected test outcome from a str into a proper
enumeration.
- - - - -
3 changed files:
- testsuite/driver/testglobals.py
- testsuite/driver/testlib.py
- testsuite/tests/perf/should_run/all.T
Changes:
=====================================
testsuite/driver/testglobals.py
=====================================
@@ -6,6 +6,7 @@ from my_typing import *
from pathlib import Path
from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles
from datetime import datetime
+from enum import Enum
# -----------------------------------------------------------------------------
# Configuration info
@@ -261,6 +262,20 @@ t = TestRun()
def getTestRun() -> TestRun:
return t
+class ExpectedOutcome(Enum):
+ """
+    Whether we expect a test to pass or why we expect it to fail.
+ """
+
+ # The test should pass
+ PASS = 'pass'
+ # The test should fail (e.g. when testing an error message)
+ FAIL = 'fail'
+ # The test should fail because it is currently broken
+ BROKEN = 'broken'
+ # The test should fail because we are lacking a library it requires
+ MISSING_LIB = 'missing-lib'
+
# -----------------------------------------------------------------------------
# Information about the current test
@@ -282,7 +297,7 @@ class TestOptions:
self.extra_ways = [] # type: List[WayName]
# the result we normally expect for this test
- self.expect = 'pass'
+ self.expect = ExpectedOutcome.PASS # type: ExpectedOutcome
# override the expected result for certain ways
self.expect_fail_for = [] # type: List[WayName]
=====================================
testsuite/driver/testlib.py
=====================================
@@ -19,7 +19,8 @@ import collections
import subprocess
from testglobals import config, ghc_env, default_testopts, brokens, t, \
- TestRun, TestResult, TestOptions, PerfMetric
+ TestRun, TestResult, TestOptions, PerfMetric, \
+ ExpectedOutcome
from testutil import strip_quotes, lndir, link_or_copy_file, passed, \
failBecause, testing_metrics, \
PassFail
@@ -114,7 +115,7 @@ def expect_fail( name, opts ):
# The compiler, testdriver, OS or platform is missing a certain
# feature, and we don't plan to or can't fix it now or in the
# future.
- opts.expect = 'fail';
+ opts.expect = ExpectedOutcome.FAIL
def reqlib( lib ):
return lambda name, opts, l=lib: _reqlib (name, opts, l )
@@ -174,28 +175,28 @@ def have_library(lib: str) -> bool:
def _reqlib( name, opts, lib ):
if not have_library(lib):
- opts.expect = 'missing-lib'
+ opts.expect = ExpectedOutcome.MISSING_LIB
def req_haddock( name, opts ):
if not config.haddock:
- opts.expect = 'missing-lib'
+ opts.expect = ExpectedOutcome.MISSING_LIB
def req_profiling( name, opts ):
'''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
if not config.have_profiling:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_shared_libs( name, opts ):
if not config.have_shared_libs:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_interp( name, opts ):
if not config.have_interp:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_rts_linker( name, opts ):
if not config.have_RTS_linker:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_th( name, opts ):
"""
@@ -210,7 +211,7 @@ def req_th( name, opts ):
def req_smp( name, opts ):
if not config.have_smp:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def ignore_stdout(name, opts):
opts.ignore_stdout = True
@@ -269,7 +270,7 @@ def expect_broken( bug: IssueNumber ):
"""
def helper( name: TestName, opts ):
record_broken(name, opts, bug)
- opts.expect = 'fail';
+ opts.expect = ExpectedOutcome.FAIL
return helper
@@ -291,7 +292,7 @@ def record_broken(name: TestName, opts, bug: IssueNumber):
def _expect_pass(way):
# Helper function. Not intended for use in .T files.
opts = getTestOpts()
- return opts.expect == 'pass' and way not in opts.expect_fail_for
+ return opts.expect == ExpectedOutcome.PASS and way not in opts.expect_fail_for
# -----
@@ -869,7 +870,7 @@ def test(name: TestName,
executeSetups([thisdir_settings, setup], name, myTestOpts)
if name in config.broken_tests:
- myTestOpts.expect = 'fail'
+ myTestOpts.expect = ExpectedOutcome.BROKEN
thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
if myTestOpts.alone:
@@ -1081,14 +1082,14 @@ def do_test(name: TestName,
print_output = config.verbose >= 3)
# If user used expect_broken then don't record failures of pre_cmd
- if exit_code != 0 and opts.expect not in ['fail']:
+ if exit_code != 0 and opts.expect not in [ExpectedOutcome.FAIL]:
framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
result = func(*[name,way] + args)
- if opts.expect not in ['pass', 'fail', 'missing-lib']:
- framework_fail(name, way, 'bad expected ' + opts.expect)
+ if opts.expect not in [ExpectedOutcome.PASS, ExpectedOutcome.FAIL, ExpectedOutcome.MISSING_LIB]:
+ framework_fail(name, way, 'bad expected ' + opts.expect.value)
try:
passFail = result.passFail
@@ -1126,7 +1127,7 @@ def do_test(name: TestName,
stderr=result.stderr)
t.unexpected_failures.append(tr)
else:
- if opts.expect == 'missing-lib':
+ if opts.expect == ExpectedOutcome.MISSING_LIB:
t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
else:
t.n_expected_failures += 1
@@ -1958,7 +1959,7 @@ def compare_outputs(way: WayName,
elif diff_file: diff_file.open('ab').close() # Make sure the file exists still as
# we will try to read it later
- if config.accept and (getTestOpts().expect == 'fail' or
+ if config.accept and (getTestOpts().expect == ExpectedOutcome.FAIL or
way in getTestOpts().expect_fail_for):
if_verbose(1, 'Test is expected to fail. Not accepting new output.')
return False
=====================================
testsuite/tests/perf/should_run/all.T
=====================================
@@ -233,8 +233,7 @@ test('T5949',
['-O'])
test('T4267',
- [expect_broken(4267),
- collect_stats('bytes allocated',10),
+ [collect_stats('bytes allocated',10),
only_ways(['normal'])],
compile_and_run,
['-O'])
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/7bcd37e53474bf071bd28d4ac69ce2032d8cad1e...f501d2006bdbc778e5f869f7032fc4ee82f9412b
--
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/7bcd37e53474bf071bd28d4ac69ce2032d8cad1e...f501d2006bdbc778e5f869f7032fc4ee82f9412b
You're receiving this email because of your account on gitlab.haskell.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mail.haskell.org/pipermail/ghc-commits/attachments/20200330/58f30a59/attachment-0001.html>
More information about the ghc-commits
mailing list