[Git][ghc/ghc][wip/T17987] testsuite: Don't consider broken tests in stat measurements

Ben Gamari gitlab at gitlab.haskell.org
Mon Mar 30 15:38:32 UTC 2020



Ben Gamari pushed to branch wip/T17987 at Glasgow Haskell Compiler / GHC


Commits:
22dbcb37 by Ben Gamari at 2020-03-30T11:38:24-04:00
testsuite: Don't consider broken tests in stat measurements

Previously we would add statistics from tests marked as broken to the
stats output. This caused trouble in #17987, where a test was considered
"broken" solely on the basis of its allocations. In later testsuite runs
the "broken" allocations figure was then taken as the baseline and the
test started unexpectedly passing.

We now ignore metrics that arise from tests marked as broken. Of course,
this required that we distinguish between "broken" and merely "expected
to fail". I took this opportunity to do a bit of refactoring in our
representation of test outcomes.

- - - - -
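
As an illustration of the failure mode fixed here, a minimal sketch in
Python (hypothetical names; record_metric and baselines are not part of
the patch or the driver) of how recording a broken test's metric poisons
the baseline, and why dropping it avoids the "unexpectedly passing"
symptom:

    # Sketch only: metric name -> last recorded value, which the next
    # testsuite run would treat as its baseline.
    baselines = {}

    def record_metric(name, value, broken):
        # Behaviour after this patch: metrics from broken tests are
        # ignored, so a bogus figure never becomes a later run's baseline.
        if broken:
            return
        baselines[name] = value

    # Run 1: the test is "broken" purely because its allocations regressed.
    record_metric('T17987(bytes allocated)', 120_000_000, broken=True)

    # Run 2: nothing was recorded, so the old (correct) baseline still
    # applies and the regression keeps being reported, rather than the
    # test "unexpectedly passing" against a poisoned baseline.
    assert 'T17987(bytes allocated)' not in baselines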


2 changed files:

- testsuite/driver/testglobals.py
- testsuite/driver/testlib.py


Changes:

=====================================
testsuite/driver/testglobals.py
=====================================
@@ -6,6 +6,7 @@ from my_typing import *
 from pathlib import Path
 from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles
 from datetime import datetime
+from enum import Enum
 
 # -----------------------------------------------------------------------------
 # Configuration info
@@ -261,6 +262,20 @@ t = TestRun()
 def getTestRun() -> TestRun:
     return t
 
+class ExpectedOutcome(Enum):
+    """
+    Whether we expect a test to pass or why we expect it to fail.
+    """
+
+    # The test should pass
+    PASS = 'pass'
+    # The test should fail (e.g. when testing an error message)
+    FAIL = 'fail'
+    # The test should fail because it is currently broken
+    BROKEN = 'broken'
+    # The test should fail because we are lacking a library it requires
+    MISSING_LIB = 'missing-lib'
+
 # -----------------------------------------------------------------------------
 # Information about the current test
 
@@ -282,7 +297,7 @@ class TestOptions:
        self.extra_ways = [] # type: List[WayName]
 
        # the result we normally expect for this test
-       self.expect = 'pass'
+       self.expect = ExpectedOutcome.PASS # type: ExpectedOutcome
 
        # override the expected result for certain ways
        self.expect_fail_for = [] # type: List[WayName]

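As an aside on the refactoring above, a standalone sketch (not part of
the patch) of why an Enum is preferable to the bare strings used before:
a mistyped string expectation silently compares unequal, while a
mistyped enum member fails loudly at the point of use.

    from enum import Enum

    class ExpectedOutcome(Enum):
        PASS = 'pass'
        FAIL = 'fail'
        BROKEN = 'broken'
        MISSING_LIB = 'missing-lib'

    # With plain strings a typo is just another value that never matches:
    expect = 'passs'
    print(expect == 'pass')      # False, with no hint anything is wrong

    # With the enum the same typo raises immediately:
    try:
        ExpectedOutcome.PASSS
    except AttributeError as err:
        print('caught:', err)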

=====================================
testsuite/driver/testlib.py
=====================================
@@ -114,7 +114,7 @@ def expect_fail( name, opts ):
     # The compiler, testdriver, OS or platform is missing a certain
     # feature, and we don't plan to or can't fix it now or in the
     # future.
-    opts.expect = 'fail';
+    opts.expect = ExpectedOutcome.FAIL
 
 def reqlib( lib ):
     return lambda name, opts, l=lib: _reqlib (name, opts, l )
@@ -174,28 +174,28 @@ def have_library(lib: str) -> bool:
 
 def _reqlib( name, opts, lib ):
     if not have_library(lib):
-        opts.expect = 'missing-lib'
+        opts.expect = ExpectedOutcome.MISSING_LIB
 
 def req_haddock( name, opts ):
     if not config.haddock:
-        opts.expect = 'missing-lib'
+        opts.expect = ExpectedOutcome.MISSING_LIB
 
 def req_profiling( name, opts ):
     '''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
     if not config.have_profiling:
-        opts.expect = 'fail'
+        opts.expect = ExpectedOutcome.FAIL
 
 def req_shared_libs( name, opts ):
     if not config.have_shared_libs:
-        opts.expect = 'fail'
+        opts.expect = ExpectedOutcome.FAIL
 
 def req_interp( name, opts ):
     if not config.have_interp:
-        opts.expect = 'fail'
+        opts.expect = ExpectedOutcome.FAIL
 
 def req_rts_linker( name, opts ):
     if not config.have_RTS_linker:
-        opts.expect = 'fail'
+        opts.expect = ExpectedOutcome.FAIL
 
 def req_th( name, opts ):
     """
@@ -210,7 +210,7 @@ def req_th( name, opts ):
 
 def req_smp( name, opts ):
     if not config.have_smp:
-        opts.expect = 'fail'
+        opts.expect = ExpectedOutcome.FAIL
 
 def ignore_stdout(name, opts):
     opts.ignore_stdout = True
@@ -269,7 +269,7 @@ def expect_broken( bug: IssueNumber ):
     """
     def helper( name: TestName, opts ):
         record_broken(name, opts, bug)
-        opts.expect = 'fail';
+        opts.expect = ExpectedOutcome.FAIL
 
     return helper
 
@@ -291,7 +291,7 @@ def record_broken(name: TestName, opts, bug: IssueNumber):
 def _expect_pass(way):
     # Helper function. Not intended for use in .T files.
     opts = getTestOpts()
-    return opts.expect == 'pass' and way not in opts.expect_fail_for
+    return opts.expect == ExpectedOutcome.PASS and way not in opts.expect_fail_for
 
 # -----
 
@@ -869,7 +869,7 @@ def test(name: TestName,
     executeSetups([thisdir_settings, setup], name, myTestOpts)
 
     if name in config.broken_tests:
-        myTestOpts.expect = 'fail'
+        myTestOpts.expect = ExpectedOutcome.BROKEN
 
     thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
     if myTestOpts.alone:
@@ -1081,13 +1081,13 @@ def do_test(name: TestName,
                            print_output = config.verbose >= 3)
 
         # If user used expect_broken then don't record failures of pre_cmd
-        if exit_code != 0 and opts.expect not in ['fail']:
+        if exit_code != 0 and opts.expect not in [ExpectedOutcome.FAIL]:
             framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
             if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
 
     result = func(*[name,way] + args)
 
-    if opts.expect not in ['pass', 'fail', 'missing-lib']:
+    if opts.expect not in [ExpectedOutcome.PASS, ExpectedOutcome.FAIL, ExpectedOutcome.MISSING_LIB]:
         framework_fail(name, way, 'bad expected ' + opts.expect)
 
     try:
@@ -1126,7 +1126,7 @@ def do_test(name: TestName,
                                 stderr=result.stderr)
                 t.unexpected_failures.append(tr)
         else:
-            if opts.expect == 'missing-lib':
+            if opts.expect == ExpectedOutcome.MISSING_LIB:
                 t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
             else:
                 t.n_expected_failures += 1
@@ -1406,6 +1406,10 @@ def check_stats(name: TestName,
                 stats_file: Path,
                 range_fields: Dict[MetricName, MetricOracles]
                 ) -> PassFail:
+    if getTestOpts().expect == ExpectedOutcome.BROKEN:
+        print('Skipping performance metrics test on broken test {}'.format(name))
+        return passed()
+
     head_commit = Perf.commit_hash(GitRef('HEAD')) if Perf.inside_git_repo() else None
     if head_commit is None:
         return passed()
@@ -1958,7 +1962,7 @@ def compare_outputs(way: WayName,
         elif diff_file: diff_file.open('ab').close() # Make sure the file exists still as
                                                      # we will try to read it later
 
-        if config.accept and (getTestOpts().expect == 'fail' or
+        if config.accept and (getTestOpts().expect == ExpectedOutcome.FAIL or
                               way in getTestOpts().expect_fail_for):
             if_verbose(1, 'Test is expected to fail. Not accepting new output.')
             return False

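For readers tracing the control flow, a condensed and deliberately
simplified model (hypothetical function and parameter names, not driver
code) of how a test ends up with one of the four outcomes above, and why
only tests named in config.broken_tests have their metrics dropped by
check_stats:

    def classify(name, in_broken_tests, setup_said_fail, lib_missing):
        # Simplified: the real driver runs the .T-file setup functions in
        # order, then test() overrides the expectation for any name that
        # appears in config.broken_tests.
        expect = ExpectedOutcome.PASS
        if setup_said_fail:      # expect_fail, expect_broken, req_* failures
            expect = ExpectedOutcome.FAIL
        if lib_missing:          # reqlib() on a library that is unavailable
            expect = ExpectedOutcome.MISSING_LIB
        if in_broken_tests:      # test(): name in config.broken_tests
            expect = ExpectedOutcome.BROKEN
        return expect

    # Only BROKEN makes check_stats skip metric collection; a plain FAIL
    # still contributes its stats, exactly as before this patch.
    assert classify('T17987', True, True, False) is ExpectedOutcome.BROKEN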


View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/commit/22dbcb375b0dc089671ea34376bf7fe82521597d
