[Git][ghc/ghc][wip/junit-fixes] 5 commits: testsuite: Add some type annotations to the driver

Ben Gamari gitlab at gitlab.haskell.org
Sat Jun 22 00:22:53 UTC 2019



Ben Gamari pushed to branch wip/junit-fixes at Glasgow Haskell Compiler / GHC


Commits:
1e0e7d9c by Ben Gamari at 2019-06-21T23:59:28Z
testsuite: Add some type annotations to the driver

- - - - -
53045998 by Ben Gamari at 2019-06-22T00:02:25Z
testsuite: Use pathlib.Path

- - - - -
96dc0a8b by Ben Gamari at 2019-06-22T00:13:08Z
testsuite: More type signatures

- - - - -
1b429465 by Ben Gamari at 2019-06-22T00:19:46Z
More fixes

- - - - -
4def9fe1 by Ben Gamari at 2019-06-22T00:22:20Z
More fixes

- - - - -


5 changed files:

- testsuite/driver/perf_notes.py
- testsuite/driver/runtests.py
- testsuite/driver/testglobals.py
- testsuite/driver/testlib.py
- testsuite/driver/testutil.py


Changes:

=====================================
testsuite/driver/perf_notes.py
=====================================
@@ -673,7 +673,7 @@ if __name__ == '__main__':
         print(\
             "<html>" + \
                 '<head>\n' + \
-                    (tooltipjsTag if tooltipjsTag != None else '') + \
+                    (tooltipjsTag if tooltipjsTag is not None else '') + \
                     chartjsTag + \
                 '</head>' + \
                 '<body style="padding: 20px"><canvas id="myChart"></canvas><script>' + \
@@ -681,7 +681,7 @@ if __name__ == '__main__':
                     "var commitMsgs = " + json.dumps(commitMsgs, indent=2) + ";" + \
                     "var chartData = " + json.dumps(chartData, indent=2) + ";" + \
                     (("var chart = new Chart(ctx, setCustomTooltip(chartData, commitMsgs));") \
-                        if tooltipjsTag != None else \
+                        if tooltipjsTag is not None else \
                      ("var chart = new Chart(ctx, chartData);")) + \
                 '</script></body>' + \
             "</html>"\


=====================================
testsuite/driver/runtests.py
=====================================
@@ -66,6 +66,10 @@ perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do
 
 args = parser.parse_args()
 
+# Initialize variables that are set by the build system with -e
+windows = False
+darwin = False
+
 if args.e:
     for e in args.e:
         exec(e)
@@ -284,7 +288,7 @@ t_files = list(findTFiles(config.rootdirs))
 
 print('Found', len(t_files), '.T files...')
 
-t = getTestRun()
+t = getTestRun() # type: TestRun
 
 # Avoid cmd.exe built-in 'date' command on Windows
 t.start_time = time.localtime()
@@ -435,8 +439,8 @@ else:
     stats = [stat for (_, stat) in t.metrics]
     if hasMetricsFile:
         print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
-        with open(config.metrics_file, 'a') as file:
-            file.write("\n" + Perf.format_perf_stat(stats))
+        with open(config.metrics_file, 'a') as f:
+            f.write("\n" + Perf.format_perf_stat(stats))
     elif inside_git_repo() and any(stats):
         if is_worktree_dirty():
             print()
@@ -448,8 +452,8 @@ else:
 
     # Write summary
     if config.summary_file:
-        with open(config.summary_file, 'w') as file:
-            summary(t, file)
+        with open(config.summary_file, 'w') as f:
+            summary(t, f)
 
     if args.junit:
         junit(t).write(args.junit)
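
Regarding the `windows`/`darwin` initialisation above: the driver's `-e` flag accepts arbitrary Python statements from the build system and `exec`s them at module level, so any name those statements assign must already be bound, otherwise later reads of `windows` would raise NameError when no `-e` is given. A toy sketch of the mechanism; the exact statements the build system passes are an assumption here:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-e", action="append", default=[])
    args = parser.parse_args(["-e", "windows=True"])  # e.g. from the build system

    windows = False  # default, so the name exists even without -e
    for e in args.e:
        exec(e)      # at module level this rebinds the global
    print(windows)   # True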


=====================================
testsuite/driver/testglobals.py
=====================================
@@ -2,6 +2,8 @@
 # (c) Simon Marlow 2002
 #
 
+from my_typing import *
+
 # -----------------------------------------------------------------------------
 # Configuration info
 
@@ -37,6 +39,27 @@ class TestConfig:
         # File in which to save the summary
         self.summary_file = ''
 
+        # Path to Ghostscript
+        self.gs = ''
+
+        # Run tests requiring Haddock
+        self.haddock = False
+
+        # Compiler has native code generator?
+        self.have_ncg = False
+
+        # Is compiler unregisterised?
+        self.unregisterised = False
+
+        # Was the compiler executable compiled with profiling?
+        self.compiler_profiled = False
+
+        # Was the compiler compiled with DEBUG?
+        self.compiler_debugged = False
+
+        # Was the compiler compiled with LLVM?
+        self.ghc_built_by_llvm = False
+
         # Should we print the summary?
         # Disabling this is useful for Phabricator/Harbormaster
         # logfiles, which are truncated to 30 lines. TODO. Revise if
@@ -78,16 +101,16 @@ class TestConfig:
         # Which ways to run tests (when compiling and running respectively)
         # Other ways are added from the command line if we have the appropriate
         # libraries.
-        self.compile_ways = []
-        self.run_ways     = []
-        self.other_ways   = []
+        self.compile_ways = [] # type: List[WayName]
+        self.run_ways     = [] # type: List[WayName]
+        self.other_ways   = [] # type: List[WayName]
 
         # The ways selected via the command line.
-        self.cmdline_ways = []
+        self.cmdline_ways = [] # type: List[WayName]
 
         # Lists of flags for each way
-        self.way_flags = {}
-        self.way_rts_flags = {}
+        self.way_flags = {}  # type: Dict[WayName, List[str]]
+        self.way_rts_flags = {}  # type: Dict[WayName, List[str]]
 
         # Do we have vanilla libraries?
         self.have_vanilla = False
@@ -144,6 +167,10 @@ class TestConfig:
         # See Note [Haddock runtime stats files] at the bottom of this file.
         self.stats_files_dir = '/please_set_stats_files_dir'
 
+        # Should we cleanup after test runs?
+        self.cleanup = True
+
+
 global config
 config = TestConfig()
 
@@ -166,7 +193,13 @@ class TestResult:
     unexpected_failures, unexpected_stat_failures lists of TestRun.
     """
     __slots__ = 'directory', 'testname', 'reason', 'way', 'stdout', 'stderr'
-    def __init__(self, directory, testname, reason, way, stdout=None, stderr=None):
+    def __init__(self,
+                 directory: str,
+                 testname: TestName,
+                 reason: str,
+                 way: WayName,
+                 stdout: Optional[str]=None,
+                 stderr: Optional[str]=None):
         self.directory = directory
         self.testname = testname
         self.reason = reason
@@ -184,8 +217,7 @@ class TestRun:
        self.n_expected_passes = 0
        self.n_expected_failures = 0
 
-       # type: List[TestResult]
-       self.missing_libs = []
+       self.missing_libs = [] # type: List[TestResult]
        self.framework_failures = []
        self.framework_warnings = []
 
@@ -203,7 +235,7 @@ class TestRun:
 global t
 t = TestRun()
 
-def getTestRun():
+def getTestRun() -> TestRun:
     return t
 
 # -----------------------------------------------------------------------------
@@ -308,12 +340,12 @@ class TestOptions:
        self.compile_cmd_prefix = ''
 
        # Extra output normalisation
-       self.extra_normaliser = lambda x: x
+       self.extra_normaliser = lambda x: x # type: OutputNormalizer
 
        # Custom output checker, otherwise do a comparison with expected
        # stdout file.  Accepts two arguments: filename of actual stdout
        # output, and a normaliser function given other test options
-       self.check_stdout = None
+       self.check_stdout = None # type: Optional[Callable[[Path, OutputNormalizer], bool]]
 
        # Check .hp file when profiling libraries are available?
        self.check_hp = True
@@ -334,8 +366,6 @@ class TestOptions:
        self.compile_timeout_multiplier = 1.0
        self.run_timeout_multiplier = 1.0
 
-       self.cleanup = True
-
        # Should we run tests in a local subdirectory (<testname>-run) or
        # in temporary directory in /tmp? See Note [Running tests in /tmp].
        self.local = True
@@ -344,6 +374,8 @@ class TestOptions:
 global default_testopts
 default_testopts = TestOptions()
 
+BugNumber = int
+
 # (bug, directory, name) of tests marked broken
 global brokens
-brokens = []
+brokens = []  # type: List[Tuple[BugNumber, str, str]]
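
The `# type:` comments throughout these commits are PEP 484's comment syntax, which mypy checks but the interpreter ignores, so the driver keeps working on Python versions without variable annotations. `TestName` and `WayName` come from the driver's `my_typing` module; judging by calls such as `WayName('ghci')`, they are presumably `typing.NewType` wrappers, which cost nothing at runtime but stop mypy from confusing one kind of string with another. A sketch under that assumption:

    from typing import Dict, List, NewType

    TestName = NewType("TestName", str)
    WayName = NewType("WayName", str)

    # Comment annotations: checked by mypy, invisible at runtime.
    way_flags = {}  # type: Dict[WayName, List[str]]

    def rts_flags(way):
        # type: (WayName) -> str
        args = way_flags.get(way, [])
        return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''

    # Plain strings must be wrapped explicitly, which makes intent visible:
    print(rts_flags(WayName('normal')))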


=====================================
testsuite/driver/testlib.py
=====================================
@@ -3,6 +3,8 @@
 # (c) Simon Marlow 2002
 #
 
+from __future__ import annotations
+
 import io
 import shutil
 import os
@@ -14,17 +16,22 @@ import copy
 import glob
 import sys
 from math import ceil, trunc
-from pathlib import PurePath
+from pathlib import Path, PurePath
 import collections
 import subprocess
 
-from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
-from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass, testing_metrics
+from testglobals import config, ghc_env, default_testopts, brokens, t, \
+                        TestRun, TestResult, TestOptions
+from testutil import strip_quotes, lndir, link_or_copy_file, passed, \
+                     failBecause, failBecauseStderr, str_fail, str_pass, testing_metrics
+import testutil
 from cpu_features import have_cpu_feature
 import perf_notes as Perf
 from perf_notes import MetricChange
 extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
 
+from my_typing import * # type: ignore
+
 global pool_sema
 if config.use_threads:
     import threading
@@ -33,11 +40,11 @@ if config.use_threads:
 global wantToStop
 wantToStop = False
 
-def stopNow():
+def stopNow() -> None:
     global wantToStop
     wantToStop = True
 
-def stopping():
+def stopping() -> bool:
     return wantToStop
 
 
@@ -57,13 +64,13 @@ def getTestOpts():
 
 def setLocalTestOpts(opts):
     global testopts_local
-    testopts_local.x=opts
+    testopts_local.x = opts
 
-def isCompilerStatsTest():
+def isCompilerStatsTest() -> bool:
     opts = getTestOpts()
     return bool(opts.is_compiler_stats_test)
 
-def isStatsTest():
+def isStatsTest() -> bool:
     opts = getTestOpts()
     return opts.is_stats_test
 
@@ -133,9 +140,9 @@ def stage1(name, opts):
 
 # Cache the results of looking to see if we have a library or not.
 # This makes quite a difference, especially on Windows.
-have_lib_cache = {}
+have_lib_cache = {} # type: Dict[str, bool]
 
-def have_library(lib):
+def have_library(lib: str) -> bool:
     """ Test whether the given library is available """
     if lib in have_lib_cache:
         got_it = have_lib_cache[lib]
@@ -426,7 +433,7 @@ def _collect_stats(name, opts, metrics, deviation, is_compiler_stats_test=False)
 
 # -----
 
-def when(b, f):
+def when(b: bool, f):
     # When list_brokens is on, we want to see all expect_broken calls,
     # so we always do f
     if b or config.list_broken:
@@ -434,10 +441,10 @@ def when(b, f):
     else:
         return normal
 
-def unless(b, f):
+def unless(b: bool, f):
     return when(not b, f)
 
-def doing_ghci():
+def doing_ghci() -> bool:
     return 'ghci' in config.run_ways
 
 def requires_th(name, opts):
@@ -448,67 +455,67 @@ def requires_th(name, opts):
     """
     return when(ghc_dynamic(), omit_ways(['profasm']))
 
-def ghc_dynamic():
+def ghc_dynamic() -> bool:
     return config.ghc_dynamic
 
-def fast():
+def fast() -> bool:
     return config.speed == 2
 
-def platform( plat ):
+def platform( plat: str ) -> bool:
     return config.platform == plat
 
-def opsys( os ):
+def opsys( os: str ) -> bool:
     return config.os == os
 
-def arch( arch ):
+def arch( arch: str ) -> bool:
     return config.arch == arch
 
-def wordsize( ws ):
+def wordsize( ws: int ) -> bool:
     return config.wordsize == str(ws)
 
-def msys( ):
+def msys( ) -> bool:
     return config.msys
 
-def cygwin( ):
+def cygwin( ) -> bool:
     return config.cygwin
 
-def have_vanilla( ):
+def have_vanilla( ) -> bool:
     return config.have_vanilla
 
-def have_ncg( ):
+def have_ncg( ) -> bool:
     return config.have_ncg
 
-def have_dynamic( ):
+def have_dynamic( ) -> bool:
     return config.have_dynamic
 
-def have_profiling( ):
+def have_profiling( ) -> bool:
     return config.have_profiling
 
-def in_tree_compiler( ):
+def in_tree_compiler( ) -> bool:
     return config.in_tree_compiler
 
-def unregisterised( ):
+def unregisterised( ) -> bool:
     return config.unregisterised
 
-def compiler_profiled( ):
+def compiler_profiled( ) -> bool:
     return config.compiler_profiled
 
-def compiler_debugged( ):
+def compiler_debugged( ) -> bool:
     return config.compiler_debugged
 
-def have_gdb( ):
+def have_gdb( ) -> bool:
     return config.have_gdb
 
-def have_readelf( ):
+def have_readelf( ) -> bool:
     return config.have_readelf
 
-def integer_gmp( ):
+def integer_gmp( ) -> bool:
     return have_library("integer-gmp")
 
-def integer_simple( ):
+def integer_simple( ) -> bool:
     return have_library("integer-simple")
 
-def llvm_build ( ):
+def llvm_build ( ) -> bool:
     return config.ghc_built_by_llvm
 
 # ---
@@ -722,9 +729,9 @@ def _newTestDir(name, opts, tempdir, dir):
 
 parallelTests = []
 aloneTests = []
-allTestNames = set([])
+allTestNames = set([])  # type: Set[TestName]
 
-def runTest(watcher, opts, name, func, args):
+def runTest(watcher, opts, name: TestName, func, args):
     if config.use_threads:
         pool_sema.acquire()
         t = threading.Thread(target=test_common_thread,
@@ -737,15 +744,17 @@ def runTest(watcher, opts, name, func, args):
 
 # name  :: String
 # setup :: [TestOpt] -> IO ()
-def test(name, setup, func, args):
+def test(name: TestName,
+         setup: "Callable[[List[TestOptions]], None]",
+         func, args) -> None:
     global aloneTests
     global parallelTests
     global allTestNames
     global thisdir_settings
     if name in allTestNames:
-        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
+        framework_fail(name, WayName('duplicate'), 'There are multiple tests with this name')
     if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
-        framework_fail(name, 'bad_name', 'This test has an invalid name')
+        framework_fail(name, WayName('bad_name'), 'This test has an invalid name')
 
     if config.run_only_some_tests:
         if name not in config.only:
@@ -779,7 +788,7 @@ if config.use_threads:
             finally:
                 pool_sema.release()
 
-def get_package_cache_timestamp():
+def get_package_cache_timestamp() -> float:
     if config.package_conf_cache_file == '':
         return 0.0
     else:
@@ -790,7 +799,9 @@ def get_package_cache_timestamp():
 
 do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
 
-def test_common_work(watcher, name, opts, func, args):
+def test_common_work(watcher: testutil.Watcher,
+                     name: TestName, opts,
+                     func, args) -> None:
     try:
         t.total_tests += 1
         setLocalTestOpts(opts)
@@ -803,12 +814,12 @@ def test_common_work(watcher, name, opts, func, args):
         elif func == compile_and_run or func == multimod_compile_and_run:
             all_ways = config.run_ways
         elif func == ghci_script:
-            if 'ghci' in config.run_ways:
-                all_ways = ['ghci']
+            if WayName('ghci') in config.run_ways:
+                all_ways = [WayName('ghci')]
             else:
                 all_ways = []
         else:
-            all_ways = ['normal']
+            all_ways = [WayName('normal')]
 
         # A test itself can request extra ways by setting opts.extra_ways
         all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
@@ -817,7 +828,7 @@ def test_common_work(watcher, name, opts, func, args):
 
         ok_way = lambda way: \
             not getTestOpts().skip \
-            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
+            and (getTestOpts().only_ways is None or way in getTestOpts().only_ways) \
             and (config.cmdline_ways == [] or way in config.cmdline_ways) \
             and (not (config.skip_perf_tests and isStatsTest())) \
             and (not (config.only_perf_tests and not isStatsTest())) \
@@ -852,20 +863,20 @@ def test_common_work(watcher, name, opts, func, args):
                           not os.path.splitext(f)[1] in do_not_copy)
         for filename in (opts.extra_files + extra_src_files.get(name, [])):
             if filename.startswith('/'):
-                framework_fail(name, 'whole-test',
+                framework_fail(name, WayName('whole-test'),
                     'no absolute paths in extra_files please: ' + filename)
 
             elif '*' in filename:
                 # Don't use wildcards in extra_files too much, as
                 # globbing is slow.
                 files.update((os.path.relpath(f, opts.srcdir)
-                            for f in glob.iglob(in_srcdir(filename))))
+                            for f in glob.iglob(str(in_srcdir(filename)))))
 
             elif filename:
                 files.add(filename)
 
             else:
-                framework_fail(name, 'whole-test', 'extra_file is empty string')
+                framework_fail(name, WayName('whole-test'), 'extra_file is empty string')
 
         # Run the required tests...
         for way in do_ways:
@@ -885,19 +896,19 @@ def test_common_work(watcher, name, opts, func, args):
             try:
                 cleanup()
             except Exception as e:
-                framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
+                framework_fail(name, WayName('runTest'), 'Unhandled exception during cleanup: ' + str(e))
 
         package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
 
         if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
-            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
+            framework_fail(name, WayName('whole-test'), 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
 
     except Exception as e:
-        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
+        framework_fail(name, WayName('runTest'), 'Unhandled exception: ' + str(e))
     finally:
         watcher.notify()
 
-def do_test(name, way, func, args, files):
+def do_test(name: TestName, way: WayName, func, args, files) -> None:
     opts = getTestOpts()
 
     full_name = name + '(' + way + ')'
@@ -928,12 +939,12 @@ def do_test(name, way, func, args, files):
     for extra_file in files:
         src = in_srcdir(extra_file)
         dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
-        if os.path.isfile(src):
+        if src.is_file():
             link_or_copy_file(src, dst)
-        elif os.path.isdir(src):
-            if os.path.exists(dst):
+        elif src.is_dir():
+            if dst.exists():
                 shutil.rmtree(dst)
-            os.mkdir(dst)
+            dst.mkdir()
             lndir(src, dst)
         else:
             if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
@@ -951,10 +962,9 @@ def do_test(name, way, func, args, files):
         src_makefile = in_srcdir('Makefile')
         dst_makefile = in_testdir('Makefile')
         if os.path.exists(src_makefile):
-            with io.open(src_makefile, 'r', encoding='utf8') as src:
-                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
-                with io.open(dst_makefile, 'w', encoding='utf8') as dst:
-                    dst.write(makefile)
+            makefile = src_makefile.read_text(encoding='UTF-8')
+            makefile = re.sub('TOP=.*', 'TOP=' + config.top, makefile, 1)
+            dst_makefile.write_text(makefile, encoding='UTF-8')
 
     if opts.pre_cmd:
         exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
@@ -1018,14 +1028,14 @@ def override_options(pre_cmd):
 
     return pre_cmd
 
-def framework_fail(name, way, reason):
+def framework_fail(name: TestName, way: WayName, reason: str) -> None:
     opts = getTestOpts()
     directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
     full_name = name + '(' + way + ')'
     if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
     t.framework_failures.append(TestResult(directory, name, reason, way))
 
-def framework_warn(name, way, reason):
+def framework_warn(name: TestName, way: WayName, reason: str) -> None:
     opts = getTestOpts()
     directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
     full_name = name + '(' + way + ')'
@@ -1246,7 +1256,7 @@ def metric_dict(name, way, metric, value):
 # range_fields: see TestOptions.stats_range_fields
 # Returns a pass/fail object. Passes if the stats are within the expected value ranges.
 # This prints the results for the user.
-def check_stats(name, way, stats_file, range_fields):
+def check_stats(name, way, stats_file, range_fields) -> Any:
     head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
     result = passed()
     if range_fields:
@@ -1259,11 +1269,13 @@ def check_stats(name, way, stats_file, range_fields):
 
         for (metric, baseline_and_dev) in range_fields.items():
             field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
-            if field_match == None:
+            if field_match is None:
                 print('Failed to find metric: ', metric)
                 metric_result = failBecause('no such stats metric')
             else:
-                actual_val = int(field_match.group(1))
+                val = field_match.group(1)
+                assert val is not None
+                actual_val = int(val)
 
                 # Store the metric so it can later be stored in a git note.
                 perf_stat = metric_dict(name, way, metric, actual_val)
@@ -1272,7 +1284,7 @@ def check_stats(name, way, stats_file, range_fields):
                 # If this is the first time running the benchmark, then pass.
                 baseline = baseline_and_dev[0](way, head_commit) \
                     if Perf.inside_git_repo() else None
-                if baseline == None:
+                if baseline is None:
                     metric_result = passed()
                     change = MetricChange.NewMetric
                 else:
@@ -1300,13 +1312,14 @@ def extras_build( way, extra_mods, extra_hc_opts ):
     for mod, opts in extra_mods:
         result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
         if not (mod.endswith('.hs') or mod.endswith('.lhs')):
-            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
+            extra_hc_opts += ' ' + str(Path(mod).with_suffix('.o'))
         if badResult(result):
             return result
 
     return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
 
-def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
+def simple_build(name: TestName, way: WayName,
+                 extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False) -> Any:
     opts = getTestOpts()
 
     # Redirect stdout and stderr to the same file
@@ -1380,11 +1393,11 @@ def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, b
 
     if should_fail:
         if exit_code == 0:
-            stderr_contents = open(actual_stderr_path, 'rb').read()
+            stderr_contents = actual_stderr_path.read_text(encoding='UTF-8', errors='replace')
             return failBecauseStderr('exit code 0', stderr_contents)
     else:
         if exit_code != 0:
-            stderr_contents = open(actual_stderr_path, 'rb').read()
+            stderr_contents = actual_stderr_path.read_text(encoding='UTF-8', errors='replace')
             return failBecauseStderr('exit code non-0', stderr_contents)
 
     return passed()
@@ -1396,22 +1409,22 @@ def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, b
 # from /dev/null.  Route output to testname.run.stdout and
 # testname.run.stderr.  Returns the exit code of the run.
 
-def simple_run(name, way, prog, extra_run_opts):
+def simple_run(name: TestName, way: WayName, prog: str, extra_run_opts: str) -> Any:
     opts = getTestOpts()
 
     # figure out what to use for stdin
     if opts.stdin:
-        stdin = in_testdir(opts.stdin)
+        stdin_arg = in_testdir(opts.stdin) # type: Optional[Path]
     elif os.path.exists(in_testdir(name, 'stdin')):
-        stdin = in_testdir(name, 'stdin')
+        stdin_arg = in_testdir(name, 'stdin')
     else:
-        stdin = None
+        stdin_arg = None
 
-    stdout = in_testdir(name, 'run.stdout')
+    stdout_arg = in_testdir(name, 'run.stdout')
     if opts.combined_output:
-        stderr = subprocess.STDOUT
+        stderr_arg = subprocess.STDOUT # type: Union[int, Path]
     else:
-        stderr = in_testdir(name, 'run.stderr')
+        stderr_arg = in_testdir(name, 'run.stderr')
 
     my_rts_flags = rts_flags(way)
 
@@ -1435,7 +1448,7 @@ def simple_run(name, way, prog, extra_run_opts):
     cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
 
     # run the command
-    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
+    exit_code = runCmd(cmd, stdin_arg, stdout_arg, stderr_arg, opts.run_timeout_multiplier)
 
     # check the exit code
     if exit_code != opts.exit_code:
@@ -1465,14 +1478,14 @@ def simple_run(name, way, prog, extra_run_opts):
 
     return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)
 
-def rts_flags(way):
+def rts_flags(way: WayName) -> str:
     args = config.way_rts_flags.get(way, [])
     return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
 
 # -----------------------------------------------------------------------------
 # Run a program in the interpreter and check its output
 
-def interpreter_run(name, way, extra_hc_opts, top_mod):
+def interpreter_run(name: TestName, way: WayName, extra_hc_opts: List[str], top_mod: str) -> None:
     opts = getTestOpts()
 
     stdout = in_testdir(name, 'interp.stdout')
@@ -1480,13 +1493,13 @@ def interpreter_run(name, way, extra_hc_opts, top_mod):
     script = in_testdir(name, 'genscript')
 
     if opts.combined_output:
-        framework_fail(name, 'unsupported',
+        framework_fail(name, WayName('unsupported'),
                        'WAY=ghci and combined_output together is not supported')
 
     if (top_mod == ''):
         srcname = add_hs_lhs_suffix(name)
     else:
-        srcname = top_mod
+        srcname = Path(top_mod)
 
     delimiter = '===== program output begins here\n'
 
@@ -1568,7 +1581,7 @@ def split_file(in_fn, delimiter, out1_fn, out2_fn):
 
 # -----------------------------------------------------------------------------
 # Utils
-def get_compiler_flags():
+def get_compiler_flags() -> List[str]:
     opts = getTestOpts()
 
     flags = copy.copy(opts.compiler_always_flags)
@@ -1580,7 +1593,7 @@ def get_compiler_flags():
 
     return flags
 
-def stdout_ok(name, way):
+def stdout_ok(name: TestName, way: WayName) -> bool:
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')
 
@@ -1594,17 +1607,16 @@ def stdout_ok(name, way):
    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
 
-def read_stdout( name ):
-    with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
-        return f.read()
+def read_stdout( name: TestName ) -> str:
+    return in_testdir(name, 'run.stdout').read_text(encoding='UTF-8')
 
-def dump_stdout( name ):
+def dump_stdout( name: TestName ) -> None:
     str = read_stdout(name).strip()
     if str:
         print("Stdout (", name, "):")
         print(str)
 
-def stderr_ok(name, way):
+def stderr_ok(name: TestName, way: WayName) -> bool:
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')
 
@@ -1613,32 +1625,31 @@ def stderr_ok(name, way):
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
 
-def read_stderr( name ):
-    with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
-        return f.read()
+def read_stderr( name: TestName ) -> str:
+    return in_testdir(name, 'run.stderr').read_text(encoding='UTF-8')
 
-def dump_stderr( name ):
+def dump_stderr( name: TestName ) -> None:
     str = read_stderr(name).strip()
     if str:
         print("Stderr (", name, "):")
         print(str)
 
-def read_no_crs(file):
-    str = ''
+def read_no_crs(f: Path) -> str:
+    s = ''
     try:
         # See Note [Universal newlines].
-        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
-            str = h.read()
+        with io.open(f, 'r', encoding='utf8', errors='replace', newline=None) as h:
+            s = h.read()
     except Exception:
         # On Windows, if the program fails very early, it seems the
         # files stdout/stderr are redirected to may not get created
         pass
-    return str
+    return s
 
-def write_file(file, str):
+def write_file(f: Path, s: str) -> None:
     # See Note [Universal newlines].
-    with io.open(file, 'w', encoding='utf8', newline='') as h:
-        h.write(str)
+    with io.open(f, 'w', encoding='utf8', newline='') as h:
+        h.write(s)
 
 # Note [Universal newlines]
 #
@@ -1665,7 +1676,7 @@ def write_file(file, str):
 # Another solution would be to open files in binary mode always, and
 # operate on bytes.
 
-def check_hp_ok(name):
+def check_hp_ok(name: TestName) -> bool:
     opts = getTestOpts()
 
     # do not qualify for hp2ps because we should be in the right directory
@@ -1680,18 +1691,20 @@ def check_hp_ok(name):
             if gs_working:
                 gsResult = runCmd(genGSCmd(actual_ps_path))
                 if (gsResult == 0):
-                    return (True)
+                    return True
                 else:
                     print("hp2ps output for " + name + " is not valid PostScript")
-            else: return (True) # assume postscript is valid without ghostscript
+                    return False
+            else:
+                return True # assume postscript is valid without ghostscript
         else:
             print("hp2ps did not generate PostScript for " + name)
-            return (False)
+            return False
     else:
         print("hp2ps error when processing heap profile for " + name)
-        return(False)
+        return False
 
-def check_prof_ok(name, way):
+def check_prof_ok(name: TestName, way: WayName) -> bool:
     expected_prof_file = find_expected_file(name, 'prof.sample')
     expected_prof_path = in_testdir(expected_prof_file)
 
@@ -1704,11 +1717,11 @@ def check_prof_ok(name, way):
     actual_prof_path = in_testdir(actual_prof_file)
 
     if not os.path.exists(actual_prof_path):
-        print(actual_prof_path + " does not exist")
+        print("%s does not exist" % actual_prof_path)
         return(False)
 
     if os.path.getsize(actual_prof_path) == 0:
-        print(actual_prof_path + " is empty")
+        print("%s is empty" % actual_prof_path)
         return(False)
 
     return compare_outputs(way, 'prof', normalise_prof,
@@ -1719,8 +1732,11 @@ def check_prof_ok(name, way):
 # new output. Returns true if output matched or was accepted, false
 # otherwise. See Note [Output comparison] for the meaning of the
 # normaliser and whitespace_normaliser parameters.
-def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
-                    whitespace_normaliser=lambda x:x):
+def compare_outputs(way: WayName,
+                    kind: str,
+                    normaliser: OutputNormalizer,
+                    expected_file, actual_file, diff_file=None,
+                    whitespace_normaliser: OutputNormalizer=lambda x:x) -> bool:
 
     expected_path = in_srcdir(expected_file)
     actual_path = in_testdir(actual_file)
@@ -1732,7 +1748,7 @@ def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file
         expected_normalised_path = in_testdir(expected_normalised_file)
     else:
         expected_str = ''
-        expected_normalised_path = '/dev/null'
+        expected_normalised_path = Path('/dev/null')
 
     actual_raw = read_no_crs(actual_path)
     actual_str = normaliser(actual_raw)
@@ -1794,7 +1810,7 @@ def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file
 
 # Checks that each line from pattern_file is present in actual_file as
 # a substring or regex pattern depending on is_substring.
-def grep_output(normaliser, pattern_file, actual_file, is_substring=True):
+def grep_output(normaliser: OutputNormalizer, pattern_file, actual_file, is_substring: bool=True):
     expected_path = in_srcdir(pattern_file)
     actual_path = in_testdir(actual_file)
 
@@ -1845,13 +1861,13 @@ def grep_output(normaliser, pattern_file, actual_file, is_substring=True):
 #    on the `diff` program to ignore whitespace changes as much as
 #    possible (#10152).
 
-def normalise_whitespace( str ):
+def normalise_whitespace(s: str) -> str:
     # Merge contiguous whitespace characters into a single space.
-    return ' '.join(str.split())
+    return ' '.join(s.split())
 
 callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
 
-def normalise_callstacks(s):
+def normalise_callstacks(s: str) -> str:
     opts = getTestOpts()
     def repl(matches):
         location = matches.group(1)
@@ -1869,72 +1885,72 @@ def normalise_callstacks(s):
 
 tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
 
-def normalise_type_reps(str):
+def normalise_type_reps(s: str) -> str:
     """ Normalise out fingerprints from Typeable TyCon representations """
-    return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
+    return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', s)
 
-def normalise_errmsg( str ):
+def normalise_errmsg(s: str) -> str:
     """Normalise error-messages emitted via stderr"""
     # IBM AIX's `ld` is a bit chatty
     if opsys('aix'):
-        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
+        s = s.replace('ld: 0706-027 The -x flag is ignored.\n', '')
     # remove " error:" and lower-case " Warning:" to make patch for
     # trac issue #10021 smaller
-    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
-    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
-    str = normalise_callstacks(str)
-    str = normalise_type_reps(str)
+    s = modify_lines(s, lambda l: re.sub(' error:', '', l))
+    s = modify_lines(s, lambda l: re.sub(' Warning:', ' warning:', l))
+    s = normalise_callstacks(s)
+    s = normalise_type_reps(s)
 
     # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
     #    the colon is there because it appears in error messages; this
     #    hacky solution is used in place of more sophisticated filename
     #    mangling
-    str = re.sub('([^\\s])\\.exe', '\\1', str)
+    s = re.sub('([^\\s])\\.exe', '\\1', s)
 
     # normalise slashes, minimise Windows/Unix filename differences
-    str = re.sub('\\\\', '/', str)
+    s = re.sub('\\\\', '/', s)
 
     # The inplace ghc's are called ghc-stage[123] to avoid filename
     # collisions, so we need to normalise that to just "ghc"
-    str = re.sub('ghc-stage[123]', 'ghc', str)
+    s = re.sub('ghc-stage[123]', 'ghc', s)
 
     # Error messages sometimes contain integer implementation package
-    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
+    s = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', s)
 
     # Error messages sometimes contain this blurb which can vary
     # spuriously depending upon build configuration (e.g. based on integer
     # backend)
-    str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
-                 '...plus N instances involving out-of-scope types', str)
+    s = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
+                 '...plus N instances involving out-of-scope types', s)
 
     # Also filter out bullet characters.  This is because bullets are used to
     # separate error sections, and tests shouldn't be sensitive to how the
     # division happens.
-    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
-    str = str.replace(bullet, '')
+    bullet = '•'.encode('utf8') if isinstance(s, bytes) else '•'
+    s = s.replace(bullet, '')
 
     # Windows only, this is a bug in hsc2hs but it is preventing
     # stable output for the testsuite. See #9775. For now we filter out this
     # warning message to get clean output.
     if config.msys:
-        str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
-        str = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
+        s = re.sub('Failed to remove file (.*); error= (.*)$', '', s)
+        s = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', s)
 
-    return str
+    return s
 
 # normalise a .prof file, so that we can reasonably compare it against
 # a sample.  This doesn't compare any of the actual profiling data,
 # only the shape of the profile and the number of entries.
-def normalise_prof (str):
-    # strip everything up to the line beginning "COST CENTRE"
-    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
+def normalise_prof (s: str) -> str:
+    # strip everything up to the line beginning "COST CENTRE"
+    s = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',s)
 
-    # strip results for CAFs, these tend to change unpredictably
-    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
+    # strip results for CAFs, these tend to change unpredictably
+    s = re.sub('[ \t]*(CAF|IDLE).*\n','',s)
 
     # XXX Ignore Main.main.  Sometimes this appears under CAF, and
     # sometimes under MAIN.
-    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
+    s = re.sub('[ \t]*main[ \t]+Main.*\n','',s)
 
     # We have something like this:
     #
@@ -1967,70 +1983,74 @@ def normalise_prof (str):
     # Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
     # (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
     # this works fine.
-    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
-            '\\1 \\2 \\3 \\5\n', str)
-    return str
+    s = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
+            '\\1 \\2 \\3 \\5\n', s)
+    return s
 
-def normalise_slashes_( str ):
-    str = re.sub('\\\\', '/', str)
-    str = re.sub('//', '/', str)
-    return str
+def normalise_slashes_( s: str ) -> str:
+    s = re.sub('\\\\', '/', s)
+    s = re.sub('//', '/', s)
+    return s
 
-def normalise_exe_( str ):
-    str = re.sub('\.exe', '', str)
-    return str
+def normalise_exe_( s: str ) -> str:
+    s = re.sub('\.exe', '', s)
+    return s
 
-def normalise_output( str ):
+def normalise_output( s: str ) -> str:
     # remove " error:" and lower-case " Warning:" to make patch for
     # trac issue #10021 smaller
-    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
-    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
+    s = modify_lines(s, lambda l: re.sub(' error:', '', l))
+    s = modify_lines(s, lambda l: re.sub(' Warning:', ' warning:', l))
     # Remove a .exe extension (for Windows)
     # This can occur in error messages generated by the program.
-    str = re.sub('([^\\s])\\.exe', '\\1', str)
-    str = normalise_callstacks(str)
-    str = normalise_type_reps(str)
-    return str
-
-def normalise_asm( str ):
-    lines = str.split('\n')
-    # Only keep instructions and labels not starting with a dot.
+    s = re.sub('([^\\s])\\.exe', '\\1', s)
+    s = normalise_callstacks(s)
+    s = normalise_type_reps(s)
+    return s
+
+def normalise_asm( s: str ) -> str:
+    lines = s.split('\n')
+    # Only keep instructions and labels not starting with a dot.
     metadata = re.compile('^[ \t]*\\..*$')
     out = []
     for line in lines:
       # Drop metadata directives (e.g. ".type")
       if not metadata.match(line):
         line = re.sub('@plt', '', line)
-        instr = line.lstrip().split()
+        ins = line.lstrip().split()
         # Drop empty lines.
-        if not instr:
+        if not ins:
           continue
-        # Drop operands, except for call instructions.
-        elif instr[0] == 'call':
-          out.append(instr[0] + ' ' + instr[1])
+        # Drop operands, except for call instructions.
+        elif ins[0] == 'call':
+          out.append(ins[0] + ' ' + ins[1])
         else:
-          out.append(instr[0])
-    out = '\n'.join(out)
-    return out
+          out.append(ins[0])
+    return '\n'.join(out)
 
-def if_verbose( n, s ):
+def if_verbose( n: int, s: str ) -> None:
     if config.verbose >= n:
         print(s)
 
-def dump_file(f):
+def dump_file(f: Path):
     try:
         with io.open(f) as file:
             print(file.read())
     except Exception:
         print('')
 
-def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
+def runCmd(cmd: str,
+           stdin: Union[None, int, Path]=None,
+           stdout: Union[None, int, Path]=None,
+           stderr: Union[None, int, Path]=None,
+           timeout_multiplier=1.0,
+           print_output=False) -> int:
     timeout_prog = strip_quotes(config.timeout_prog)
     timeout = str(int(ceil(config.timeout * timeout_multiplier)))
 
     # Format cmd using config. Example: cmd='{hpc} report A.tix'
     cmd = cmd.format(**config.__dict__)
-    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
+    if_verbose(3, cmd + ('< ' + os.path.basename(str(stdin)) if isinstance(stdin, Path) else ''))
 
     stdin_file = io.open(stdin, 'rb') if stdin else None
     stdout_buffer = b''
@@ -2081,10 +2101,10 @@ def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, pr
 # -----------------------------------------------------------------------------
 # checking if ghostscript is available for checking the output of hp2ps
 
-def genGSCmd(psfile):
+def genGSCmd(psfile: Path) -> str:
     return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
 
-def gsNotWorking():
+def gsNotWorking() -> None:
     global gs_working
     print("GhostScript not available for hp2ps tests")
 
@@ -2106,13 +2126,13 @@ if config.have_profiling:
   else:
     gsNotWorking();
 
-def add_suffix( name, suffix ):
+def add_suffix( name: Union[str, Path], suffix: str ) -> Path:
     if suffix == '':
-        return name
+        return Path(name)
     else:
-        return name + '.' + suffix
+        return Path(str(name) + '.' + suffix)
 
-def add_hs_lhs_suffix(name):
+def add_hs_lhs_suffix(name: str) -> Path:
     if getTestOpts().c_src:
         return add_suffix(name, 'c')
     elif getTestOpts().cmm_src:
@@ -2126,43 +2146,39 @@ def add_hs_lhs_suffix(name):
     else:
         return add_suffix(name, 'hs')
 
-def replace_suffix( name, suffix ):
-    base, suf = os.path.splitext(name)
-    return base + '.' + suffix
-
-def in_testdir(name, suffix=''):
-    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
+def in_testdir(name: Union[Path, str], suffix: str='') -> Path:
+    return getTestOpts().testdir / add_suffix(name, suffix)
 
-def in_srcdir(name, suffix=''):
-    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
+def in_srcdir(name: Union[Path, str], suffix: str='') -> Path:
+    return getTestOpts().srcdir / add_suffix(name, suffix)
 
-def in_statsdir(name, suffix=''):
-    return os.path.join(config.stats_files_dir, add_suffix(name, suffix))
+def in_statsdir(name: Union[Path, str], suffix: str='') -> Path:
+    return Path(config.stats_files_dir) / add_suffix(name, suffix)
 
 # Finding the sample output.  The filename is of the form
 #
 #   <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
 #
-def find_expected_file(name, suff):
+def find_expected_file(name: TestName, suff: str) -> Path:
     basename = add_suffix(name, suff)
     # Override the basename if the user has specified one, this will then be
     # subjected to the same name mangling scheme as normal to allow platform
     # specific overrides to work.
     basename = getTestOpts().use_specs.get (suff, basename)
 
-    files = [basename + ws + plat
+    files = [str(basename) + ws + plat
              for plat in ['-' + config.platform, '-' + config.os, '']
              for ws in ['-ws-' + config.wordsize, '']]
 
     for f in files:
-        if os.path.exists(in_srcdir(f)):
+        if in_srcdir(f).exists():
             return f
 
     return basename
 
 if config.msys:
     import stat
-    def cleanup():
+    def cleanup() -> None:
         testdir = getTestOpts().testdir
         max_attempts = 5
         retries = max_attempts
@@ -2202,7 +2218,7 @@ if config.msys:
             raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
                             % (testdir, exception))
 else:
-    def cleanup():
+    def cleanup() -> None:
         testdir = getTestOpts().testdir
         if os.path.exists(testdir):
             shutil.rmtree(testdir, ignore_errors=False)
@@ -2211,7 +2227,7 @@ else:
 # -----------------------------------------------------------------------------
 # Return a list of all the files ending in '.T' below directories roots.
 
-def findTFiles(roots):
+def findTFiles(roots: List[str]) -> Iterator[str]:
     for root in roots:
         for path, dirs, files in os.walk(root, topdown=True):
             # Never pick up .T files in uncleaned .run directories.
@@ -2224,7 +2240,7 @@ def findTFiles(roots):
 # -----------------------------------------------------------------------------
 # Output a test summary to the specified file object
 
-def summary(t, file, short=False, color=False):
+def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
 
     file.write('\n')
     printUnexpectedTests(file,
@@ -2299,7 +2315,7 @@ def summary(t, file, short=False, color=False):
     if stopping():
         file.write('WARNING: Testsuite run was terminated early\n')
 
-def printUnexpectedTests(file, testInfoss):
+def printUnexpectedTests(file: TextIO, testInfoss):
     unexpected = set(result.testname
                      for testInfos in testInfoss
                      for result in testInfos
@@ -2309,7 +2325,7 @@ def printUnexpectedTests(file, testInfoss):
         file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
         file.write('\n')
 
-def printTestInfosSummary(file, testInfos):
+def printTestInfosSummary(file: TextIO, testInfos):
     maxDirLen = max(len(tr.directory) for tr in testInfos)
     for result in sorted(testInfos, key=lambda r: (r.testname.lower(), r.way, r.directory)):
         directory = result.directory.ljust(maxDirLen)
@@ -2318,7 +2334,7 @@ def printTestInfosSummary(file, testInfos):
             directory = directory))
     file.write('\n')
 
-def modify_lines(s, f):
+def modify_lines(s: str, f: Callable[[str], str]) -> str:
     s = '\n'.join([f(l) for l in s.splitlines()])
     if s and s[-1] != '\n':
         # Prevent '\ No newline at end of file' warnings when diffing.
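
The pathlib conversion in this file trades string manipulation via `os.path` for `Path` methods. Roughly the correspondence relied on above, as a self-contained sketch (file names are illustrative):

    from pathlib import Path

    p = Path("testdir") / "T1234.run.stdout"     # os.path.join
    p.parent.mkdir(exist_ok=True)                # os.mkdir
    p.write_text("output\n", encoding="UTF-8")   # io.open(..., 'w') + write
    print(p.read_text(encoding="UTF-8"))         # io.open(...) + read
    print(p.is_file(), p.exists())               # os.path.isfile / os.path.exists
    print(p.with_suffix(".o"))                   # replace_suffix; note that the
                                                 # new suffix must start with "."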


=====================================
testsuite/driver/testutil.py
=====================================
@@ -2,9 +2,12 @@ import os
 import platform
 import subprocess
 import shutil
+from pathlib import Path, PurePath
 
 import threading
 
+from my_typing import *
+
 def passed():
     return {'passFail': 'pass'}
 
@@ -30,7 +33,7 @@ def str_warn(s):
 def str_info(s):
     return '\033[1m\033[34m' + s + '\033[0m'
 
-def getStdout(cmd_and_args):
+def getStdout(cmd_and_args: "List[str]"):
     # Can't use subprocess.check_output, since we also verify that
     # no stderr was produced
     p = subprocess.Popen([strip_quotes(cmd_and_args[0])] + cmd_and_args[1:],
@@ -44,17 +47,17 @@ def getStdout(cmd_and_args):
         raise Exception("stderr from command: %s\nOutput:\n%s\n" % (cmd_and_args, stderr))
     return stdout.decode('utf-8')
 
-def lndir(srcdir, dstdir):
+def lndir(srcdir: Path, dstdir: Path):
     # Create symlinks for all files in src directory.
     # Not all developers might have lndir installed.
     # os.system('lndir -silent {0} {1}'.format(srcdir, dstdir))
     for filename in os.listdir(srcdir):
-        src = os.path.join(srcdir, filename)
-        dst = os.path.join(dstdir, filename)
-        if os.path.isfile(src):
+        src = srcdir / filename
+        dst = dstdir / filename
+        if src.is_file():
             link_or_copy_file(src, dst)
         else:
-            os.mkdir(dst)
+            dst.mkdir()
             lndir(src, dst)
 
 # All possible test metric strings.
@@ -69,12 +72,12 @@ def testing_metrics():
 # We define the following function to make this magic more
 # explicit/discoverable. You are encouraged to use it instead of os.symlink.
 if platform.system() == 'Windows' and os.getenv('FORCE_SYMLINKS') == None:
-    link_or_copy_file = shutil.copyfile
+    link_or_copy_file = lambda src, dst: shutil.copyfile(src, dst)
 else:
-    link_or_copy_file = os.symlink
+    link_or_copy_file = lambda src, dst: os.symlink(src, dst)
 
 class Watcher(object):
-    def __init__(self, count):
+    def __init__(self, count: int):
         self.pool = count
         self.evt = threading.Event()
         self.sync_lock = threading.Lock()
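
`Watcher` acts as a countdown latch: `test_common_work` calls `notify()` once per test, and the main driver blocks until the count drains. The hunk above only shows `__init__`; the `wait`/`notify` bodies below are a sketch of how the remaining methods plausibly behave, not a verbatim copy:

    import threading

    class Watcher(object):
        def __init__(self, count):
            # type: (int) -> None
            self.pool = count
            self.evt = threading.Event()
            self.sync_lock = threading.Lock()

        def wait(self):
            # Block until every worker has checked in.
            self.evt.wait()

        def notify(self):
            # Decrement under the lock; the last notifier releases the waiter.
            with self.sync_lock:
                self.pool -= 1
                if self.pool <= 0:
                    self.evt.set()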



View it on GitLab: https://gitlab.haskell.org/ghc/ghc/compare/01acfeeee5b20f8085a8cb3cde151a86f9f670fd...4def9fe1de2c11c44689bcfc633457dd931b1cdb
