[commit: ghc] wip/std-hdr-llf: Fix test runner crash when not in a git repo (32f44ed)

git at git.haskell.org git at git.haskell.org
Thu Feb 21 15:13:25 UTC 2019


Repository : ssh://git@git.haskell.org/ghc

On branch  : wip/std-hdr-llf
Link       : http://ghc.haskell.org/trac/ghc/changeset/32f44ed81b0f16099d780e73ad2ea1a3cd812448/ghc

>---------------------------------------------------------------

commit 32f44ed81b0f16099d780e73ad2ea1a3cd812448
Author: David Eichmann <EichmannD at gmail.com>
Date:   Tue Feb 19 11:38:51 2019 +0000

    Fix test runner crash when not in a git repo
    
    Respect `inside_git_repo()` when checking performance stats.


>---------------------------------------------------------------

32f44ed81b0f16099d780e73ad2ea1a3cd812448
 testsuite/driver/perf_notes.py | 16 ++++++++++------
 testsuite/driver/runtests.py   | 11 +++++------
 testsuite/driver/testlib.py    |  5 +++--
 3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index 365732c..2b48353 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -23,13 +23,17 @@ from testutil import passed, failBecause
 
 # Check if "git rev-parse" can be run successfully.
 # True implies the current directory is a git repo.
+_inside_git_repo_cache = None
 def inside_git_repo():
-    try:
-        subprocess.check_call(['git', 'rev-parse', 'HEAD'],
-                              stdout=subprocess.DEVNULL)
-        return True
-    except subprocess.CalledProcessError:
-        return False
+    global _inside_git_repo_cache
+    if _inside_git_repo_cache == None:
+        try:
+            subprocess.check_call(['git', 'rev-parse', 'HEAD'],
+                                stdout=subprocess.DEVNULL)
+            _inside_git_repo_cache = True
+        except subprocess.CalledProcessError:
+            _inside_git_repo_cache = False
+    return _inside_git_repo_cache
 
 # Check if the worktree is dirty.
 def is_worktree_dirty():
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index f61ebbc..51f682e 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -122,8 +122,7 @@ if args.verbose is not None:
 # Note force skip perf tests: skip if this is not a git repo (estimated with inside_git_repo)
 # and no metrics file is given. In this case there is no way to read the previous commit's
 # perf test results, nor a way to store new perf test results.
-canGitStatus = inside_git_repo()
-forceSkipPerfTests = not hasMetricsFile and not canGitStatus
+forceSkipPerfTests = not hasMetricsFile and not inside_git_repo()
 config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
 config.only_perf_tests = args.only_perf_tests
 
@@ -371,14 +370,14 @@ else:
     spacing = "       "
     if forceSkipPerfTests and not args.skip_perf_tests:
         print()
-        print(str_warn('Skipping All Performance Tests') + ' `git status` exited with non-zero exit code.')
-        print(spacing + 'Git is required because performance test results are compared with the previous git commit\'s results (stored with git notes).')
+        print(str_warn('Skipping All Performance Tests') + ' `git` exited with non-zero exit code.')
+        print(spacing + 'Git is required because performance test results are compared with ancestor git commits\' results (stored with git notes).')
         print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')
 
     # Warn of new metrics.
     new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
     if any(new_metrics):
-        if canGitStatus:
+        if inside_git_repo():
             reason = 'a baseline (expected value) cannot be recovered from' + \
                 ' previous git commits. This may be due to HEAD having' + \
                 ' new tests or having expected changes, the presence of' + \
@@ -421,7 +420,7 @@ else:
         print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
         with open(config.metrics_file, 'a') as file:
             file.write("\n" + Perf.format_perf_stat(stats))
-    elif canGitStatus and any(stats):
+    elif inside_git_repo() and any(stats):
         if is_worktree_dirty():
             print()
             print(str_warn('Performance Metrics NOT Saved') + \
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index e800772..fb7888c 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1182,7 +1182,7 @@ def metric_dict(name, way, metric, value):
 # Returns a pass/fail object. Passes if the stats are within the expected value ranges.
 # This prints the results for the user.
 def check_stats(name, way, stats_file, range_fields):
-    head_commit = Perf.commit_hash('HEAD')
+    head_commit = Perf.commit_hash('HEAD') if Perf.inside_git_repo() else None
     result = passed()
     if range_fields:
         try:
@@ -1205,7 +1205,8 @@ def check_stats(name, way, stats_file, range_fields):
                 change = None
 
                 # If this is the first time running the benchmark, then pass.
-                baseline = baseline_and_dev[0](way, head_commit)
+                baseline = baseline_and_dev[0](way, head_commit) \
+                    if Perf.inside_git_repo() else None
                 if baseline == None:
                     metric_result = passed()
                     change = MetricChange.NewMetric



More information about the ghc-commits mailing list