[Git][ghc/ghc][wip/testsuite-generic-stats] testsuite: Add mechanism to collect generic metrics
Matthew Pickering (@mpickering)
gitlab at gitlab.haskell.org
Thu Nov 16 09:15:36 UTC 2023
Matthew Pickering pushed to branch wip/testsuite-generic-stats at Glasgow Haskell Compiler / GHC
Commits:
4d40f8f1 by Matthew Pickering at 2023-11-16T09:15:27+00:00
testsuite: Add mechanism to collect generic metrics
TODO
- - - - -
5 changed files:
- testsuite/driver/testglobals.py
- testsuite/driver/testlib.py
- + testsuite/tests/perf/size/Makefile
- + testsuite/tests/perf/size/all.T
- + testsuite/tests/perf/size/size_hello.hs
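In outline, the new mechanism lets a test register named metrics, each paired with a tolerated percentage deviation and a zero-argument function that measures the value; the driver then compares each measurement against a baseline recorded in git notes. A rough, self-contained model of that flow (the baseline figure and the file name are invented for illustration):

    import os

    def collect_generic_stat(metric, deviation, get_stat):
        # Registration: remember how the metric is measured and how much
        # drift (in percent) against the baseline is tolerated.
        return {metric: {'deviation': deviation, 'action': get_stat}}

    def check_generic_stats(stats, baselines):
        # Checking: run each measurement thunk and compare to its baseline.
        ok = True
        for metric, info in stats.items():
            actual = info['action']()
            base = baselines.get(metric)
            if base is None:
                continue  # first run, no baseline yet: treat as a pass
            ok = ok and abs(actual - base) * 100 / base <= info['deviation']
        return ok

    # Assumes a size_hello.o in the current directory; the 10_000 baseline
    # stands in for the figure the real driver reads from git notes.
    stats = collect_generic_stat('static/size', 3,
                                 lambda: os.path.getsize('size_hello.o'))
    print(check_generic_stats(stats, {'static/size': 10_000}))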
Changes:
=====================================
testsuite/driver/testglobals.py
=====================================
@@ -396,6 +396,9 @@ class TestOptions:
         # Does this test the compiler's performance as opposed to the generated code.
         self.is_compiler_stats_test = False

+        # Does this test define a generic stats test?
+        self.generic_stats_test = None
+
         # should we run this test alone, i.e. not run it in parallel with
         # any other threads
         self.alone = False
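For reference, the value this field ends up holding (set by _collect_stat in testlib.py below) is a dictionary from metric name to its tolerated deviation and measurement thunk; a sketch of the shape, with illustrative values:

    opts.generic_stats_test = {
        'static/size': {
            'deviation': 3,   # tolerated drift against the baseline, in percent
            'action': lambda: os.path.getsize(in_testdir('size_hello.o')),
        },
    }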
=====================================
testsuite/driver/testlib.py
=====================================
@@ -99,6 +99,11 @@ def isCompilerStatsTest() -> bool:
     opts = getTestOpts()
     return bool(opts.is_compiler_stats_test)

+def isGenericStatsTest() -> bool:
+    opts = getTestOpts()
+    return bool(opts.generic_stats_test)
+
 def isStatsTest() -> bool:
     opts = getTestOpts()
     return opts.is_stats_test
@@ -599,6 +604,43 @@ def extra_files(files):
 def _extra_files(name, opts, files):
     opts.extra_files.extend(files)

+# Record the size of a specific file
+def collect_size(deviation, path):
+    return collect_generic_stat('static/size', deviation,
+                                lambda: os.path.getsize(in_testdir(path)))
+
+# Read a number from a specific file
+def stat_from_file(metric, deviation, path):
+    def read_file():
+        with open(in_testdir(path)) as f:
+            return int(f.read())
+    return collect_generic_stat(metric, deviation, read_file)
+
+
+# Define a set of generic stat tests
+def collect_generic_stats(get_stats):
+    def f(name, opts):
+        return _collect_stat(name, opts, get_stats)
+    return f
+
+# Define a single generic stat test, which computes the statistic by calling
+# the function given as the third argument.
+def collect_generic_stat(metric, deviation, get_stat):
+    return collect_generic_stats({metric: {'deviation': deviation, 'action': get_stat}})
+
+def _collect_stat(name: TestName, opts, get_stats):
+    opts.generic_stats_test = get_stats
+
+    for (metric, info) in get_stats.items():
+        def baselineByWay(way, target_commit, metric=metric):
+            return Perf.baseline_metric( \
+                target_commit, name, config.test_env, metric, way, \
+                config.baseline_commit )
+
+        opts.stats_range_fields[metric] = MetricOracles(baseline=baselineByWay,
+                                                        deviation=info['deviation'])
+
# -----
# Defaults to "test everything, and only break on extreme cases"
@@ -1733,6 +1775,54 @@ def metric_dict(name, way, metric, value) -> PerfStat:
         metric = metric,
         value = value)
+
+
+def check_generic_stats(name, way, get_stats, range_fields):
+    result = passed()
+    for (metric, baseline_and_dev) in range_fields.items():
+        if metric in get_stats:
+            actual_val = get_stats[metric]['action']()
+            metric_result = report_stats(name, way, metric, baseline_and_dev, actual_val)
+            # If any metric fails then the test fails, but the remaining
+            # metrics are still checked so that a complete list of changes
+            # can be presented to the user.
+            if not metric_result.passed:
+                result = metric_result
+    return result
+
+def report_stats(name, way, metric, baseline_and_dev, actual_val):
+    head_commit = Perf.commit_hash(GitRef('HEAD')) if Perf.inside_git_repo() else None
+    if head_commit is None:
+        return passed()
+
+    # Store the metric so it can later be stored in a git note.
+    perf_stat = metric_dict(name, way, metric, actual_val)
+
+    # If this is the first time running the benchmark, then pass.
+    baseline = baseline_and_dev.baseline(way, head_commit) \
+        if Perf.inside_git_repo() else None
+    if baseline is None:
+        metric_result = passed()
+        perf_change = MetricChange.NewMetric
+    else:
+        tolerance_dev = baseline_and_dev.deviation
+        (perf_change, metric_result) = Perf.check_stats_change(
+            perf_stat,
+            baseline,
+            tolerance_dev,
+            config.allowed_perf_changes,
+            config.verbose >= 4)
+
+    t.metrics.append(PerfMetric(change=perf_change, stat=perf_stat, baseline=baseline))
+
+    if not metric_result.passed:
+        if config.ignore_perf_increases and perf_change == MetricChange.Increase:
+            metric_result = passed()
+        elif config.ignore_perf_decreases and perf_change == MetricChange.Decrease:
+            metric_result = passed()
+
+    return metric_result
+
# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
@@ -1746,9 +1836,6 @@ def check_stats(name: TestName,
                 stats_file: Path,
                 range_fields: Dict[MetricName, MetricOracles]
                 ) -> PassFail:
-    head_commit = Perf.commit_hash(GitRef('HEAD')) if Perf.inside_git_repo() else None
-    if head_commit is None:
-        return passed()

     result = passed()
     if range_fields:
@@ -1771,36 +1858,8 @@ def check_stats(name: TestName,
             assert val is not None
             actual_val = int(val)

-            # Store the metric so it can later be stored in a git note.
-            perf_stat = metric_dict(name, way, metric, actual_val)
+            result = report_stats(name, way, metric, baseline_and_dev, actual_val)

-            # If this is the first time running the benchmark, then pass.
-            baseline = baseline_and_dev.baseline(way, head_commit) \
-                if Perf.inside_git_repo() else None
-            if baseline is None:
-                metric_result = passed()
-                perf_change = MetricChange.NewMetric
-            else:
-                tolerance_dev = baseline_and_dev.deviation
-                (perf_change, metric_result) = Perf.check_stats_change(
-                    perf_stat,
-                    baseline,
-                    tolerance_dev,
-                    config.allowed_perf_changes,
-                    config.verbose >= 4)
-
-            t.metrics.append(PerfMetric(change=perf_change, stat=perf_stat, baseline=baseline))
-
-            # If any metric fails then the test fails.
-            # Note, the remaining metrics are still run so that
-            # a complete list of changes can be presented to the user.
-            if not metric_result.passed:
-                if config.ignore_perf_increases and perf_change == MetricChange.Increase:
-                    metric_result = passed()
-                elif config.ignore_perf_decreases and perf_change == MetricChange.Decrease:
-                    metric_result = passed()
-
-            result = metric_result

     return result
@@ -1918,6 +1977,11 @@ async def simple_build(name: Union[TestName, str],
         if badResult(statsResult):
             return statsResult

+    if isGenericStatsTest():
+        statsResult = check_generic_stats(TestName(name), way, opts.generic_stats_test, opts.stats_range_fields)
+        if badResult(statsResult):
+            return statsResult
+
     return passed()
# -----------------------------------------------------------------------------
@@ -2002,8 +2066,14 @@ async def simple_run(name: TestName, way: WayName, prog: str, extra_run_opts: st
     # Check runtime stats if desired.
     if stats_file is not None:
         return check_stats(name, way, in_testdir(stats_file), opts.stats_range_fields)
-    else:
-        return passed()
+
+    # Check other generic stats tests
+    if isGenericStatsTest():
+        statsResult = check_generic_stats(TestName(name), way, opts.generic_stats_test, opts.stats_range_fields)
+        if badResult(statsResult):
+            return statsResult
+
+    return passed()

 def rts_flags(way: WayName) -> str:
     args = config.way_rts_flags.get(way, [])
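The acceptance decision itself is delegated to Perf.check_stats_change, which also consults config.allowed_perf_changes; stripped of that machinery, the tolerance check amounts to something like the following sketch (assuming, as for the existing perf metrics, that the deviation is a percentage):

    def within_tolerance(actual, baseline, deviation_percent):
        # Pass when the new value drifts from the baseline by no more
        # than the given percentage in either direction.
        return abs(actual - baseline) <= baseline * deviation_percent / 100.0

    # e.g. within_tolerance(10_250, 10_000, 3) is True: a 2.5% drift.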
=====================================
testsuite/tests/perf/size/Makefile
=====================================
@@ -0,0 +1,7 @@
+TOP=../../..
+include $(TOP)/mk/boilerplate.mk
+include $(TOP)/mk/test.mk
+
+libdir_size:
+	du -s `$(TEST_HC) --print-libdir` | cut -f1 > SIZE
+
=====================================
testsuite/tests/perf/size/all.T
=====================================
@@ -0,0 +1,3 @@
+test('size_hello', [collect_size(3, 'size_hello.o')], compile, [''])
+
+test('libdir', [stat_from_file('static/size', 3, 'SIZE')], makefile_test, ['libdir_size'])
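Neither test exercises the plural collect_generic_stats form, which registers several metrics in one go; a hypothetical all.T entry doing so might look like this (the test and metric names are illustrative, not part of the patch):

    test('sizes_both',
         [collect_generic_stats({
             'static/object-size': {'deviation': 3,
                                    'action': lambda: os.path.getsize(in_testdir('sizes_both.o'))},
             'static/iface-size': {'deviation': 3,
                                   'action': lambda: os.path.getsize(in_testdir('sizes_both.hi'))},
         })],
         compile, [''])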
=====================================
testsuite/tests/perf/size/size_hello.hs
=====================================
@@ -0,0 +1,3 @@
+module Main where
+
+main = print "Hello World!"
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/commit/4d40f8f1eb903e55b106f33e7482f1985f524627