[Git][ghc/ghc][master] 2 commits: testsuite: Allow baseline commit to be set explicitly
Marge Bot
gitlab at gitlab.haskell.org
Tue Aug 18 19:40:09 UTC 2020
Marge Bot pushed to branch master at Glasgow Haskell Compiler / GHC
Commits:
194b25ee by Ben Gamari at 2020-08-18T15:40:05-04:00
testsuite: Allow baseline commit to be set explicitly
- - - - -
fdcf7645 by Ben Gamari at 2020-08-18T15:40:05-04:00
gitlab-ci: Use MR base commit as performance baseline
- - - - -
8 changed files:
- .gitlab-ci.yml
- .gitlab/ci.sh
- hadrian/src/Settings/Builders/RunTest.hs
- testsuite/driver/perf_notes.py
- testsuite/driver/runtests.py
- testsuite/driver/testglobals.py
- testsuite/driver/testlib.py
- testsuite/mk/test.mk
Changes:
=====================================
.gitlab-ci.yml
=====================================
@@ -26,12 +26,28 @@ stages:
- testing # head.hackage correctness and compiler performance testing
- deploy # push documentation
+# Note [The CI Story]
+# ~~~~~~~~~~~~~~~~~~~
+#
+# There are two different types of pipelines:
+#
+# - marge-bot merges to `master`. Here we perform an exhaustive validation
+# across all of the platforms which we support. In addition, we push
+# performance metric notes upstream, providing a persistent record of the
+# performance characteristics of the compiler.
+#
+# - merge requests. Here we perform a slightly less exhaustive battery of
+# testing. Namely we omit some configurations (e.g. the unregisterised job).
+# These use the merge request's base commit for performance metric
+# comparisons.
+#
+
workflow:
-  # N.B.Don't run on wip/ branches, instead on run on merge requests.
+  # N.B. Don't run on wip/ branches; instead, run on merge requests.
rules:
- if: $CI_MERGE_REQUEST_ID
- if: $CI_COMMIT_TAG
-  - if: '$CI_COMMIT_BRANCH == "master"'
+  - if: '$CI_COMMIT_BRANCH == "wip/marge_bot_batch_merge_job"'
- if: '$CI_COMMIT_BRANCH =~ /ghc-[0-9]+\.[0-9]+/'
- if: '$CI_PIPELINE_SOURCE == "web"'
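The two pipeline flavours described in Note [The CI Story] are selected by the `rules:` above. A rough Python sketch of that classification, useful when reasoning about which baseline a pipeline will use (a hypothetical helper; the authoritative logic is the YAML rules themselves):

    import os

    def pipeline_kind() -> str:
        # Mirrors the workflow rules: merge-request pipelines compare
        # against the MR base commit; marge-bot batch pipelines run the
        # full validation and push metric notes upstream.
        if os.environ.get("CI_MERGE_REQUEST_ID"):
            return "merge-request"
        if os.environ.get("CI_COMMIT_BRANCH") == "wip/marge_bot_batch_merge_job":
            return "marge-batch"
        return "other"  # tags, release branches, web-triggered pipelines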
=====================================
.gitlab/ci.sh
=====================================
@@ -363,6 +363,13 @@ function push_perf_notes() {
"$TOP/.gitlab/test-metrics.sh" push
}
+# Figure out which commit should be used by the testsuite driver as a
+# performance baseline. See Note [The CI Story].
+function determine_metric_baseline() {
+ export PERF_BASELINE_COMMIT="$(git merge-base $CI_MERGE_REQUEST_TARGET_BRANCH_NAME HEAD)"
+ info "Using $PERF_BASELINE_COMMIT for performance metric baseline..."
+}
+
function test_make() {
run "$MAKE" test_bindist TEST_PREP=YES
run "$MAKE" V=0 test \
=====================================
hadrian/src/Settings/Builders/RunTest.hs
=====================================
@@ -78,6 +78,7 @@ runTestBuilderArgs = builder RunTest ? do
<*> (maybe False (=="YES") <$> lookupEnv "OS")
(testEnv, testMetricsFile) <- expr . liftIO $
(,) <$> lookupEnv "TEST_ENV" <*> lookupEnv "METRICS_FILE"
+ perfBaseline <- expr . liftIO $ lookupEnv "PERF_BASELINE_COMMIT"
threads <- shakeThreads <$> expr getShakeOptions
os <- getTestSetting TestHostOS
@@ -141,6 +142,9 @@ runTestBuilderArgs = builder RunTest ? do
, arg "--config", arg $ "timeout_prog=" ++ show (top -/- timeoutProg)
, arg "--config", arg $ "stats_files_dir=" ++ statsFilesDir
, arg $ "--threads=" ++ show threads
+ , case perfBaseline of
+     Just commit | not (null commit) -> arg ("--perf-baseline=" ++ show commit)
+     _ -> mempty
, emitWhenSet testEnv $ \env -> arg ("--test-env=" ++ show env)
, emitWhenSet testMetricsFile $ \file -> arg ("--metrics-file=" ++ file)
, getTestArgs -- User-provided arguments from command line.
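The Hadrian change is pure plumbing: read PERF_BASELINE_COMMIT and forward it as a driver flag only when it is set and non-empty. The same guard expressed as a Python sketch (illustrative only; the real logic is the Haskell above):

    import os

    def perf_baseline_args() -> list:
        # Mirrors the `Just commit | not (null commit)` guard: emit the
        # flag only for a set, non-empty environment variable.
        commit = os.environ.get("PERF_BASELINE_COMMIT")
        return ["--perf-baseline=" + commit] if commit else []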
=====================================
testsuite/driver/perf_notes.py
=====================================
@@ -76,8 +76,7 @@ PerfStat = NamedTuple('PerfStat', [('test_env', TestEnv),
# A baseline recovered from stored metrics.
Baseline = NamedTuple('Baseline', [('perfStat', PerfStat),
- ('commit', GitHash),
- ('commitDepth', int)])
+ ('commit', GitHash)])
class MetricChange(Enum):
# The metric appears to have no baseline and is presumably a new test.
@@ -402,7 +401,8 @@ def baseline_metric(commit: GitHash,
name: TestName,
test_env: TestEnv,
metric: MetricName,
- way: WayName
+ way: WayName,
+ baseline_ref: Optional[GitRef]
) -> Optional[Baseline]:
# For performance reasons (in order to avoid calling commit_hash), we assert
# commit is already a commit hash.
@@ -411,6 +411,8 @@ def baseline_metric(commit: GitHash,
# Get all recent commit hashes.
commit_hashes = baseline_commit_log(commit)
+ baseline_commit = commit_hash(baseline_ref) if baseline_ref else None
+
def has_expected_change(commit: GitHash) -> bool:
return get_allowed_perf_changes(commit).get(name) is not None
@@ -418,11 +420,18 @@ def baseline_metric(commit: GitHash,
def find_baseline(namespace: NoteNamespace,
test_env: TestEnv
) -> Optional[Baseline]:
+ if baseline_commit is not None:
+     current_metric = get_commit_metric(namespace, baseline_commit, test_env, name, metric, way)
+     if current_metric is not None:
+         return Baseline(current_metric, baseline_commit)
+     else:
+         return None
+
for depth, current_commit in list(enumerate(commit_hashes))[1:]:
# Check for a metric on this commit.
current_metric = get_commit_metric(namespace, current_commit, test_env, name, metric, way)
if current_metric is not None:
- return Baseline(current_metric, current_commit, depth)
+ return Baseline(current_metric, current_commit)
# Stop if there is an expected change at this commit. In that case
# metrics on ancestor commits will not be a valid baseline.
@@ -552,7 +561,7 @@ def check_stats_change(actual: PerfStat,
result = passed()
if not change_allowed:
error = str(change) + ' from ' + baseline.perfStat.test_env + \
- ' baseline @ HEAD~' + str(baseline.commitDepth)
+ ' baseline @ %s' % baseline.commit
print(actual.metric, error + ':')
result = failBecause('stat ' + error, tag='stat')
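The behavioural change in baseline_metric is easiest to see in isolation: with an explicit baseline ref the driver consults only that one commit and never walks the ancestor log. A simplified sketch (the expected-change cutoff is omitted for brevity; `get_metric` stands in for `get_commit_metric`):

    from typing import Callable, List, Optional, Tuple

    def find_baseline(baseline_commit: Optional[str],
                      commit_hashes: List[str],
                      get_metric: Callable[[str], Optional[float]]
                      ) -> Optional[Tuple[float, str]]:
        # Explicit baseline: succeed or fail on that exact commit.
        if baseline_commit is not None:
            m = get_metric(baseline_commit)
            return (m, baseline_commit) if m is not None else None
        # Otherwise search recent ancestors (skipping HEAD itself) for
        # the nearest commit that recorded this metric.
        for current in commit_hashes[1:]:
            m = get_metric(current)
            if m is not None:
                return (m, current)
        return None

This also explains why Baseline loses its commitDepth field: an arbitrary baseline commit has no meaningful HEAD~n distance, so messages now print the commit hash instead.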
=====================================
testsuite/driver/runtests.py
=====================================
@@ -27,7 +27,7 @@ from testutil import getStdout, Watcher, str_warn, str_info
from testglobals import getConfig, ghc_env, getTestRun, TestConfig, \
TestOptions, brokens, PerfMetric
from my_typing import TestName
-from perf_notes import MetricChange, inside_git_repo, is_worktree_dirty, format_perf_stat
+from perf_notes import MetricChange, GitRef, inside_git_repo, is_worktree_dirty, format_perf_stat
from junit import junit
import term_color
from term_color import Color, colored
@@ -70,6 +70,7 @@ parser.add_argument("--verbose", type=int, choices=[0,1,2,3,4,5], help="verbose
parser.add_argument("--junit", type=argparse.FileType('wb'), help="output testsuite summary in JUnit format")
parser.add_argument("--broken-test", action="append", default=[], help="a test name to mark as broken for this run")
parser.add_argument("--test-env", default='local', help="Override default chosen test-env.")
+parser.add_argument("--perf-baseline", type=GitRef, metavar='COMMIT', help="Baseline commit for performance comparsons.")
perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
@@ -101,6 +102,7 @@ config.metrics_file = args.metrics_file
hasMetricsFile = config.metrics_file is not None
config.summary_file = args.summary_file
config.no_print_summary = args.no_print_summary
+config.baseline_commit = args.perf_baseline
if args.only:
config.only = args.only
@@ -351,8 +353,8 @@ def tabulate_metrics(metrics: List[PerfMetric]) -> None:
rel = 100 * (val1 - val0) / val0
print("{space:24} {herald:40} {value:15.3f} [{direction}, {rel:2.1f}%]".format(
space = "",
- herald = "(baseline @ HEAD~{depth})".format(
- depth = metric.baseline.commitDepth),
+ herald = "(baseline @ {commit})".format(
+ commit = metric.baseline.commit),
value = val0,
direction = metric.change,
rel = rel
@@ -422,6 +424,8 @@ else:
# Dump metrics data.
print("\nPerformance Metrics (test environment: {}):\n".format(config.test_env))
+ if config.baseline_commit:
+     print('Performance baseline: %s\n' % config.baseline_commit)
if any(t.metrics):
tabulate_metrics(t.metrics)
else:
@@ -477,19 +481,19 @@ else:
summary(t, sys.stdout, config.no_print_summary, config.supports_colors)
# Write perf stats if any exist or if a metrics file is specified.
- stats = [stat for (_, stat, __) in t.metrics]
+ stats_metrics = [stat for (_, stat, __) in t.metrics] # type: List[PerfStat]
if hasMetricsFile:
- print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
+ print('Appending ' + str(len(stats_metrics)) + ' stats to file: ' + config.metrics_file)
with open(config.metrics_file, 'a') as f:
- f.write("\n" + Perf.format_perf_stat(stats))
- elif inside_git_repo() and any(stats):
+ f.write("\n" + Perf.format_perf_stat(stats_metrics))
+ elif inside_git_repo() and any(stats_metrics):
if is_worktree_dirty():
print()
print(str_warn('Performance Metrics NOT Saved') + \
' working tree is dirty. Commit changes or use ' + \
'--metrics-file to save metrics to a file.')
else:
- Perf.append_perf_stat(stats)
+ Perf.append_perf_stat(stats_metrics)
# Write summary
if config.summary_file:
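On the driver side the new flag is ordinary argparse wiring; `GitRef` is just a typed wrapper over the ref string, resolved to a hash later in perf_notes. A standalone sketch of the pattern (a hypothetical example, not the driver code itself):

    import argparse
    from typing import NewType

    GitRef = NewType("GitRef", str)   # a branch, tag, or commit hash

    parser = argparse.ArgumentParser()
    parser.add_argument("--perf-baseline", type=GitRef, metavar="COMMIT",
                        help="Baseline commit for performance comparisons.")

    args = parser.parse_args(["--perf-baseline", "fdcf7645"])
    print(args.perf_baseline)   # -> fdcf7645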
=====================================
testsuite/driver/testglobals.py
=====================================
@@ -4,7 +4,7 @@
from my_typing import *
from pathlib import Path
-from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles
+from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles, GitRef
from datetime import datetime
# -----------------------------------------------------------------------------
@@ -163,6 +163,9 @@ class TestConfig:
# run.
self.broken_tests = set() # type: Set[TestName]
+ # Baseline commit for performance metric comparisons.
+ self.baseline_commit = None # type: Optional[GitRef]
+
# Should we skip performance tests
self.skip_perf_tests = False
=====================================
testsuite/driver/testlib.py
=====================================
@@ -477,7 +477,8 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
metric = '{}/{}'.format(tag, metric_name)
def baselineByWay(way, target_commit, metric=metric):
return Perf.baseline_metric( \
- target_commit, name, config.test_env, metric, way)
+ target_commit, name, config.test_env, metric, way, \
+ config.baseline_commit )
opts.stats_range_fields[metric] = MetricOracles(baseline=baselineByWay,
deviation=deviation)
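One subtlety in baselineByWay: `metric=metric` is the classic default-argument idiom that freezes the loop variable at function-definition time, while `config.baseline_commit` is looked up when the closure actually runs. A standalone illustration of why the idiom matters (metric names are illustrative):

    def make_closures():
        fns = []
        for metric in ["runtime/bytes allocated", "runtime/peak_megabytes_allocated"]:
            # Without `metric=metric`, every closure would see the final
            # value of the loop variable when called later.
            def baseline_by_way(way, metric=metric):
                return (metric, way)
            fns.append(baseline_by_way)
        return fns

    print([f("normal")[0] for f in make_closures()])
    # -> ['runtime/bytes allocated', 'runtime/peak_megabytes_allocated']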
=====================================
testsuite/mk/test.mk
=====================================
@@ -232,6 +232,10 @@ ifneq "$(VERBOSE)" ""
RUNTEST_OPTS += --verbose=$(VERBOSE)
endif
+ifneq "$(PERF_TEST_BASELINE_COMMIT)" ""
+RUNTEST_OPTS += --perf-baseline=$(PERF_TEST_BASELINE_COMMIT)
+endif
+
ifeq "$(SKIP_PERF_TESTS)" "YES"
RUNTEST_OPTS += --skip-perf-tests
endif
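With this hook in place, a make-based run can pin the baseline explicitly, e.g. (illustrative invocation; any ref you want to compare against works):

    make test PERF_TEST_BASELINE_COMMIT=$(git merge-base master HEAD)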
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/a87a0b498f4c93c33e3db8d7f68fbaa5d812b408...fdcf76450348d0554b7fd1768331f9efaf691e13
--
You're receiving this email because of your account on gitlab.haskell.org.