[Git][ghc/ghc][wip/T16748] 2 commits: testsuite: Add support for generating OpenMetrics comparisons
Ben Gamari
gitlab at gitlab.haskell.org
Tue Jun 4 17:08:24 UTC 2019
Ben Gamari pushed to branch wip/T16748 at Glasgow Haskell Compiler / GHC
Commits:
4638602d by Ben Gamari at 2019-06-04T17:05:18Z
testsuite: Add support for generating OpenMetrics comparisons
This adds a flag to the perf_notes script, `--openmetrics`, allowing it
to produce OpenMetrics output summarizing the largest changes between
two commits. This can be fed to GitLab for visualization.
See #16748.
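For illustration, an invocation along the lines of the CI step added below (the two commit SHAs here are placeholders) would write the report to a file that GitLab can collect:

    testsuite/driver/perf_notes.py --test-env=$TEST_ENV --openmetrics <base-sha> <head-sha> > metrics.txt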
- - - - -
bd2525f4 by Ben Gamari at 2019-06-04T17:08:05Z
gitlab-ci: Collect OpenMetrics report
- - - - -
2 changed files:
- .gitlab-ci.yml
- testsuite/driver/perf_notes.py
Changes:
=====================================
.gitlab-ci.yml
=====================================
@@ -227,6 +227,16 @@ hadrian-ghc-in-ghci:
- |
THREADS=`mk/detect-cpu-count.sh`
make $TEST_TYPE THREADS=$THREADS JUNIT_FILE=../../junit.xml METRICS_FILE=$METRICS_FILE
+ - |
+ # Generate OpenMetrics summary
+ if [ -n "$CI_MERGE_REQUEST_SOURCE_BRANCH_SHA" ]; then
+ testsuite/driver/perf_notes.py \
+ --test-env=$TEST_ENV --openmetrics \
+ $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA $CI_COMMIT_SHA \
+ > metrics.txt
+ else
+ echo "No base commit, skipping OpenMetrics generation..."
+ fi
- |
# Push git notes.
METRICS_FILE=$METRICS_FILE .gitlab/push-test-metrics.sh
@@ -234,6 +244,7 @@ hadrian-ghc-in-ghci:
artifacts:
reports:
junit: junit.xml
+ metrics: metrics.txt
expire_in: 2 week
paths:
- ghc-*.tar.xz
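For context, the `metrics: metrics.txt` report entry above collects the file produced by the perf_notes.py change below: comment lines summarizing the largest movements, followed by one line per test/metric pair. With a made-up test name and values, the contents would look roughly like:

    # Top 20 changes between <base-sha> and <head-sha>
    # T1234/max_bytes_used: 1000.0 -> 1100.0: 10.0%
    T1234                       max_bytes_used                 1100.0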
=====================================
testsuite/driver/perf_notes.py
=====================================
@@ -503,6 +503,8 @@ if __name__ == '__main__':
parser.add_argument("--add-note", nargs=3,
help="Development only. --add-note N commit seed \
Adds N fake metrics to the given commit using the random seed.")
+ parser.add_argument("--openmetrics", action="store_true",
+ help="Produce an OpenMetrics report comparing two commits' metrics")
parser.add_argument("--ref", type=str, default='perf',
help="Git notes ref")
parser.add_argument("commits", nargs=argparse.REMAINDER,
@@ -576,6 +578,13 @@ if __name__ == '__main__':
print(second_line)
print("-" * (len(second_line)+1))
+ def get_metric_avg(commit, test):
+ values = [float(t.stat.value) for t in metrics if t.commit == commit and t.stat.test == test]
+ if values == []:
+ return None
+ else:
+ return sum(values) / len(values)
+
def commit_string(test, flag):
def delta(v1, v2):
return round((100 * (v1 - v2)/v2),2)
@@ -584,11 +593,7 @@ if __name__ == '__main__':
# Note: if the test environment is not set, this will combine metrics from all test environments.
averageValuesOrNones = []
for commit in args.commits:
- values = [float(t.stat.value) for t in metrics if t.commit == commit and t.stat.test == test]
- if values == []:
- averageValuesOrNones.append(None)
- else:
- averageValuesOrNones.append(sum(values) / len(values))
+ averageValuesOrNones.append(get_metric_avg(commit, test))
if flag == 'metrics':
strings = [str(v) if v != None else '-' for v in averageValuesOrNones]
@@ -606,17 +611,40 @@ if __name__ == '__main__':
#
# The pretty-printed output
#
-
- header('commit')
- # Printing out metrics.
all_tests = sorted(set([(test.stat.test, test.stat.metric) for test in metrics]))
- for test, metric in all_tests:
- print("{:27}{:30}".format(test, metric) + commit_string(test,'metrics'))
-
- # Has no meaningful output if there is no commit to compare to.
- if not singleton_commit:
- header('percent')
+ if args.openmetrics:
+ if len(args.commits) == 2:
+ ref_commit, commit = args.commits
+ else:
+ raise ValueError("--openmetrics expects precisely two commits to compare")
- # Printing out percentages.
+ metrics_by_test = {}
+ for test, metric in all_tests:
+ ref = get_metric_avg(ref_commit, test)
+ val = get_metric_avg(commit, test)
+ metrics_by_test[(test, metric)] = (ref, val)
+
+ def rel_change(x):
+ (_, (ref, val)) = x
+ return (val - ref) / ref
+ sorted_metrics = sorted(metrics_by_test.items(), key=rel_change)
+
+ num_results = 20
+ to_render = sorted_metrics[:num_results] + sorted_metrics[-num_results:]
+ print('# Top {n} changes between {ref_commit} and {commit}'.format(n=num_results, ref_commit=ref_commit, commit=commit))
+ for ((test, metric), (ref, val)) in to_render:
+ print("# {}/{}: {:10} -> {:10}: {:2.2}%".format(test, metric, ref, val, (val-ref) / ref * 100.0))
+ print("{:27} {:30} {:10}".format(test, metric, val))
+ else:
+ header('commit')
+ # Printing out metrics.
for test, metric in all_tests:
- print("{:27}{:30}".format(test, metric) + commit_string(test,'percentages'))
+ print("{:27}{:30}".format(test, metric) + commit_string(test,'metrics'))
+
+ # Has no meaningful output if there is no commit to compare to.
+ if not singleton_commit:
+ header('percent')
+
+ # Printing out percentages.
+ for test, metric in all_tests:
+ print("{:27}{:30}".format(test, metric) + commit_string(test,'percentages'))
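As an aside for readers skimming the diff: the new ranking logic amounts to the following standalone sketch (assuming every (test, metric) pair has an average for both commits, i.e. no None values and a nonzero reference average):

    def top_changes(metrics_by_test, n=20):
        # metrics_by_test: {(test, metric): (ref_avg, new_avg)}
        def rel_change(item):
            (_, (ref, val)) = item
            return (val - ref) / ref
        ranked = sorted(metrics_by_test.items(), key=rel_change)
        # The largest improvements and regressions sit at the two ends
        # of the sorted list; only those are rendered.
        return ranked[:n] + ranked[-n:]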
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/compare/96dbe869050a82133718581cb841787e5cb8d316...bd2525f442ae28155f17e339b7276f1033e01edf