[Git][ghc/ghc][wip/gc/nonmoving-pinned] 2 commits: nonmoving: Teach allocatePinned() to allocate into nonmoving heap
Ben Gamari
gitlab at gitlab.haskell.org
Mon Jul 27 17:04:44 UTC 2020
Ben Gamari pushed to branch wip/gc/nonmoving-pinned at Glasgow Haskell Compiler / GHC
Commits:
02e97485 by Ben Gamari at 2020-07-27T12:58:37-04:00
nonmoving: Teach allocatePinned() to allocate into nonmoving heap
The allocatePinned() function is used to allocate pinned memory (e.g.
for newPinnedByteArray#)
- - - - -
f16461d3 by Ben Gamari at 2020-07-27T13:03:58-04:00
gitlab-ci: Respect TEST_TYPE
It seems that this was dropped in the refactoring of the CI driver.
- - - - -
6 changed files:
- .gitlab/ci.sh
- rts/sm/NonMoving.c
- rts/sm/NonMoving.h
- rts/sm/NonMovingMark.c
- rts/sm/NonMovingScav.c
- rts/sm/Storage.c
Changes:
=====================================
.gitlab/ci.sh
=====================================
@@ -364,8 +364,11 @@ function push_perf_notes() {
}
function test_make() {
+ if [[ -z "$TEST_TYPE" ]]; then
+ TEST_TYPE="test"
+ fi
run "$MAKE" test_bindist TEST_PREP=YES
- run "$MAKE" V=0 test \
+ run "$MAKE" V=0 $TEST_TYPE \
THREADS="$cores" \
JUNIT_FILE=../../junit.xml
}
=====================================
rts/sm/NonMoving.c
=====================================
@@ -474,6 +474,24 @@ Mutex concurrent_coll_finished_lock;
* remembered set during the preparatory GC. This allows us to safely skip the
* non-moving write barrier without jeopardizing the snapshot invariant.
*
+ *
+ * Note [Allocating pinned objects into the non-moving heap]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Under the moving collector small, pinned ByteArray#s are allocated by
+ * Storage.c:allocatePinned() into a per-capability accumulator block which is
+ * filled in a bump-pointer fashion. While this scheme is simple, it can lead
+ * to very poor fragmentation behavior as objects become unreachable: a single
+ * live ByteArray# can keep an entire block of memory alive.
+ *
+ * When the non-moving collector is in use we can do better by allocating small
+ * pinned objects directly into the non-moving heap.
+ *
+ * One wrinkle here is that pinned ByteArrays may have alignment requirements
+ * which require that we insert padding zero-words before the beginning of the
+ * object. We must be certain to account for this padding when inspecting the
+ * object.
+ *
*/
memcount nonmoving_live_words = 0;
=====================================
rts/sm/NonMoving.h
=====================================
@@ -73,11 +73,17 @@ struct NonmovingAllocator {
// allocators cover block sizes of 2^NONMOVING_ALLOCA0 to
// 2^(NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT) (in bytes)
+// The largest allocator class must be at least LARGE_OBJECT_THRESHOLD in size
+// as Storage.c:allocatePinned will allocate small pinned allocations into the
+// non-moving heap.
#define NONMOVING_ALLOCA_CNT 12
// maximum number of free segments to hold on to
#define NONMOVING_MAX_FREE 16
+// block size of largest allocator in bytes.
+#define NONMOVING_MAX_BLOCK_SZ (1 << (NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT))
+
struct NonmovingHeap {
struct NonmovingAllocator *allocators[NONMOVING_ALLOCA_CNT];
// free segment list. This is a cache where we keep up to
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -1359,6 +1359,11 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
// Trace pointers
/////////////////////////////////////////////////////
+ // Find beginning of object.
+ // See Note [Allocating pinned objects into the non-moving heap].
+ while (*(StgPtr*) p == NULL)
+ p = (StgClosure *) ((StgPtr*) p + 1);
+
const StgInfoTable *info = get_itbl(p);
switch (info->type) {
=====================================
rts/sm/NonMovingScav.c
=====================================
@@ -11,9 +11,18 @@
#include "MarkWeak.h" // scavengeLiveWeak
void
-nonmovingScavengeOne (StgClosure *q)
+nonmovingScavengeOne (StgClosure *q0)
{
+ StgClosure *q = q0;
+
+ // N.B. There may be a gap before the first word of the closure in the case
+ // of an aligned ByteArray# as allocated by allocatePinned().
+ // See Note [Allocating pinned objects into the non-moving heap].
+ while (*(StgPtr*) q == NULL)
+ q = (StgClosure *) ((StgPtr*) q + 1);
+
ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
+
StgPtr p = (StgPtr)q;
const StgInfoTable *info = get_itbl(q);
const bool saved_eager_promotion = gct->eager_promotion;
=====================================
rts/sm/Storage.c
=====================================
@@ -1165,6 +1165,23 @@ allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ alig
const StgWord alignment_w = alignment / sizeof(W_);
+ // If the non-moving collector is enabled then we can allocate small,
+ // pinned allocations directly into the non-moving heap. This is a bit more
+ // expensive up-front but reduces fragmentation and is worthwhile since
+ // pinned allocations are often long-lived.
+ //
+ // See Note [Allocating pinned objects into the non-moving heap].
+ if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)
+ && n * sizeof(W_) + alignment_w <= NONMOVING_MAX_BLOCK_SZ)
+ {
+ p = nonmovingAllocate(cap, n + alignment_w);
+ W_ off_w = ALIGN_WITH_OFF_W(p, alignment, align_off);
+ memset(p, 0, off_w);
+ p += off_w;
+ MEMSET_IF_PROFILING_W(p + n, 0, alignment_w - off_w - 1);
+ return p;
+ }
+
// If the request is for a large object, then allocate()
// will give us a pinned object anyway.
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/43b0bcf92bf1cf3708c140a2e5d9b98111ba5817...f16461d354a0e11954a350c7a23cbe33c1032e6e
--
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/43b0bcf92bf1cf3708c140a2e5d9b98111ba5817...f16461d354a0e11954a350c7a23cbe33c1032e6e
You're receiving this email because of your account on gitlab.haskell.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mail.haskell.org/pipermail/ghc-commits/attachments/20200727/711661cc/attachment-0001.html>
More information about the ghc-commits
mailing list