[Git][ghc/ghc][wip/T22264-9.2] 5 commits: nonmoving: Ensure that pinned accumulator block is promoted
Ben Gamari (@bgamari)
gitlab at gitlab.haskell.org
Fri Oct 21 15:50:05 UTC 2022
Ben Gamari pushed to branch wip/T22264-9.2 at Glasgow Haskell Compiler / GHC
Commits:
c17610bd by Ben Gamari at 2022-10-21T11:46:56-04:00
nonmoving: Ensure that pinned accumulator block is promoted
It is important that when we perform a nonmoving collection cycle the
entire heap contents are promoted.

TODO: Is this true?
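As context, a minimal sketch of what "promoting" the pinned accumulator block amounts to. The helper name is hypothetical and the body is simplified from the collect_pinned_object_blocks change in the GC.c diff below (the real change also links the block onto the list of collected pinned blocks):

    /* Hypothetical helper, simplified from the GC.c diff below.
     * The capability keeps a partially-filled accumulator block in
     * cap->pinned_object_block, separate from the filled blocks on
     * cap->pinned_object_blocks. During a major nonmoving collection it
     * must be handed to the oldest generation as well, so that weak
     * pointers keyed on recently-allocated pinned arrays can be
     * finalized correctly (#22264). */
    static void promotePinnedAccumulator_sketch (Capability *cap)
    {
        bdescr *bd = cap->pinned_object_block;
        if (bd != NULL) {
            cap->pinned_object_block = NULL;   // detach the accumulator
            bd->flags |= BF_NONMOVING;         // now owned by the nonmoving heap
            bd->gen    = oldest_gen;
            bd->gen_no = oldest_gen->no;
            oldest_gen->n_large_words  += bd->free - bd->start;
            oldest_gen->n_large_blocks += bd->blocks;
        }
    }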
- - - - -
dd17770d by Ben Gamari at 2022-10-21T11:49:48-04:00
nonmoving: Deduplicate assertion
- - - - -
4e7febf6 by Ben Gamari at 2022-10-21T11:49:48-04:00
rts/MarkWeak: Add some documentation comments
- - - - -
48d95f68 by Ben Gamari at 2022-10-21T11:49:48-04:00
nonmoving: Tighten up weak pointer assertions
Previously we would use `nonmovingClosureMarkedThisCycle` on objects
which may live in large-object blocks, which is not valid. Fix this by
instead using `nonmovingIsNowAlive`, which handles large objects.
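A rough illustration of why the switch matters. This is a hedged sketch, not the actual definition of `nonmovingIsNowAlive`; it assumes the RTS block-descriptor API, with BF_MARKED standing in for however large-object marks are recorded:

    /* Sketch only: a liveness check that is valid for both segment-allocated
     * and large objects. nonmovingClosureMarkedThisCycle consults the
     * segment's mark bitmap, which only exists for non-large nonmoving
     * blocks, so it must not be applied to large objects. */
    INLINE_HEADER bool isNowAlive_sketch (StgPtr p)
    {
        bdescr *bd = Bdescr(p);
        if (bd->flags & BF_LARGE) {
            // Large objects have no segment bitmap; their mark lives in the
            // block descriptor flags (BF_MARKED here is an assumption).
            return (bd->flags & BF_MARKED) != 0;
        } else {
            // Ordinary nonmoving objects: consult the per-block mark bit.
            return nonmovingClosureMarkedThisCycle(p);
        }
    }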
- - - - -
85c5167e by Ben Gamari at 2022-10-21T11:49:48-04:00
nonmoving: Fix handling of weak pointers
This fixes an interaction between aging and weak pointer handling which
prevented the finalization of some weak pointers. In particular, weak
pointers could have their keys incorrectly marked by the preparatory
collector, preventing their finalization by the subsequent concurrent
collection.

While in the area, we also significantly improve the assertions
regarding weak pointers.
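To make the intended flow concrete, here is a hedged, simplified sketch of the deferred weak-pointer tidying performed during concurrent marking. The list and function names follow the note added to MarkWeak.c and the NonMovingMark.c diff below; the loop structure is illustrative, not the exact GHC code:

    /* Sketch: walk the deferred weak list, moving weaks whose keys are now
     * known to be alive onto the live list and marking their payloads.
     * Weaks left on nonmoving_old_weak_ptr_list after marking finishes are
     * handled by nonmovingMarkDeadWeaks. */
    static bool tidyNonmovingWeaks_sketch (struct MarkQueue_ *queue)
    {
        bool did_work = false;
        StgWeak **last = &nonmoving_old_weak_ptr_list;
        for (StgWeak *w = *last; w != NULL; w = *last) {
            if (nonmovingIsNowAlive(w->key)) {
                // Key is reachable: push the value and finalizer for marking
                // and move the weak onto the live list for this cycle.
                nonmovingMarkLiveWeak(queue, w);
                *last = w->link;
                w->link = nonmoving_weak_ptr_list;
                nonmoving_weak_ptr_list = w;
                did_work = true;
            } else {
                // Not (yet) proven reachable: leave it for a later pass.
                last = &w->link;
            }
        }
        return did_work;
    }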
- - - - -
5 changed files:
- rts/sm/GC.c
- rts/sm/MarkWeak.c
- rts/sm/NonMoving.c
- rts/sm/NonMoving.h
- rts/sm/NonMovingMark.c
Changes:
=====================================
rts/sm/GC.c
=====================================
@@ -1833,10 +1833,11 @@ collect_pinned_object_blocks (void)
generation *const gen = (use_nonmoving && major_gc) ? oldest_gen : g0;
for (uint32_t n = 0; n < n_capabilities; n++) {
+ bdescr *first = RELAXED_LOAD(&capabilities[n]->pinned_object_blocks);
bdescr *last = NULL;
if (use_nonmoving && gen == oldest_gen) {
// Mark objects as belonging to the nonmoving heap
- for (bdescr *bd = RELAXED_LOAD(&capabilities[n]->pinned_object_blocks); bd != NULL; bd = bd->link) {
+ for (bdescr *bd = first; bd != NULL; bd = bd->link) {
bd->flags |= BF_NONMOVING;
bd->gen = oldest_gen;
bd->gen_no = oldest_gen->no;
@@ -1844,18 +1845,43 @@ collect_pinned_object_blocks (void)
oldest_gen->n_large_blocks += bd->blocks;
last = bd;
}
+
+ // Link the pinned accumulator block onto the end of the list
+ // This will result in some fragmentation due to the
+ // partially-filled block but is necessary to ensure that weak
+ // pointers keyed on recently-allocated pinned arrays can be
+ // correctly finalized. See #22264.
+ {
+ bdescr *bd = capabilities[n]->pinned_object_block;
+ if (bd != NULL) {
+ if (first == NULL) {
+ first = bd;
+ }
+ capabilities[n]->pinned_object_block = NULL;
+ bd->flags |= BF_NONMOVING;
+ bd->gen = oldest_gen;
+ bd->gen_no = oldest_gen->no;
+ if (last != NULL) {
+ last->link = bd;
+ }
+ oldest_gen->n_large_words += bd->free - bd->start;
+ oldest_gen->n_large_blocks += bd->blocks;
+ last = bd;
+ }
+ }
+
} else {
- for (bdescr *bd = capabilities[n]->pinned_object_blocks; bd != NULL; bd = bd->link) {
+ for (bdescr *bd = first; bd != NULL; bd = bd->link) {
last = bd;
}
}
- if (last != NULL) {
+ if (first != NULL) {
last->link = gen->large_objects;
if (gen->large_objects != NULL) {
gen->large_objects->u.back = last;
}
- gen->large_objects = RELAXED_LOAD(&capabilities[n]->pinned_object_blocks);
+ gen->large_objects = first;
RELAXED_STORE(&capabilities[n]->pinned_object_blocks, NULL);
}
}
=====================================
rts/sm/MarkWeak.c
=====================================
@@ -50,7 +50,7 @@
- weak_stage == WeakPtrs
- We process all the weak pointers whos keys are alive (evacuate
+ We process all the weak pointers whose keys are alive (evacuate
their values and finalizers), and repeat until we can find no new
live keys. If no live keys are found in this pass, then we
evacuate the finalizers of all the dead weak pointers in order to
@@ -82,12 +82,43 @@ static bool tidyWeakList (generation *gen);
static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_threads);
static void tidyThreadList (generation *gen);
+/*
+ * Note [Weak pointer processing and the non-moving GC]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * When using the non-moving GC we defer weak pointer processing
+ * until the concurrent marking phase as weaks in the non-moving heap may be
+ * keyed on objects living in the non-moving generation. To accomplish this
+ * initWeakForGC keeps all weak pointers on oldest_gen->weak_ptr_list, where
+ * nonmovingCollect will find them. From there they will be moved to
+ * nonmoving_old_weak_ptr_list. During the mark loop we will move weaks with
+ * reachable keys to nonmoving_weak_ptr_list. At the end of concurrent marking
+ * we tidy the weak list (in nonmovingTidyWeaks) and perform another round of
+ * marking as necessary, just as is done in tidyWeakList.
+ *
+ * Note that this treatment takes advantage of the fact that we usually need
+ * not worry about Weak#s living in the non-moving heap but being keyed on an
+ * object in the moving heap since the Weak# must be strictly older than the
+ * key. Such objects would otherwise pose a problem since the non-moving
+ * collector would be unable to safely determine the liveness of the key.
+ *
+ */
+
+/*
+ * Prepare the weak object lists for GC. Specifically, reset weak_stage
+ * and move all generations' `weak_ptr_list`s to `old_weak_ptr_list`.
+ * Weaks with live keys will later be moved back to `weak_ptr_list` by
+ * `tidyWeakList`.
+ */
void
initWeakForGC(void)
{
- uint32_t g;
+ uint32_t oldest = N;
+ if (RtsFlags.GcFlags.useNonmoving && N == oldest_gen->no) {
+ // See Note [Weak pointer processing and the non-moving GC].
+ oldest = oldest_gen->no - 1;
+ }
- for (g = 0; g <= N; g++) {
+ for (uint32_t g = 0; g <= oldest; g++) {
generation *gen = &generations[g];
gen->old_weak_ptr_list = gen->weak_ptr_list;
gen->weak_ptr_list = NULL;
@@ -96,6 +127,14 @@ initWeakForGC(void)
weak_stage = WeakThreads;
}
+/*
+ * Walk the weak pointer lists after having finished a round of scavenging,
+ * tidying the weak (and possibly thread) lists (depending upon the current
+ * weak_stage).
+ *
+ * Returns true if new live weak pointers were found, implying that another
+ * round of scavenging is necessary.
+ */
bool
traverseWeakPtrList(StgWeak **dead_weak_ptr_list, StgTSO **resurrected_threads)
{
@@ -182,6 +221,11 @@ traverseWeakPtrList(StgWeak **dead_weak_ptr_list, StgTSO **resurrected_threads)
}
}
+/*
+ * Deal with weak pointers with unreachable keys after GC has concluded.
+ * This means marking the finalizer (and possibly value) in preparation for
+ * later finalization.
+ */
static void collectDeadWeakPtrs (generation *gen, StgWeak **dead_weak_ptr_list)
{
StgWeak *w, *next_w;
@@ -198,6 +242,10 @@ static void collectDeadWeakPtrs (generation *gen, StgWeak **dead_weak_ptr_list)
}
}
+/*
+ * Deal with threads left on the old_threads list after GC has concluded,
+ * moving them onto the resurrected_threads list where appropriate.
+ */
static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_threads)
{
StgTSO *t, *tmp, *next;
@@ -233,8 +281,21 @@ static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_t
return flag;
}
+/*
+ * Walk over the `old_weak_ptr_list` of the given generation and:
+ *
+ * - remove any DEAD_WEAKs
+ * - move any weaks with reachable keys to the `weak_ptr_list` of the
+ * appropriate to-space and mark the weak's value and finalizer.
+ */
static bool tidyWeakList(generation *gen)
{
+ if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
+ // See Note [Weak pointer processing and the non-moving GC].
+ ASSERT(gen->old_weak_ptr_list == NULL);
+ return false;
+ }
+
StgWeak *w, **last_w, *next_w;
const StgInfoTable *info;
StgClosure *new;
@@ -322,6 +383,10 @@ static bool tidyWeakList(generation *gen)
return flag;
}
+/*
+ * Walk over the `old_threads` list of the given generation and move any
+ * reachable threads onto the `threads` list.
+ */
static void tidyThreadList (generation *gen)
{
StgTSO *t, *tmp, *next, **prev;
@@ -381,6 +446,10 @@ static void checkWeakPtrSanity(StgWeak *hd, StgWeak *tl)
}
#endif
+/*
+ * Traverse the capabilities' local new-weak-pointer lists at the beginning of
+ * GC and move them to the nursery's weak_ptr_list.
+ */
void collectFreshWeakPtrs()
{
uint32_t i;
=====================================
rts/sm/NonMoving.c
=====================================
@@ -244,6 +244,9 @@ Mutex concurrent_coll_finished_lock;
* how we use the DIRTY flags associated with MUT_VARs and TVARs to improve
* barrier efficiency.
*
+ * - Note [Weak pointer processing and the non-moving GC] (MarkWeak.c) describes
+ * how weak pointers are handled when the non-moving GC is in use.
+ *
* [ueno 2016]:
* Katsuhiro Ueno and Atsushi Ohori. 2016. A fully concurrent garbage
* collector for functional programs on multicore processors. SIGPLAN Not. 51,
@@ -282,8 +285,8 @@ Mutex concurrent_coll_finished_lock;
* was (unsurprisingly) also found to result in significant amounts of
* unnecessary copying.
*
- * Consequently, we now allow aging. Aging allows the preparatory GC leading up
- * to a major collection to evacuate some objects into the young generation.
+ * Consequently, we now allow "aging", which allows the preparatory GC leading up
+ * to a major collection to evacuate objects into the young generation.
* However, this introduces the following tricky case that might arise after
* we have finished the preparatory GC:
*
@@ -292,6 +295,7 @@ Mutex concurrent_coll_finished_lock;
* ┆
* B ←────────────── A ←─────────────── root
* │ ┆ ↖─────────────── gen1 mut_list
+ * │ ┆
* ╰───────────────→ C
* ┆
*
@@ -332,6 +336,7 @@ Mutex concurrent_coll_finished_lock;
* The implementation details of this are described in Note [Non-moving GC:
* Marking evacuated objects] in Evac.c.
*
+ *
* Note [Deadlock detection under the non-moving collector]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* In GHC the garbage collector is responsible for identifying deadlocked
=====================================
rts/sm/NonMoving.h
=====================================
@@ -256,15 +256,22 @@ INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment_unchecked(StgPtr p)
return (struct NonmovingSegment *) (((uintptr_t) p) & mask);
}
+INLINE_HEADER bool nonmovingIsInSegment(StgPtr p)
+{
+ bdescr *bd = Bdescr(p);
+ return HEAP_ALLOCED_GC(p) &&
+ (bd->flags & BF_NONMOVING) &&
+ !(bd->flags & BF_LARGE);
+}
+
INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment(StgPtr p)
{
- ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
+ ASSERT(nonmovingIsInSegment(p));
return nonmovingGetSegment_unchecked(p);
}
INLINE_HEADER nonmoving_block_idx nonmovingGetBlockIdx(StgPtr p)
{
- ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
struct NonmovingSegment *seg = nonmovingGetSegment(p);
ptrdiff_t blk0 = (ptrdiff_t)nonmovingSegmentGetBlock(seg, 0);
ptrdiff_t offset = (ptrdiff_t)p - blk0;
@@ -289,7 +296,6 @@ INLINE_HEADER void nonmovingSetClosureMark(StgPtr p)
nonmovingSetMark(nonmovingGetSegment(p), nonmovingGetBlockIdx(p));
}
-// TODO: Audit the uses of these
/* Was the given closure marked this major GC cycle? */
INLINE_HEADER bool nonmovingClosureMarkedThisCycle(StgPtr p)
{
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -1842,6 +1842,11 @@ bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
// Otherwise it's a live weak
ASSERT(w->header.info == &stg_WEAK_info);
+ // See Note [Weak pointer processing and the non-moving GC] in
+ // MarkWeak.c
+ bool key_in_nonmoving = Bdescr((StgPtr) w->key)->flags & BF_NONMOVING;
+ ASSERT(key_in_nonmoving);
+
if (nonmovingIsNowAlive(w->key)) {
nonmovingMarkLiveWeak(queue, w);
did_work = true;
@@ -1872,7 +1877,7 @@ void nonmovingMarkDeadWeak (struct MarkQueue_ *queue, StgWeak *w)
void nonmovingMarkLiveWeak (struct MarkQueue_ *queue, StgWeak *w)
{
- ASSERT(nonmovingClosureMarkedThisCycle((P_)w));
+ ASSERT(nonmovingIsNowAlive((P_)w));
markQueuePushClosure_(queue, w->value);
markQueuePushClosure_(queue, w->finalizer);
markQueuePushClosure_(queue, w->cfinalizers);
@@ -1886,9 +1891,9 @@ void nonmovingMarkDeadWeaks (struct MarkQueue_ *queue, StgWeak **dead_weaks)
{
StgWeak *next_w;
for (StgWeak *w = nonmoving_old_weak_ptr_list; w; w = next_w) {
- ASSERT(!nonmovingClosureMarkedThisCycle((P_)(w->key)));
+ ASSERT(!nonmovingIsNowAlive((P_)(w->key)));
nonmovingMarkDeadWeak(queue, w);
- next_w = w ->link;
+ next_w = w->link;
w->link = *dead_weaks;
*dead_weaks = w;
}
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/4ea1cf1279ac82a18a492ec7cb6616bc57853b8e...85c5167e928157287a556da482835ac29115141f
You're receiving this email because of your account on gitlab.haskell.org.