[Git][ghc/ghc][ghc-8.10] 6 commits: nonmoving: Fix small CPP bug
Ben Gamari
gitlab at gitlab.haskell.org
Fri Dec 11 07:08:59 UTC 2020
Ben Gamari pushed to branch ghc-8.10 at Glasgow Haskell Compiler / GHC
Commits:
1abaf38a by Ben Gamari at 2020-12-10T04:29:23+00:00
nonmoving: Fix small CPP bug
Previously an incorrect semicolon meant that we would fail to call
busy_wait_nop when spinning.
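
A minimal standalone illustration of the pitfall (still_waiting, spin_hint,
and PARALLEL are hypothetical stand-ins, not RTS names): a semicolon after
the while condition becomes the loop body, so the call guarded by the #if
runs once after the wait rather than on every iteration.

    #include <stdio.h>

    #define PARALLEL 1                /* stand-in for PARALLEL_GC */

    static int remaining = 3;
    static int still_waiting(void) { return remaining-- > 0; }
    static void spin_hint(void)    { puts("spin"); }  /* stand-in for busy_wait_nop */

    int main(void)
    {
        /* Buggy shape: `while (still_waiting());` -- the ';' is the loop
         * body, so spin_hint() would run only once, after the spin ends.
         * Fixed shape, as in the patch below: drop the ';' and supply an
         * empty statement when the parallel path is compiled out. */
        while (still_waiting())
    #if PARALLEL
            spin_hint();              /* runs once per loop iteration */
    #else
            ;                         /* empty loop body otherwise */
    #endif
        return 0;
    }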
- - - - -
4123b929 by GHC GitLab CI at 2020-12-10T04:29:25+00:00
nonmoving: Assert deadlock-gc promotion invariant
When performing a deadlock-detection GC we must ensure that all objects
end up in the non-moving generation. Assert this in scavenge.
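
The check added to scavenge_block (full context in the rts/sm/Scav.c hunk
below) is a DEBUG-only assertion:

    #if defined(DEBUG)
    if (RtsFlags.GcFlags.useNonmoving && deadlock_detect_gc) {
        ASSERT(bd->gen == oldest_gen);
    }
    #endif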
- - - - -
13b6696b by GHC GitLab CI at 2020-12-10T04:29:25+00:00
nonmoving: Ensure deadlock detection promotion works
Previously the deadlock-detection promotion logic in alloc_for_copy was
just plain wrong: it failed to fire when gct->evac_gen_no !=
oldest_gen->gen_no. The fix is simple: move the deadlock-detection check
ahead of the usual promotion logic (see alloc_for_copy_nonmoving in the
diff below).
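
Condensed from the new alloc_for_copy_nonmoving in the rts/sm/Evac.c hunk
below, the corrected control flow handles the deadlock-detection case
before any comparison against gct->evac_gen_no:

    static StgPtr
    alloc_for_copy_nonmoving (uint32_t size, uint32_t gen_no)
    {
        /* Promote to the non-moving heap unconditionally during a
         * deadlock-detection GC; the old code only did so when
         * gen_no < gct->evac_gen_no. */
        if (deadlock_detect_gc) {
            return alloc_in_nonmoving_heap(size);
        }
        /* ... otherwise the usual promotion logic applies ... */
    }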
- - - - -
0c7f20e2 by GHC GitLab CI at 2020-12-10T04:30:10+00:00
nonmoving: Refactor alloc_for_copy
Pull the cold non-moving allocation path out of alloc_for_copy.
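
After the refactor (full hunk below), the moving-GC hot path in
alloc_for_copy pays a single predicted-not-taken branch before falling
through to the bump allocator:

    STATIC_INLINE StgPtr
    alloc_for_copy (uint32_t size, uint32_t gen_no)
    {
        ASSERT(gen_no < RtsFlags.GcFlags.generations);
        /* Cold path, taken only when the non-moving collector is on. */
        if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
            return alloc_for_copy_nonmoving(size, gen_no);
        }
        /* ... hot path: promotion checks, then alloc_in_moving_heap ... */
    }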
- - - - -
b1b55be1 by Ben Gamari at 2020-12-10T04:30:34+00:00
nonmoving: Don't push objects during deadlock detect GC
Previously we would push large objects and compact regions to the mark
queue during the deadlock detect GC, resulting in failure to detect
deadlocks.
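
The guard, applied identically in evacuate_large and evacuate_compact
(hunks below), mirrors the one already used on the non-moving allocation
path:

    // See Note [Non-moving GC: Marking evacuated objects].
    if (major_gc && !deadlock_detect_gc) {
        markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) p);
    }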
- - - - -
b0ad86fb by GHC GitLab CI at 2020-12-10T04:31:18+00:00
nonmoving: Add comments to nonmovingResurrectThreads
- - - - -
3 changed files:
- rts/sm/Evac.c
- rts/sm/NonMovingMark.c
- rts/sm/Scav.c
Changes:
=====================================
rts/sm/Evac.c
=====================================
@@ -64,14 +64,92 @@ STATIC_INLINE void evacuate_large(StgPtr p);
Allocate some space in which to copy an object.
-------------------------------------------------------------------------- */
+static StgPtr
+alloc_in_nonmoving_heap (uint32_t size)
+{
+ gct->copied += size;
+ StgPtr to = nonmovingAllocate(gct->cap, size);
+
+ // Add segment to the todo list unless it's already there
+ // current->todo_link == NULL means not in todo list
+ struct NonmovingSegment *seg = nonmovingGetSegment(to);
+ if (!seg->todo_link) {
+ gen_workspace *ws = &gct->gens[oldest_gen->no];
+ seg->todo_link = ws->todo_seg;
+ ws->todo_seg = seg;
+ }
+
+ // The object which refers to this closure may have been aged (i.e.
+ // retained in a younger generation). Consequently, we must add the
+ // closure to the mark queue to ensure that it will be marked.
+ //
+ // However, if we are in a deadlock detection GC then we disable aging
+ // so there is no need.
+ //
+ // See Note [Non-moving GC: Marking evacuated objects].
+ if (major_gc && !deadlock_detect_gc) {
+ markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
+ }
+ return to;
+}
+
+/* Inlined helper shared between alloc_for_copy_nonmoving and alloc_for_copy. */
+STATIC_INLINE StgPtr
+alloc_in_moving_heap (uint32_t size, uint32_t gen_no)
+{
+ gen_workspace *ws = &gct->gens[gen_no]; // zero memory references here
+
+ /* chain a new block onto the to-space for the destination gen if
+ * necessary.
+ */
+ StgPtr to = ws->todo_free;
+ ws->todo_free += size;
+ if (ws->todo_free > ws->todo_lim) {
+ to = todo_block_full(size, ws);
+ }
+ ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
+
+ return to;
+}
+
+/*
+ * N.B. We duplicate much of alloc_for_copy here to minimize the number of
+ * branches introduced in the moving GC path of alloc_for_copy while minimizing
+ * repeated work.
+ */
+static StgPtr
+alloc_for_copy_nonmoving (uint32_t size, uint32_t gen_no)
+{
+ /* See Note [Deadlock detection under nonmoving collector]. */
+ if (deadlock_detect_gc) {
+ return alloc_in_nonmoving_heap(size);
+ }
+
+ /* Should match logic from alloc_for_copy */
+ if (gen_no < gct->evac_gen_no) {
+ if (gct->eager_promotion) {
+ gen_no = gct->evac_gen_no;
+ } else {
+ gct->failed_to_evac = true;
+ }
+ }
+
+ if (gen_no == oldest_gen->no) {
+ return alloc_in_nonmoving_heap(size);
+ } else {
+ return alloc_in_moving_heap(size, gen_no);
+ }
+}
+
/* size is in words */
STATIC_INLINE StgPtr
alloc_for_copy (uint32_t size, uint32_t gen_no)
{
ASSERT(gen_no < RtsFlags.GcFlags.generations);
- StgPtr to;
- gen_workspace *ws;
+ if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
+ return alloc_for_copy_nonmoving(size, gen_no);
+ }
/* Find out where we're going, using the handy "to" pointer in
* the gen of the source object. If it turns out we need to
@@ -81,55 +159,12 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
if (gen_no < gct->evac_gen_no) {
if (gct->eager_promotion) {
gen_no = gct->evac_gen_no;
- } else if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving) && deadlock_detect_gc) {
- /* See Note [Deadlock detection under nonmoving collector]. */
- gen_no = oldest_gen->no;
} else {
gct->failed_to_evac = true;
}
}
- if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
- if (gen_no == oldest_gen->no) {
- gct->copied += size;
- to = nonmovingAllocate(gct->cap, size);
-
- // Add segment to the todo list unless it's already there
- // current->todo_link == NULL means not in todo list
- struct NonmovingSegment *seg = nonmovingGetSegment(to);
- if (!seg->todo_link) {
- gen_workspace *ws = &gct->gens[oldest_gen->no];
- seg->todo_link = ws->todo_seg;
- ws->todo_seg = seg;
- }
-
- // The object which refers to this closure may have been aged (i.e.
- // retained in a younger generation). Consequently, we must add the
- // closure to the mark queue to ensure that it will be marked.
- //
- // However, if we are in a deadlock detection GC then we disable aging
- // so there is no need.
- //
- // See Note [Non-moving GC: Marking evacuated objects].
- if (major_gc && !deadlock_detect_gc)
- markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
- return to;
- }
- }
-
- ws = &gct->gens[gen_no]; // zero memory references here
-
- /* chain a new block onto the to-space for the destination gen if
- * necessary.
- */
- to = ws->todo_free;
- ws->todo_free += size;
- if (ws->todo_free > ws->todo_lim) {
- to = todo_block_full(size, ws);
- }
- ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
-
- return to;
+ return alloc_in_moving_heap(size, gen_no);
}
/* -----------------------------------------------------------------------------
@@ -406,7 +441,9 @@ evacuate_large(StgPtr p)
__atomic_fetch_or(&bd->flags, BF_NONMOVING, __ATOMIC_ACQ_REL);
// See Note [Non-moving GC: Marking evacuated objects].
- markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) p);
+ if (major_gc && !deadlock_detect_gc) {
+ markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) p);
+ }
}
initBdescr(bd, new_gen, new_gen->to);
@@ -563,7 +600,9 @@ evacuate_compact (StgPtr p)
__atomic_fetch_or(&bd->flags, BF_NONMOVING, __ATOMIC_RELAXED);
// See Note [Non-moving GC: Marking evacuated objects].
- markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) str);
+ if (major_gc && !deadlock_detect_gc) {
+ markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) str);
+ }
}
initBdescr(bd, new_gen, new_gen->to);
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -737,9 +737,11 @@ void updateRemembSetPushStack(Capability *cap, StgStack *stack)
// The concurrent GC has claimed the right to mark the stack.
// Wait until it finishes marking before proceeding with
// mutation.
- while (needs_upd_rem_set_mark((StgClosure *) stack));
+ while (needs_upd_rem_set_mark((StgClosure *) stack))
#if defined(PARALLEL_GC)
busy_wait_nop(); // TODO: Spinning here is unfortunate
+#else
+ ;
#endif
return;
}
@@ -1927,6 +1929,8 @@ void nonmovingTidyThreads ()
}
}
+// Mark threads which appear to be dead but still need to be properly torn down
+// by resurrectThreads.
void nonmovingResurrectThreads (struct MarkQueue_ *queue, StgTSO **resurrected_threads)
{
StgTSO *next;
@@ -1938,6 +1942,9 @@ void nonmovingResurrectThreads (struct MarkQueue_ *queue, StgTSO **resurrected_t
case ThreadComplete:
continue;
default:
+ // The thread may be, e.g., deadlocked in which case we must ensure
+ // it isn't swept since resurrectThreads will need to throw it an
+ // exception.
markQueuePushClosure_(queue, (StgClosure*)t);
t->global_link = *resurrected_threads;
*resurrected_threads = t;
=====================================
rts/sm/Scav.c
=====================================
@@ -440,6 +440,14 @@ scavenge_block (bdescr *bd)
p = bd->u.scan;
+ // Sanity check: See Note [Deadlock detection under nonmoving collector].
+#if defined(DEBUG)
+ if (RtsFlags.GcFlags.useNonmoving && deadlock_detect_gc) {
+ ASSERT(bd->gen == oldest_gen);
+ }
+#endif
+
+
// we might be evacuating into the very object that we're
// scavenging, so we have to check the real bd->free pointer each
// time around the loop.
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/e7dbdbfadf897b246a0b7d669bb559d18030c3c6...b0ad86fb84fbd2ac78208e6545c48c7a09e7f4aa