[Git][ghc/ghc][wip/T22264-9.2] 5 commits: nonmoving: Move current segment array into Capability
Ben Gamari (@bgamari)
gitlab at gitlab.haskell.org
Wed Feb 8 06:00:09 UTC 2023
Ben Gamari pushed to branch wip/T22264-9.2 at Glasgow Haskell Compiler / GHC
Commits:
748d0619 by Ben Gamari at 2023-02-08T00:59:59-05:00
nonmoving: Move current segment array into Capability
(cherry picked from commit 9d245c1baec91ee79d715062b127e487456d9c9e)
- - - - -
ee09ffe1 by Ben Gamari at 2023-02-08T00:59:59-05:00
nonmoving: Don't call prepareUnloadCheck
When the nonmoving GC is in use we do not call `checkUnload` (since we
don't unload code) and therefore should not call `prepareUnloadCheck`,
lest we run into assertions.
(cherry picked from commit 6bdce35cdd59112a8cb4a4a3b061e854ada3ff63)
- - - - -
9905e5ae by Ben Gamari at 2023-02-08T00:59:59-05:00
nonmoving: Fix unregisterised build
(cherry picked from commit 6df2709e7215ea80d7267800e0318aee2a7c277f)
- - - - -
d7bad937 by Ben Gamari at 2023-02-08T00:59:59-05:00
nonmoving: Avoid n_caps race
(cherry picked from commit c00d6de815d4e125c1c4d8ff06549042f502f759)
- - - - -
10e43f7d by Ben Gamari at 2023-02-08T00:59:59-05:00
relnotes: Mention various non-moving GC fixes
- - - - -
13 changed files:
- docs/users_guide/9.2.6-notes.rst
- rts/Capability.c
- rts/Capability.h
- rts/Schedule.c
- rts/sm/GC.c
- rts/sm/NonMoving.c
- rts/sm/NonMoving.h
- rts/sm/NonMovingCensus.c
- rts/sm/NonMovingCensus.h
- rts/sm/NonMovingMark.c
- rts/sm/NonMovingMark.h
- rts/sm/Sanity.c
- rts/sm/Storage.c
Changes:
=====================================
docs/users_guide/9.2.6-notes.rst
=====================================
@@ -66,6 +66,23 @@ Runtime system
- Truncate eventlog events with a large payload (:ghc-ticket:`20221`).
+- A bug in the nonmoving garbage collector regarding the treatment of
+ zero-length ``SmallArray#``\ s has been fixed (:ghc-ticket:`22264`).
+
+- A number of bugs regarding the non-moving garbage collector's treatment of
+ ``Weak#`` pointers have been fixed (:ghc-ticket:`22327`).
+
+- A few race conditions between the non-moving collector and
+ ``setNumCapabilities`` which could result in undefined behavior have been
+ fixed (:ghc-ticket:`22926`, :ghc-ticket:`22927`).
+
+- The non-moving collector is now able to better schedule marking work during
+ the post-mark synchronization phase of collection, significantly reducing
+ pause times in some workloads (:ghc-ticket:`22929`).
+
+- Various bugs in the non-moving collector's implementation of the selector
+ optimisation have been fixed (:ghc-ticket:`22930`).
+
Build system and packaging
--------------------------
=====================================
rts/Capability.c
=====================================
@@ -294,6 +294,7 @@ initCapability (Capability *cap, uint32_t i)
cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
RtsFlags.GcFlags.generations,
"initCapability");
+ cap->current_segments = NULL;
// At this point storage manager is not initialized yet, so this will be
@@ -1267,6 +1268,9 @@ freeCapability (Capability *cap)
{
stgFree(cap->mut_lists);
stgFree(cap->saved_mut_lists);
+ if (cap->current_segments) {
+ stgFree(cap->current_segments);
+ }
#if defined(THREADED_RTS)
freeSparkPool(cap->sparks);
#endif
=====================================
rts/Capability.h
=====================================
@@ -88,6 +88,9 @@ struct Capability_ {
// The update remembered set for the non-moving collector
UpdRemSet upd_rem_set;
+ // Array of current segments for the non-moving collector.
+ // Of length NONMOVING_ALLOCA_CNT.
+ struct NonmovingSegment **current_segments;
// block for allocating pinned objects into
bdescr *pinned_object_block;
=====================================
rts/Schedule.c
=====================================
@@ -1710,7 +1710,9 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
stat_startGCSync(gc_threads[cap->no]);
+#if defined(DEBUG)
unsigned int old_n_capabilities = getNumCapabilities();
+#endif
interruptAllCapabilities();
@@ -2306,7 +2308,9 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
moreCapabilities(n_capabilities, new_n_capabilities);
// Resize and update storage manager data structures
+ ACQUIRE_SM_LOCK;
storageAddCapabilities(n_capabilities, new_n_capabilities);
+ RELEASE_SM_LOCK;
}
}
=====================================
rts/sm/GC.c
=====================================
@@ -375,7 +375,8 @@ GarbageCollect (uint32_t collect_gen,
static_flag == STATIC_FLAG_A ? STATIC_FLAG_B : STATIC_FLAG_A;
}
- if (major_gc) {
+ /* N.B. We currently don't unload code with the non-moving collector. */
+ if (major_gc && !RtsFlags.GcFlags.useNonmoving) {
unload_mark_needed = prepareUnloadCheck();
} else {
unload_mark_needed = false;
=====================================
rts/sm/NonMoving.c
=====================================
@@ -707,10 +707,11 @@ void *nonmovingAllocate(Capability *cap, StgWord sz)
// object and not moved) which is covered by allocator 9.
ASSERT(log_block_size < NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT);
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[log_block_size - NONMOVING_ALLOCA0];
+ unsigned int alloca_idx = log_block_size - NONMOVING_ALLOCA0;
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
// Allocate into current segment
- struct NonmovingSegment *current = alloca->current[cap->no];
+ struct NonmovingSegment *current = cap->current_segments[alloca_idx];
ASSERT(current); // current is never NULL
void *ret = nonmovingSegmentGetBlock_(current, log_block_size, current->next_free);
ASSERT(GET_CLOSURE_TAG(ret) == 0); // check alignment
@@ -743,29 +744,12 @@ void *nonmovingAllocate(Capability *cap, StgWord sz)
// make it current
new_current->link = NULL;
SET_SEGMENT_STATE(new_current, CURRENT);
- alloca->current[cap->no] = new_current;
+ cap->current_segments[alloca_idx] = new_current;
}
return ret;
}
-/* Allocate a nonmovingAllocator */
-static struct NonmovingAllocator *alloc_nonmoving_allocator(uint32_t n_caps)
-{
- size_t allocator_sz =
- sizeof(struct NonmovingAllocator) +
- sizeof(void*) * n_caps; // current segment pointer for each capability
- struct NonmovingAllocator *alloc =
- stgMallocBytes(allocator_sz, "nonmovingInit");
- memset(alloc, 0, allocator_sz);
- return alloc;
-}
-
-static void free_nonmoving_allocator(struct NonmovingAllocator *alloc)
-{
- stgFree(alloc);
-}
-
void nonmovingInit(void)
{
if (! RtsFlags.GcFlags.useNonmoving) return;
@@ -774,10 +758,7 @@ void nonmovingInit(void)
initCondition(&concurrent_coll_finished);
initMutex(&concurrent_coll_finished_lock);
#endif
- for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- nonmovingHeap.allocators[i] = alloc_nonmoving_allocator(getNumCapabilities());
- }
- nonmovingMarkInitUpdRemSet();
+ nonmovingMarkInit();
}
// Stop any nonmoving collection in preparation for RTS shutdown.
@@ -806,44 +787,24 @@ void nonmovingExit(void)
closeCondition(&concurrent_coll_finished);
closeMutex(&nonmoving_collection_mutex);
#endif
-
- for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- free_nonmoving_allocator(nonmovingHeap.allocators[i]);
- }
}
-/*
- * Assumes that no garbage collector or mutator threads are running to safely
- * resize the nonmoving_allocators.
- *
- * Must hold sm_mutex.
- */
-void nonmovingAddCapabilities(uint32_t new_n_caps)
+/* Initialize a new capability. Caller must hold SM_LOCK */
+void nonmovingInitCapability(Capability *cap)
{
- unsigned int old_n_caps = nonmovingHeap.n_caps;
- struct NonmovingAllocator **allocs = nonmovingHeap.allocators;
-
+ // Initialize current segment array
+ struct NonmovingSegment **segs =
+ stgMallocBytes(sizeof(struct NonmovingSegment*) * NONMOVING_ALLOCA_CNT, "current segment array");
for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- struct NonmovingAllocator *old = allocs[i];
- allocs[i] = alloc_nonmoving_allocator(new_n_caps);
-
- // Copy the old state
- allocs[i]->filled = old->filled;
- allocs[i]->active = old->active;
- for (unsigned int j = 0; j < old_n_caps; j++) {
- allocs[i]->current[j] = old->current[j];
- }
- stgFree(old);
-
- // Initialize current segments for the new capabilities
- for (unsigned int j = old_n_caps; j < new_n_caps; j++) {
- allocs[i]->current[j] = nonmovingAllocSegment(capabilities[j]->node);
- nonmovingInitSegment(allocs[i]->current[j], NONMOVING_ALLOCA0 + i);
- SET_SEGMENT_STATE(allocs[i]->current[j], CURRENT);
- allocs[i]->current[j]->link = NULL;
- }
+ segs[i] = nonmovingAllocSegment(cap->node);
+ nonmovingInitSegment(segs[i], NONMOVING_ALLOCA0 + i);
+ SET_SEGMENT_STATE(segs[i], CURRENT);
}
- nonmovingHeap.n_caps = new_n_caps;
+ cap->current_segments = segs;
+
+ // Initialize update remembered set
+ cap->upd_rem_set.queue.blocks = NULL;
+ nonmovingInitUpdRemSet(&cap->upd_rem_set);
}
void nonmovingClearBitmap(struct NonmovingSegment *seg)
@@ -863,13 +824,15 @@ static void nonmovingPrepareMark(void)
// Should have been cleared by the last sweep
ASSERT(nonmovingHeap.sweep_list == NULL);
+ nonmovingHeap.n_caps = n_capabilities;
nonmovingBumpEpoch();
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
// Update current segments' snapshot pointers
- for (uint32_t cap_n = 0; cap_n < getNumCapabilities(); ++cap_n) {
- struct NonmovingSegment *seg = alloca->current[cap_n];
+ for (uint32_t cap_n = 0; cap_n < nonmovingHeap.n_caps; ++cap_n) {
+ Capability *cap = capabilities[cap_n];
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
}
@@ -1101,7 +1064,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
// Walk the list of filled segments that we collected during preparation,
// updated their snapshot pointers and move them to the sweep list.
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx]->saved_filled;
+ struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx].saved_filled;
uint32_t n_filled = 0;
if (filled) {
struct NonmovingSegment *seg = filled;
@@ -1120,7 +1083,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
seg->link = nonmovingHeap.sweep_list;
nonmovingHeap.sweep_list = filled;
}
- nonmovingHeap.allocators[alloca_idx]->saved_filled = NULL;
+ nonmovingHeap.allocators[alloca_idx].saved_filled = NULL;
}
// Mark Weak#s
@@ -1329,10 +1292,12 @@ void assert_in_nonmoving_heap(StgPtr p)
}
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
+
// Search current segments
- for (uint32_t cap_idx = 0; cap_idx < getNumCapabilities(); ++cap_idx) {
- struct NonmovingSegment *seg = alloca->current[cap_idx];
+ for (uint32_t cap_idx = 0; cap_idx < nonmovingHeap.n_caps; ++cap_idx) {
+ Capability *cap = capabilities[cap_idx];
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
return;
}
@@ -1391,33 +1356,16 @@ void nonmovingPrintSegment(struct NonmovingSegment *seg)
debugBelch("End of segment\n\n");
}
-void nonmovingPrintAllocator(struct NonmovingAllocator *alloc)
-{
- debugBelch("Allocator at %p\n", (void*)alloc);
- debugBelch("Filled segments:\n");
- for (struct NonmovingSegment *seg = alloc->filled; seg != NULL; seg = seg->link) {
- debugBelch("%p ", (void*)seg);
- }
- debugBelch("\nActive segments:\n");
- for (struct NonmovingSegment *seg = alloc->active; seg != NULL; seg = seg->link) {
- debugBelch("%p ", (void*)seg);
- }
- debugBelch("\nCurrent segments:\n");
- for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
- debugBelch("%p ", alloc->current[i]);
- }
- debugBelch("\n");
-}
-
void locate_object(P_ obj)
{
// Search allocators
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
- for (uint32_t cap = 0; cap < getNumCapabilities(); ++cap) {
- struct NonmovingSegment *seg = alloca->current[cap];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
+ for (uint32_t cap_n = 0; cap_n < nonmovingHeap.n_caps; ++cap_n) {
+ Capability *cap = capabilities[cap_n];
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
- debugBelch("%p is in current segment of capability %d of allocator %d at %p\n", obj, cap, alloca_idx, (void*)seg);
+ debugBelch("%p is in current segment of capability %d of allocator %d at %p\n", obj, cap_n, alloca_idx, (void*)seg);
return;
}
}
=====================================
rts/sm/NonMoving.h
=====================================
@@ -84,8 +84,7 @@ struct NonmovingAllocator {
struct NonmovingSegment *filled;
struct NonmovingSegment *saved_filled;
struct NonmovingSegment *active;
- // indexed by capability number
- struct NonmovingSegment *current[];
+ // N.B. Per-capability "current" segment lives in Capability
};
// first allocator is of size 2^NONMOVING_ALLOCA0 (in bytes)
@@ -99,7 +98,7 @@ struct NonmovingAllocator {
#define NONMOVING_MAX_FREE 16
struct NonmovingHeap {
- struct NonmovingAllocator *allocators[NONMOVING_ALLOCA_CNT];
+ struct NonmovingAllocator allocators[NONMOVING_ALLOCA_CNT];
// free segment list. This is a cache where we keep up to
// NONMOVING_MAX_FREE segments to avoid thrashing the block allocator.
// Note that segments in this list are still counted towards
@@ -149,7 +148,7 @@ void nonmovingCollect(StgWeak **dead_weaks,
StgTSO **resurrected_threads);
void *nonmovingAllocate(Capability *cap, StgWord sz);
-void nonmovingAddCapabilities(uint32_t new_n_caps);
+void nonmovingInitCapability(Capability *cap);
void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
void nonmovingClearBitmap(struct NonmovingSegment *seg);
@@ -166,7 +165,7 @@ INLINE_HEADER uint8_t nonmovingSegmentLogBlockSize(struct NonmovingSegment *seg)
INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
{
struct NonmovingAllocator *alloc =
- nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
+ &nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, ACTIVE);
while (true) {
struct NonmovingSegment *current_active = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->active);
@@ -181,7 +180,7 @@ INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
{
struct NonmovingAllocator *alloc =
- nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
+ &nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, FILLED);
while (true) {
struct NonmovingSegment *current_filled = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->filled);
@@ -333,10 +332,14 @@ INLINE_HEADER bool nonmovingClosureBeingSwept(StgClosure *p)
}
}
+// N.B. RtsFlags is defined as a pointer in STG code consequently this code
+// doesn't typecheck.
+#if !IN_STG_CODE
INLINE_HEADER bool isNonmovingClosure(StgClosure *p)
{
return RtsFlags.GcFlags.useNonmoving && (!HEAP_ALLOCED_GC(p) || Bdescr((P_)p)->flags & BF_NONMOVING);
}
+#endif
#if defined(DEBUG)
=====================================
rts/sm/NonMovingCensus.c
=====================================
@@ -21,10 +21,12 @@
// stopped. In this case is safe to look at active and current segments so we can
// also collect statistics on live words.
static struct NonmovingAllocCensus
-nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_words)
+nonmovingAllocatorCensus_(uint32_t alloc_idx, bool collect_live_words)
{
struct NonmovingAllocCensus census = {collect_live_words, 0, 0, 0, 0};
+ struct NonmovingAllocator *alloc = &nonmovingHeap.allocators[alloc_idx];
+ // filled segments
for (struct NonmovingSegment *seg = alloc->filled;
seg != NULL;
seg = seg->link)
@@ -40,6 +42,7 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
}
}
+ // active segments
for (struct NonmovingSegment *seg = alloc->active;
seg != NULL;
seg = seg->link)
@@ -56,9 +59,11 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
}
}
- for (unsigned int cap=0; cap < getNumCapabilities(); cap++)
+ // current segments
+ for (unsigned int cap_n=0; cap_n < getNumCapabilities(); cap_n++)
{
- struct NonmovingSegment *seg = alloc->current[cap];
+ Capability *cap = capabilities[cap_n];
+ struct NonmovingSegment *seg = cap->current_segments[alloc_idx];
unsigned int n = nonmovingSegmentBlockCount(seg);
for (unsigned int i=0; i < n; i++) {
if (nonmovingGetMark(seg, i)) {
@@ -76,15 +81,15 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
* all blocks in nonmoving heap are valid closures.
*/
struct NonmovingAllocCensus
-nonmovingAllocatorCensusWithWords(struct NonmovingAllocator *alloc)
+nonmovingAllocatorCensusWithWords(uint32_t alloc_idx)
{
- return nonmovingAllocatorCensus_(alloc, true);
+ return nonmovingAllocatorCensus_(alloc_idx, true);
}
struct NonmovingAllocCensus
-nonmovingAllocatorCensus(struct NonmovingAllocator *alloc)
+nonmovingAllocatorCensus(uint32_t alloc_idx)
{
- return nonmovingAllocatorCensus_(alloc, false);
+ return nonmovingAllocatorCensus_(alloc_idx, false);
}
@@ -130,7 +135,7 @@ void nonmovingPrintAllocatorCensus(bool collect_live_words)
for (int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
struct NonmovingAllocCensus census =
- nonmovingAllocatorCensus_(nonmovingHeap.allocators[i], collect_live_words);
+ nonmovingAllocatorCensus_(i, collect_live_words);
print_alloc_census(i, census);
}
@@ -143,8 +148,7 @@ void nonmovingTraceAllocatorCensus()
return;
for (int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
- const struct NonmovingAllocCensus census =
- nonmovingAllocatorCensus(nonmovingHeap.allocators[i]);
+ const struct NonmovingAllocCensus census = nonmovingAllocatorCensus(i);
const uint32_t log_blk_size = i + NONMOVING_ALLOCA0;
traceNonmovingHeapCensus(log_blk_size, &census);
}
=====================================
rts/sm/NonMovingCensus.h
=====================================
@@ -20,10 +20,10 @@ struct NonmovingAllocCensus {
struct NonmovingAllocCensus
-nonmovingAllocatorCensusWithWords(struct NonmovingAllocator *alloc);
+nonmovingAllocatorCensusWithWords(uint32_t alloc_idx);
struct NonmovingAllocCensus
-nonmovingAllocatorCensus(struct NonmovingAllocator *alloc);
+nonmovingAllocatorCensus(uint32_t alloc_idx);
void nonmovingPrintAllocatorCensus(bool collect_live_words);
void nonmovingTraceAllocatorCensus(void);
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -257,7 +257,7 @@ StgWord nonmoving_write_barrier_enabled = false;
MarkQueue *current_mark_queue = NULL;
/* Initialise update remembered set data structures */
-void nonmovingMarkInitUpdRemSet() {
+void nonmovingMarkInit() {
#if defined(THREADED_RTS)
initMutex(&upd_rem_set_lock);
initCondition(&upd_rem_set_flushed_cond);
@@ -301,8 +301,8 @@ static void nonmovingAddUpdRemSetBlocks_lock(MarkQueue *rset)
// Reset the state of the remembered set.
ACQUIRE_SM_LOCK;
init_mark_queue_(rset);
- rset->is_upd_rem_set = true;
RELEASE_SM_LOCK;
+ rset->is_upd_rem_set = true;
}
/*
=====================================
rts/sm/NonMovingMark.h
=====================================
@@ -140,7 +140,7 @@ extern MarkQueue *current_mark_queue;
extern bdescr *upd_rem_set_block_list;
-void nonmovingMarkInitUpdRemSet(void);
+void nonmovingMarkInit(void);
void nonmovingInitUpdRemSet(UpdRemSet *rset);
void updateRemembSetPushClosure(Capability *cap, StgClosure *p);
=====================================
rts/sm/Sanity.c
=====================================
@@ -619,11 +619,12 @@ static void checkNonmovingSegments (struct NonmovingSegment *seg)
void checkNonmovingHeap (const struct NonmovingHeap *heap)
{
for (unsigned int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
- const struct NonmovingAllocator *alloc = heap->allocators[i];
+ const struct NonmovingAllocator *alloc = &heap->allocators[i];
checkNonmovingSegments(alloc->filled);
checkNonmovingSegments(alloc->active);
- for (unsigned int cap=0; cap < getNumCapabilities(); cap++) {
- checkNonmovingSegments(alloc->current[cap]);
+ for (unsigned int cap_n=0; cap_n < getNumCapabilities(); cap_n++) {
+ Capability *cap = capabilities[cap_n];
+ checkNonmovingSegments(cap->current_segments[i]);
}
}
}
@@ -1047,11 +1048,12 @@ findMemoryLeak (void)
markBlocks(nonmoving_compact_objects);
markBlocks(nonmoving_marked_compact_objects);
for (i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
+ struct NonmovingAllocator *alloc = &nonmovingHeap.allocators[i];
markNonMovingSegments(alloc->filled);
markNonMovingSegments(alloc->active);
for (j = 0; j < getNumCapabilities(); j++) {
- markNonMovingSegments(alloc->current[j]);
+ Capability *cap = capabilities[j];
+ markNonMovingSegments(cap->current_segments[i]);
}
}
markNonMovingSegments(nonmovingHeap.sweep_list);
@@ -1156,23 +1158,18 @@ countNonMovingSegments(struct NonmovingSegment *segs)
return ret;
}
-static W_
-countNonMovingAllocator(struct NonmovingAllocator *alloc)
-{
- W_ ret = countNonMovingSegments(alloc->filled)
- + countNonMovingSegments(alloc->active);
- for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
- ret += countNonMovingSegments(alloc->current[i]);
- }
- return ret;
-}
-
static W_
countNonMovingHeap(struct NonmovingHeap *heap)
{
W_ ret = 0;
for (int alloc_idx = 0; alloc_idx < NONMOVING_ALLOCA_CNT; alloc_idx++) {
- ret += countNonMovingAllocator(heap->allocators[alloc_idx]);
+ struct NonmovingAllocator *alloc = &heap->allocators[alloc_idx];
+ ret += countNonMovingSegments(alloc->filled);
+ ret += countNonMovingSegments(alloc->active);
+ for (uint32_t c = 0; c < getNumCapabilities(); ++c) {
+ Capability *cap = capabilities[c];
+ ret += countNonMovingSegments(cap->current_segments[alloc_idx]);
+ }
}
ret += countNonMovingSegments(heap->sweep_list);
ret += countNonMovingSegments(heap->free);
=====================================
rts/sm/Storage.c
=====================================
@@ -214,17 +214,14 @@ initStorage (void)
}
oldest_gen->to = oldest_gen;
- // Nonmoving heap uses oldest_gen so initialize it after initializing oldest_gen
- nonmovingInit();
-
#if defined(THREADED_RTS)
// nonmovingAddCapabilities allocates segments, which requires taking the gc
// sync lock, so initialize it before nonmovingAddCapabilities
initSpinLock(&gc_alloc_block_sync);
#endif
- if (RtsFlags.GcFlags.useNonmoving)
- nonmovingAddCapabilities(getNumCapabilities());
+ // Nonmoving heap uses oldest_gen so initialize it after initializing oldest_gen
+ nonmovingInit();
/* The oldest generation has one step. */
if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
@@ -320,12 +317,10 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
}
}
- // Initialize NonmovingAllocators and UpdRemSets
+ // Initialize non-moving collector
if (RtsFlags.GcFlags.useNonmoving) {
- nonmovingAddCapabilities(to);
- for (i = from; i < to; ++i) {
- capabilities[i]->upd_rem_set.queue.blocks = NULL;
- nonmovingInitUpdRemSet(&capabilities[i]->upd_rem_set);
+ for (i = from; i < to; i++) {
+ nonmovingInitCapability(capabilities[i]);
}
}
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/5fe2c6b2cd1a88a1c0edf9dfe3377805e46cf2d8...10e43f7d3f31badae0bd11d12ec145840d9b259b
--
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/5fe2c6b2cd1a88a1c0edf9dfe3377805e46cf2d8...10e43f7d3f31badae0bd11d12ec145840d9b259b
You're receiving this email because of your account on gitlab.haskell.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mail.haskell.org/pipermail/ghc-commits/attachments/20230208/cfd36e95/attachment-0001.html>
More information about the ghc-commits
mailing list