[Git][ghc/ghc][wip/T22264] 4 commits: testsuite: Mark ffi023 as broken due to #23089
Ben Gamari (@bgamari)
gitlab at gitlab.haskell.org
Wed Mar 8 03:41:37 UTC 2023
Ben Gamari pushed to branch wip/T22264 at Glasgow Haskell Compiler / GHC
Commits:
600fdd58 by Ben Gamari at 2023-03-07T22:41:27-05:00
testsuite: Mark ffi023 as broken due to #23089
- - - - -
9c9bd1af by Ben Gamari at 2023-03-07T22:41:27-05:00
testsuite: Skip T7160 in the nonmoving way
Finalization order is different under the nonmoving collector.
- - - - -
ac0240af by Ben Gamari at 2023-03-07T22:41:27-05:00
rts: Capture GC configuration in a struct
The number of distinct arguments passed to GarbageCollect was getting a
bit out of hand.
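(For reviewers: the pattern here is the classic "parameter object" refactoring. The boolean arguments move into a struct that call sites fill with C99 designated initializers, so every flag is named at the call site and new flags, such as the nonconcurrent flag added later in this series, need not ripple through every caller. A condensed sketch, using the names from the patch below:

    struct GcConfig config = {
        .collect_gen     = collect_gen,
        .do_heap_census  = heap_census,
        .overflow_gc     = is_overflow_gc,
        .deadlock_detect = deadlock_detect,
        .nonconcurrent   = nonconcurrent
    };
    // was: GarbageCollect(collect_gen, heap_census, is_overflow_gc, ...)
    GarbageCollect(config, cap, idle_cap);
)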
- - - - -
be9b4ca4 by Ben Gamari at 2023-03-07T22:41:27-05:00
nonmoving: Non-concurrent collection
- - - - -
10 changed files:
- rts/Schedule.c
- rts/include/rts/storage/GC.h
- rts/sm/GC.c
- rts/sm/GC.h
- rts/sm/NonMoving.c
- rts/sm/NonMoving.h
- rts/sm/NonMovingMark.c
- testsuite/tests/ffi/should_run/all.T
- testsuite/tests/ffi/should_run/ffi023_c.c
- testsuite/tests/rts/all.T
Changes:
=====================================
rts/Schedule.c
=====================================
@@ -157,7 +157,10 @@ static bool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static bool scheduleNeedHeapProfile(bool ready_to_gc);
static void scheduleDoGC( Capability **pcap, Task *task,
- bool force_major, bool is_overflow_gc, bool deadlock_detect );
+ bool force_major,
+ bool is_overflow_gc,
+ bool deadlock_detect,
+ bool nonconcurrent );
static void deleteThread (StgTSO *tso);
static void deleteAllThreads (void);
@@ -259,7 +262,7 @@ schedule (Capability *initialCapability, Task *task)
case SCHED_INTERRUPTING:
debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
/* scheduleDoGC() deletes all the threads */
- scheduleDoGC(&cap,task,true,false,false);
+ scheduleDoGC(&cap,task,true,false,false,false);
// after scheduleDoGC(), we must be shutting down. Either some
// other Capability did the final GC, or we did it above,
@@ -572,7 +575,7 @@ run_thread:
}
if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
- scheduleDoGC(&cap,task,false,ready_to_gc,false);
+ scheduleDoGC(&cap,task,false,ready_to_gc,false,false);
}
} /* end of while() */
}
@@ -966,7 +969,7 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC (pcap, task, true/*force major GC*/, false /* Whether it is an overflow GC */, true/*deadlock detection*/);
+ scheduleDoGC (pcap, task, true/*force major GC*/, false /* Whether it is an overflow GC */, true/*deadlock detection*/, false/*nonconcurrent*/);
cap = *pcap;
// when force_major == true. scheduleDoGC sets
// recent_activity to ACTIVITY_DONE_GC and turns off the timer
@@ -1015,7 +1018,7 @@ scheduleProcessInbox (Capability **pcap USED_IF_THREADS)
while (!emptyInbox(cap)) {
// Executing messages might use heap, so we should check for GC.
if (doYouWantToGC(cap)) {
- scheduleDoGC(pcap, cap->running_task, false, false, false);
+ scheduleDoGC(pcap, cap->running_task, false, false, false, false);
cap = *pcap;
}
@@ -1583,7 +1586,10 @@ void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task)
// behind deadlock_detect argument.
static void
scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
- bool force_major, bool is_overflow_gc, bool deadlock_detect)
+ bool force_major,
+ bool is_overflow_gc,
+ bool deadlock_detect,
+ bool nonconcurrent)
{
Capability *cap = *pcap;
bool heap_census;
@@ -1873,14 +1879,23 @@ delete_threads_and_gc:
// Do any remaining idle GC work from the previous GC
doIdleGCWork(cap, true /* all of it */);
+ struct GcConfig config = {
+ .collect_gen = collect_gen,
+ .do_heap_census = heap_census,
+ .overflow_gc = is_overflow_gc,
+ .deadlock_detect = deadlock_detect,
+ .nonconcurrent = nonconcurrent
+ };
+
#if defined(THREADED_RTS)
// reset pending_sync *before* GC, so that when the GC threads
// emerge they don't immediately re-enter the GC.
RELAXED_STORE(&pending_sync, 0);
signalCondition(&sync_finished_cond);
- GarbageCollect(collect_gen, heap_census, is_overflow_gc, deadlock_detect, gc_type, cap, idle_cap);
+ config.parallel = gc_type == SYNC_GC_PAR;
+ GarbageCollect(config, cap, idle_cap);
#else
- GarbageCollect(collect_gen, heap_census, is_overflow_gc, deadlock_detect, 0, cap, NULL);
+ GarbageCollect(config, cap, NULL);
#endif
// If we're shutting down, don't leave any idle GC work to do.
@@ -2770,7 +2785,7 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
nonmovingStop();
Capability *cap = task->cap;
waitForCapability(&cap,task);
- scheduleDoGC(&cap,task,true,false,false);
+ scheduleDoGC(&cap,task,true,false,false,true);
ASSERT(task->incall->tso == NULL);
releaseCapability(cap);
}
@@ -2815,7 +2830,7 @@ freeScheduler( void )
-------------------------------------------------------------------------- */
static void
-performGC_(bool force_major)
+performGC_(bool force_major, bool nonconcurrent)
{
Task *task;
Capability *cap = NULL;
@@ -2828,7 +2843,7 @@ performGC_(bool force_major)
// TODO: do we need to traceTask*() here?
waitForCapability(&cap,task);
- scheduleDoGC(&cap,task,force_major,false,false);
+ scheduleDoGC(&cap,task,force_major,false,false,nonconcurrent);
releaseCapability(cap);
exitMyTask();
}
@@ -2836,13 +2851,19 @@ performGC_(bool force_major)
void
performGC(void)
{
- performGC_(false);
+ performGC_(false, false);
}
void
performMajorGC(void)
{
- performGC_(true);
+ performGC_(true, false);
+}
+
+void
+performBlockingMajorGC(void)
+{
+ performGC_(true, true);
}
/* ---------------------------------------------------------------------------
=====================================
rts/include/rts/storage/GC.h
=====================================
@@ -215,6 +215,7 @@ extern W_ large_alloc_lim;
void performGC(void);
void performMajorGC(void);
+void performBlockingMajorGC(void);
/* -----------------------------------------------------------------------------
The CAF table - used to let us revert CAFs in GHCi
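A note on the distinction between the two major-GC entry points, as a hedged sketch; the wrapper function below is hypothetical and only the three declarations above come from the patch:

    #include "Rts.h"

    static void flush_heap(void)
    {
        // With the nonmoving collector enabled, performMajorGC() may hand
        // marking off to the concurrent mark thread and return early.
        performMajorGC();

        // performBlockingMajorGC() requests a nonconcurrent collection, so
        // the entire major GC has completed by the time it returns.
        performBlockingMajorGC();
    }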
=====================================
rts/sm/GC.c
=====================================
@@ -278,11 +278,7 @@ addMutListScavStats(const MutListScavStats *src,
-------------------------------------------------------------------------- */
void
-GarbageCollect (uint32_t collect_gen,
- const bool do_heap_census,
- const bool is_overflow_gc,
- const bool deadlock_detect,
- uint32_t gc_type USED_IF_THREADS,
+GarbageCollect (struct GcConfig config,
Capability *cap,
bool idle_cap[])
{
@@ -298,7 +294,7 @@ GarbageCollect (uint32_t collect_gen,
// The time we should report our heap census as occurring at, if necessary.
Time mut_time = 0;
- if (do_heap_census) {
+ if (config.do_heap_census) {
RTSStats stats;
getRTSStats(&stats);
mut_time = stats.mutator_cpu_ns;
@@ -307,6 +303,8 @@ GarbageCollect (uint32_t collect_gen,
// necessary if we stole a callee-saves register for gct:
#if defined(THREADED_RTS)
saved_gct = gct;
+#else
+ ASSERT(!config.parallel);
#endif
#if defined(PROFILING)
@@ -349,11 +347,11 @@ GarbageCollect (uint32_t collect_gen,
/* Figure out which generation to collect
*/
- N = collect_gen;
+ N = config.collect_gen;
major_gc = (N == RtsFlags.GcFlags.generations-1);
/* See Note [Deadlock detection under the nonmoving collector]. */
- deadlock_detect_gc = deadlock_detect;
+ deadlock_detect_gc = config.deadlock_detect;
#if defined(THREADED_RTS)
if (major_gc && RtsFlags.GcFlags.useNonmoving && RELAXED_LOAD(&concurrent_coll_running)) {
@@ -362,7 +360,7 @@ GarbageCollect (uint32_t collect_gen,
* TODO: Catch heap-size runaway.
*/
N--;
- collect_gen--;
+ config.collect_gen--;
major_gc = false;
}
#endif
@@ -397,7 +395,7 @@ GarbageCollect (uint32_t collect_gen,
* we set n_gc_threads, work_stealing, n_gc_idle_threads, gc_running_threads
* here
*/
- if (gc_type == SYNC_GC_PAR) {
+ if (config.parallel) {
n_gc_threads = getNumCapabilities();
n_gc_idle_threads = 0;
for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
@@ -858,6 +856,8 @@ GarbageCollect (uint32_t collect_gen,
// oldest_gen->scavenged_large_objects back to oldest_gen->large_objects.
ASSERT(oldest_gen->scavenged_large_objects == NULL);
if (RtsFlags.GcFlags.useNonmoving && major_gc) {
+ bool concurrent = false;
+
// All threads in non-moving heap should be found to be alive, because
// threads in the non-moving generation's list should live in the
// non-moving heap, and we consider non-moving objects alive during
@@ -871,18 +871,21 @@ GarbageCollect (uint32_t collect_gen,
// old_weak_ptr_list should be empty.
ASSERT(oldest_gen->old_weak_ptr_list == NULL);
+#if defined(THREADED_RTS)
+ concurrent = !config.nonconcurrent;
+#else
+ // In the non-threaded runtime this is the only time we push to the
+ // upd_rem_set
+ nonmovingAddUpdRemSetBlocks(&gct->cap->upd_rem_set);
+#endif
+
// dead_weak_ptr_list contains weak pointers with dead keys. Those need to
// be kept alive because we'll use them in finalizeSchedulers(). Similarly
// resurrected_threads are also going to be used in resurrectedThreads()
// so we need to mark those too.
// Note that in sequential case these lists will be appended with more
// weaks and threads found to be dead in mark.
-#if !defined(THREADED_RTS)
- // In the non-threaded runtime this is the only time we push to the
- // upd_rem_set
- nonmovingAddUpdRemSetBlocks(&gct->cap->upd_rem_set);
-#endif
- nonmovingCollect(&dead_weak_ptr_list, &resurrected_threads);
+ nonmovingCollect(&dead_weak_ptr_list, &resurrected_threads, concurrent);
}
// Update the max size of older generations after a major GC:
@@ -963,7 +966,7 @@ GarbageCollect (uint32_t collect_gen,
// resurrectThreads(), for the same reason as checkSanity above:
// resurrectThreads() will overwrite some closures and leave slop
// behind.
- if (do_heap_census) {
+ if (config.do_heap_census) {
debugTrace(DEBUG_sched, "performing heap census");
RELEASE_SM_LOCK;
heapCensus(mut_time);
@@ -1014,7 +1017,7 @@ GarbageCollect (uint32_t collect_gen,
#endif
// Reset the counter if the major GC was caused by a heap overflow
- consec_idle_gcs = is_overflow_gc ? 0 : consec_idle_gcs + 1;
+ consec_idle_gcs = config.overflow_gc ? 0 : consec_idle_gcs + 1;
// See Note [Scaling retained memory]
double scaled_factor =
=====================================
rts/sm/GC.h
=====================================
@@ -17,11 +17,23 @@
#include "BeginPrivate.h"
-void GarbageCollect (uint32_t collect_gen,
- bool do_heap_census,
- bool is_overflow_gc,
- bool deadlock_detect,
- uint32_t gc_type,
+struct GcConfig {
+ // which generation are we requesting be collected?
+ uint32_t collect_gen;
+ // is a heap census requested?
+ bool do_heap_census;
+ // is this GC triggered by a heap overflow?
+ bool overflow_gc;
+ // is this GC triggered by a deadlock?
+ bool deadlock_detect;
+ // should we force non-concurrent collection if the non-moving collector is
+ // being used?
+ bool nonconcurrent;
+ // should we use parallel scavenging?
+ bool parallel;
+};
+
+void GarbageCollect (struct GcConfig config,
Capability *cap,
bool idle_cap[]);
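One consequence of passing GcConfig by value, shown as an illustrative call that is not itself in the patch: fields omitted from a designated initializer are zero-initialized, so all the booleans default to false and a minimal request stays terse.

    struct GcConfig config = {
        .collect_gen = 0,              // collect only the youngest generation
    };                                 // all flags default to false
    GarbageCollect(config, cap, NULL); // NULL idle_cap, as in the non-threaded call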
=====================================
rts/sm/NonMoving.c
=====================================
@@ -548,7 +548,7 @@ MarkBudget sync_phase_marking_budget = 200000;
#if defined(THREADED_RTS)
static void* nonmovingConcurrentMark(void *mark_queue);
#endif
-static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads);
+static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads, bool concurrent);
// Add a segment to the free list.
void nonmovingPushFreeSegment(struct NonmovingSegment *seg)
@@ -712,7 +712,7 @@ static void nonmovingPrepareMark(void)
#endif
}
-void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
+void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads, bool concurrent STG_UNUSED)
{
#if defined(THREADED_RTS)
// We can't start a new collection until the old one has finished
@@ -799,7 +799,7 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
}
trace(TRACE_nonmoving_gc, "Finished nonmoving GC preparation");
- // We are now safe to start concurrent marking
+ // We are now safe to start (possibly concurrent) marking
// Note that in concurrent mark we can't use dead_weaks and
// resurrected_threads from the preparation to add new weaks and threads as
@@ -807,13 +807,17 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
// those lists to mark function in sequential case. In concurrent case we
// allocate fresh lists.
-#if defined(THREADED_RTS)
// If we're interrupting or shutting down, do not let this capability go and
// run a STW collection. Reason: we won't be able to acquire this capability
// again for the sync if we let it go, because it'll immediately start doing
// a major GC, because that's what we do when exiting scheduler (see
// exitScheduler()).
- if (getSchedState() == SCHED_RUNNING) {
+ if (getSchedState() != SCHED_RUNNING) {
+ concurrent = false;
+ }
+
+#if defined(THREADED_RTS)
+ if (concurrent) {
RELAXED_STORE(&concurrent_coll_running, true);
nonmoving_write_barrier_enabled = true;
debugTrace(DEBUG_nonmoving_gc, "Starting concurrent mark thread");
@@ -823,14 +827,19 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
barf("nonmovingCollect: failed to spawn mark thread: %s", strerror(errno));
}
RELAXED_STORE(&mark_thread, thread);
+ return;
} else {
- nonmovingConcurrentMark(mark_queue);
+ RELEASE_SM_LOCK;
}
-#else
+#endif
+
// Use the weak and thread lists from the preparation for any new weaks and
// threads found to be dead in mark.
- nonmovingMark_(mark_queue, dead_weaks, resurrected_threads);
-#endif
+ nonmovingMark_(mark_queue, dead_weaks, resurrected_threads, false);
+
+ if (!concurrent) {
+ ACQUIRE_SM_LOCK;
+ }
}
/* Mark queue, threads, and weak pointers until no more weaks have been
@@ -862,7 +871,7 @@ static void* nonmovingConcurrentMark(void *data)
MarkQueue *mark_queue = (MarkQueue*)data;
StgWeak *dead_weaks = NULL;
StgTSO *resurrected_threads = (StgTSO*)&stg_END_TSO_QUEUE_closure;
- nonmovingMark_(mark_queue, &dead_weaks, &resurrected_threads);
+ nonmovingMark_(mark_queue, &dead_weaks, &resurrected_threads, true);
return NULL;
}
@@ -876,8 +885,11 @@ static void appendWeakList( StgWeak **w1, StgWeak *w2 )
}
#endif
-static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads)
+static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads, bool concurrent)
{
+#if !defined(THREADED_RTS)
+ ASSERT(!concurrent);
+#endif
ACQUIRE_LOCK(&nonmoving_collection_mutex);
debugTrace(DEBUG_nonmoving_gc, "Starting mark...");
stat_startNonmovingGc();
@@ -920,38 +932,41 @@ concurrent_marking:
}
#if defined(THREADED_RTS)
- Task *task = newBoundTask();
-
- // If at this point if we've decided to exit then just return
- if (getSchedState() > SCHED_RUNNING) {
- // Note that we break our invariants here and leave segments in
- // nonmovingHeap.sweep_list, don't free nonmoving_large_objects etc.
- // However because we won't be running sweep in the final GC this
- // is OK.
- //
- // However, we must move any weak pointers remaining on
- // nonmoving_old_weak_ptr_list back to nonmoving_weak_ptr_list
- // such that their C finalizers can be run by hs_exit_.
- appendWeakList(&nonmoving_weak_ptr_list, nonmoving_old_weak_ptr_list);
- goto finish;
- }
-
- // We're still running, request a sync
- nonmovingBeginFlush(task);
-
- bool all_caps_syncd;
- MarkBudget sync_marking_budget = sync_phase_marking_budget;
- do {
- all_caps_syncd = nonmovingWaitForFlush();
- if (nonmovingMarkThreadsWeaks(&sync_marking_budget, mark_queue) == false) {
- // We ran out of budget for marking. Abort sync.
- // See Note [Sync phase marking budget].
- traceConcSyncEnd();
- stat_endNonmovingGcSync();
- releaseAllCapabilities(n_capabilities, NULL, task);
- goto concurrent_marking;
+ Task *task = NULL;
+ if (concurrent) {
+ task = newBoundTask();
+
+ // If at this point we've decided to exit, then just return
+ if (getSchedState() > SCHED_RUNNING) {
+ // Note that we break our invariants here and leave segments in
+ // nonmovingHeap.sweep_list, don't free nonmoving_large_objects etc.
+ // However because we won't be running sweep in the final GC this
+ // is OK.
+ //
+ // However, we must move any weak pointers remaining on
+ // nonmoving_old_weak_ptr_list back to nonmoving_weak_ptr_list
+ // such that their C finalizers can be run by hs_exit_.
+ appendWeakList(&nonmoving_weak_ptr_list, nonmoving_old_weak_ptr_list);
+ goto finish;
}
- } while (!all_caps_syncd);
+
+ // We're still running, request a sync
+ nonmovingBeginFlush(task);
+
+ bool all_caps_syncd;
+ MarkBudget sync_marking_budget = sync_phase_marking_budget;
+ do {
+ all_caps_syncd = nonmovingWaitForFlush();
+ if (nonmovingMarkThreadsWeaks(&sync_marking_budget, mark_queue) == false) {
+ // We ran out of budget for marking. Abort sync.
+ // See Note [Sync phase marking budget].
+ traceConcSyncEnd();
+ stat_endNonmovingGcSync();
+ releaseAllCapabilities(n_capabilities, NULL, task);
+ goto concurrent_marking;
+ }
+ } while (!all_caps_syncd);
+ }
#endif
nonmovingResurrectThreads(mark_queue, resurrected_threads);
@@ -981,15 +996,15 @@ concurrent_marking:
// Schedule finalizers and resurrect threads
-#if defined(THREADED_RTS)
- // Just pick a random capability. Not sure if this is a good idea -- we use
- // only one capability for all finalizers.
- scheduleFinalizers(getCapability(0), *dead_weaks);
- // Note that this mutates heap and causes running write barriers.
- // See Note [Unintentional marking in resurrectThreads] in NonMovingMark.c
- // for how we deal with this.
- resurrectThreads(*resurrected_threads);
-#endif
+ if (concurrent) {
+ // Just pick a random capability. Not sure if this is a good idea -- we use
+ // only one capability for all finalizers.
+ scheduleFinalizers(getCapability(0), *dead_weaks);
+ // Note that this mutates heap and causes running write barriers.
+ // See Note [Unintentional marking in resurrectThreads] in NonMovingMark.c
+ // for how we deal with this.
+ resurrectThreads(*resurrected_threads);
+ }
#if defined(DEBUG)
// Zap CAFs that we will sweep
@@ -1019,15 +1034,19 @@ concurrent_marking:
// Prune spark lists
// See Note [Spark management under the nonmoving collector].
#if defined(THREADED_RTS)
- for (uint32_t n = 0; n < getNumCapabilities(); n++) {
- pruneSparkQueue(true, getCapability(n));
+ if (concurrent) {
+ for (uint32_t n = 0; n < getNumCapabilities(); n++) {
+ pruneSparkQueue(true, getCapability(n));
+ }
}
-#endif
// Everything has been marked; allow the mutators to proceed
-#if defined(THREADED_RTS) && !defined(NONCONCURRENT_SWEEP)
- nonmoving_write_barrier_enabled = false;
- nonmovingFinishFlush(task);
+#if !defined(NONCONCURRENT_SWEEP)
+ if (concurrent) {
+ nonmoving_write_barrier_enabled = false;
+ nonmovingFinishFlush(task);
+ }
+#endif
#endif
current_mark_queue = NULL;
@@ -1064,24 +1083,28 @@ concurrent_marking:
nonmovingTraceAllocatorCensus();
#endif
-#if defined(THREADED_RTS) && defined(NONCONCURRENT_SWEEP)
+#if defined(NONCONCURRENT_SWEEP)
#if defined(DEBUG)
checkNonmovingHeap(&nonmovingHeap);
checkSanity(true, true);
#endif
- nonmoving_write_barrier_enabled = false;
- nonmovingFinishFlush(task);
+ if (concurrent) {
+ nonmoving_write_barrier_enabled = false;
+ nonmovingFinishFlush(task);
+ }
#endif
// TODO: Remainder of things done by GarbageCollect (update stats)
#if defined(THREADED_RTS)
finish:
- exitMyTask();
+ if (concurrent) {
+ exitMyTask();
- // We are done...
- RELAXED_STORE(&mark_thread, 0);
- stat_endNonmovingGc();
+ // We are done...
+ RELAXED_STORE(&mark_thread, 0);
+ stat_endNonmovingGc();
+ }
// Signal that the concurrent collection is finished, allowing the next
// non-moving collection to proceed
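To summarize the restructured control flow established by the hunks above, a condensed reading aid (pseudocode in a comment, not code from the patch; locking and tracing details elided):

    /*
     * Condensed control flow of nonmovingCollect after this patch:
     *
     *   prepare heap; snapshot weak/thread lists
     *   if (getSchedState() != SCHED_RUNNING)
     *       concurrent = false;               // never go concurrent at shutdown
     *   #if defined(THREADED_RTS)
     *   if (concurrent) { spawn mark thread -> nonmovingMark_(.., true); return; }
     *   else            { RELEASE_SM_LOCK; }  // mark synchronously, lock dropped
     *   #endif
     *   nonmovingMark_(mark_queue, dead_weaks, resurrected_threads, false);
     *   if (!concurrent) ACQUIRE_SM_LOCK;     // restore lock for GarbageCollect
     */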
=====================================
rts/sm/NonMoving.h
=====================================
@@ -149,7 +149,8 @@ void nonmovingExit(void);
// directly, but in a pause.
//
void nonmovingCollect(StgWeak **dead_weaks,
- StgTSO **resurrected_threads);
+ StgTSO **resurrected_threads,
+ bool concurrent);
void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -27,8 +27,10 @@
#include "sm/Storage.h"
#include "CNF.h"
+#if defined(THREADED_RTS)
static void nonmovingResetUpdRemSetQueue (MarkQueue *rset);
static void nonmovingResetUpdRemSet (UpdRemSet *rset);
+#endif
static bool check_in_nonmoving_heap(StgClosure *p);
static void mark_closure (MarkQueue *queue, const StgClosure *p, StgClosure **origin);
static void trace_tso (MarkQueue *queue, StgTSO *tso);
@@ -955,6 +957,7 @@ void nonmovingInitUpdRemSet (UpdRemSet *rset)
rset->queue.is_upd_rem_set = true;
}
+#if defined(THREADED_RTS)
static void nonmovingResetUpdRemSetQueue (MarkQueue *rset)
{
// UpdRemSets always have one block for the mark queue. This assertion is to
@@ -968,6 +971,7 @@ void nonmovingResetUpdRemSet (UpdRemSet *rset)
{
nonmovingResetUpdRemSetQueue(&rset->queue);
}
+#endif
void freeMarkQueue (MarkQueue *queue)
{
=====================================
testsuite/tests/ffi/should_run/all.T
=====================================
@@ -191,6 +191,7 @@ test('T8083', [omit_ways(['ghci']), req_c], compile_and_run, ['T8083_c.c'])
test('T9274', [omit_ways(['ghci'])], compile_and_run, [''])
test('ffi023', [ omit_ways(['ghci']),
+ expect_broken_for(23089, ['threaded2', 'nonmoving_thr', 'nonmoving_thr_sanity', 'nonmoving_thr_ghc']),
extra_run_opts('1000 4'),
js_broken(22363),
pre_cmd('$MAKE -s --no-print-directory ffi023_setup') ],
=====================================
testsuite/tests/ffi/should_run/ffi023_c.c
=====================================
@@ -4,7 +4,7 @@
HsInt out (HsInt x)
{
- performMajorGC();
+ performBlockingMajorGC();
rts_clearMemory();
return incall(x);
}
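For context, the updated stub as a self-contained file would look roughly like this; a sketch only, since the hunk shows just the body, so the includes and the incall prototype are inferred:

    #include "HsFFI.h"
    #include "Rts.h"

    extern HsInt incall(HsInt x);   // Haskell export called back from C

    HsInt out (HsInt x)
    {
        // Block until the major GC (including any nonmoving mark) finishes,
        // so rts_clearMemory() does not overlap a concurrent collection.
        performBlockingMajorGC();
        rts_clearMemory();
        return incall(x);
    }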
=====================================
testsuite/tests/rts/all.T
=====================================
@@ -261,8 +261,8 @@ test('T6006', [ omit_ways(prof_ways + ['ghci']),
test('T7037', req_c, makefile_test, ['T7037'])
test('T7087', exit_code(1), compile_and_run, [''])
-test('T7160', [ # finalization order is too nondeterministic in the concurrent GC
- omit_ways(['nonmoving_thr', 'nonmoving_thr_ghc', 'nonmoving_thr_sanity'])
+test('T7160', [ # finalization order is different under the nonmoving collector
+ omit_ways(['nonmoving', 'nonmoving_thr', 'nonmoving_thr_ghc', 'nonmoving_thr_sanity'])
, js_broken(22261)
], compile_and_run, [''])
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/6cb91e16d661f9ef755c5fc399cf90b82368b43b...be9b4ca4da78b10bc065957472ecf2c8ce7599a4