[Git][ghc/ghc][wip/gc/ben] Remove redundant write barrier checks and fix prediction
Ben Gamari
gitlab at gitlab.haskell.org
Thu Apr 18 03:51:45 UTC 2019
Ben Gamari pushed to branch wip/gc/ben at Glasgow Haskell Compiler / GHC
Commits:
90e0ece0 by Ben Gamari at 2019-04-17T23:50:30Z
Remove redundant write barrier checks and fix prediction
- - - - -
10 changed files:
- includes/Cmm.h
- rts/Messages.c
- rts/PrimOps.cmm
- rts/STM.c
- rts/Schedule.c
- rts/ThreadPaused.c
- rts/Threads.c
- rts/Updates.h
- rts/sm/NonMovingMark.c
- rts/sm/Storage.c
Changes:
=====================================
includes/Cmm.h
=====================================
@@ -797,11 +797,6 @@
Misc junk
-------------------------------------------------------------------------- */
-#if !defined(THREADED_RTS)
-// This is also done in rts/NonMoving.h, but that isn't visible from C--
-#define nonmoving_write_barrier_enabled 0
-#endif
-
#define NO_TREC stg_NO_TREC_closure
#define END_TSO_QUEUE stg_END_TSO_QUEUE_closure
#define STM_AWOKEN stg_STM_AWOKEN_closure
@@ -940,9 +935,19 @@
return (dst);
+#if defined(THREADED_RTS)
+#define IF_WRITE_BARRIER_ENABLED \
+ if (W_[nonmoving_write_barrier_enabled] != 0) (likely: False)
+#else
+// A similar measure is also taken in rts/NonMoving.h, but that isn't visible from C--
+#define IF_WRITE_BARRIER_ENABLED \
+ if (0)
+#define nonmoving_write_barrier_enabled 0
+#endif
+
// A useful helper for pushing a pointer to the update remembered set.
// See Note [Update remembered set] in NonMovingMark.c.
-#define updateRemembSetPushPtr(p) \
- if (nonmoving_write_barrier_enabled != 0) (likely: False) { \
- ccall updateRemembSetPushClosure_(BaseReg "ptr", p "ptr"); \
+#define updateRemembSetPushPtr(p) \
+ IF_WRITE_BARRIER_ENABLED { \
+ ccall updateRemembSetPushClosure_(BaseReg "ptr", p "ptr"); \
}
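Background on the Cmm.h change: the new IF_WRITE_BARRIER_ENABLED macro centralises the guard that each C-- call site used to spell out by hand. In the threaded RTS it tests the global flag and annotates the branch with (likely: False); in the non-threaded RTS the nonmoving collector's barrier can never be enabled, so the guard becomes a constant if (0) that the code generator can drop. Below is a rough C analogue of the same idea, a sketch only: the UNLIKELY helper is an assumption modelled on GCC/Clang's __builtin_expect, not GHC's actual RTS_UNLIKELY definition.

    /* Rough C analogue of the new IF_WRITE_BARRIER_ENABLED macro (illustrative only). */
    #include <stdbool.h>

    #define UNLIKELY(p) __builtin_expect(!!(p), 0)   /* assumes GCC/Clang */

    #if defined(THREADED_RTS)
    extern bool nonmoving_write_barrier_enabled;
    /* Threaded RTS: read the flag, hinting that the barrier is usually off. */
    #define IF_WRITE_BARRIER_ENABLED if (UNLIKELY(nonmoving_write_barrier_enabled))
    #else
    /* Non-threaded RTS: the barrier can never be enabled, so the branch folds away. */
    #define IF_WRITE_BARRIER_ENABLED if (0)
    #endif
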
=====================================
rts/Messages.c
=====================================
@@ -256,7 +256,7 @@ loop:
// point to the BLOCKING_QUEUE from the BLACKHOLE
write_barrier(); // make the BQ visible
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
updateRemembSetPushClosure(cap, (StgClosure*)p);
}
((StgInd*)bh)->indirectee = (StgClosure *)bq;
@@ -287,7 +287,7 @@ loop:
}
#endif
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
// We are about to overwrite bq->queue; make sure its current value
// makes it into the update remembered set
updateRemembSetPushClosure(cap, (StgClosure*)bq->queue);
=====================================
rts/PrimOps.cmm
=====================================
@@ -474,7 +474,9 @@ stg_copyArray_barrier ( W_ hdr_size, gcptr dst, W_ dst_off, W_ n)
end = p + WDS(n);
again:
- ccall updateRemembSetPushClosure_(BaseReg "ptr", W_[p] "ptr");
+ IF_WRITE_BARRIER_ENABLED {
+ ccall updateRemembSetPushClosure_(BaseReg "ptr", W_[p] "ptr");
+ }
p = p + WDS(1);
if (p < end) {
goto again;
@@ -488,7 +490,7 @@ stg_copySmallArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
W_ dst_p, src_p, bytes;
if (n > 0) {
- if (nonmoving_write_barrier_enabled != 0 || 1) {
+ IF_WRITE_BARRIER_ENABLED {
call stg_copyArray_barrier(SIZEOF_StgSmallMutArrPtrs,
dst, dst_off, n);
}
@@ -509,7 +511,7 @@ stg_copySmallMutableArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n
W_ dst_p, src_p, bytes;
if (n > 0) {
- if (nonmoving_write_barrier_enabled != 0 || 1) {
+ IF_WRITE_BARRIER_ENABLED {
call stg_copyArray_barrier(SIZEOF_StgSmallMutArrPtrs,
dst, dst_off, n);
}
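Aside on the PrimOps.cmm hunks: besides switching to IF_WRITE_BARRIER_ENABLED, they drop an "|| 1" that had made the condition unconditionally true, so the copy barrier ran even with the nonmoving collector off. The sketch below shows the shape of that barrier in plain C with invented names (push_closure, write_barrier_enabled); it is an illustration, not the RTS implementation: before bulk-overwriting a slice of a mutable array, every pointer about to be clobbered is pushed so a concurrent mark can still reach it.

    /* Illustrative pre-copy barrier for an array slice (not the RTS code). */
    #include <stddef.h>
    #include <stdio.h>

    static int write_barrier_enabled = 0;   /* stand-in for the RTS flag */

    static void push_closure(void *p)       /* stand-in for updateRemembSetPushClosure_ */
    {
        printf("remembered %p\n", p);
    }

    static void copy_array_slice(void **dst, size_t dst_off,
                                 void **src, size_t src_off, size_t n)
    {
        if (__builtin_expect(write_barrier_enabled, 0)) {
            /* Remember every pointer we are about to overwrite. */
            for (size_t i = 0; i < n; i++)
                push_closure(dst[dst_off + i]);
        }
        for (size_t i = 0; i < n; i++)
            dst[dst_off + i] = src[src_off + i];
    }
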
=====================================
rts/STM.c
=====================================
@@ -297,7 +297,7 @@ static StgClosure *lock_tvar(Capability *cap,
} while (cas((void *)&(s -> current_value),
(StgWord)result, (StgWord)trec) != (StgWord)result);
- if (nonmoving_write_barrier_enabled && result) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
updateRemembSetPushClosure(cap, result);
}
return result;
@@ -323,7 +323,7 @@ static StgBool cond_lock_tvar(Capability *cap,
TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
result = (StgClosure *)w;
- if (nonmoving_write_barrier_enabled && result) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
updateRemembSetPushClosure(cap, expected);
}
TRACE("%p : %s", trec, result ? "success" : "failure");
=====================================
rts/Schedule.c
=====================================
@@ -2501,7 +2501,9 @@ resumeThread (void *task_)
incall->suspended_tso = NULL;
incall->suspended_cap = NULL;
// we will modify tso->_link
- updateRemembSetPushClosure(cap, (StgClosure *)tso->_link);
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
+ updateRemembSetPushClosure(cap, (StgClosure *)tso->_link);
+ }
tso->_link = END_TSO_QUEUE;
traceEventRunThread(cap, tso);
=====================================
rts/ThreadPaused.c
=====================================
@@ -330,8 +330,8 @@ threadPaused(Capability *cap, StgTSO *tso)
}
#endif
- if (nonmoving_write_barrier_enabled
- && ip_THUNK(INFO_PTR_TO_STRUCT(bh_info))) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled
+ && ip_THUNK(INFO_PTR_TO_STRUCT(bh_info)))) {
// We are about to replace a thunk with a blackhole.
// Add the free variables of the closure we are about to
// overwrite to the update remembered set.
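The ThreadPaused.c hunk only adds the prediction hint; the underlying barrier is unchanged. When a thunk is about to be blackholed, its free variables must first reach the update remembered set, otherwise a mark that started before the overwrite could lose them. A minimal sketch of that idea, using an invented closure layout rather than GHC's StgThunk:

    /* Minimal sketch of the blackholing barrier (invented layout, not StgThunk). */
    #include <stddef.h>

    struct closure {
        size_t          n_ptrs;     /* number of pointer fields in the payload */
        struct closure *payload[];  /* the thunk's free variables */
    };

    static void push_closure(struct closure *p) { (void)p; /* enqueue for marking */ }

    /* Call just before the thunk's header is overwritten with a blackhole. */
    static void remember_free_vars(struct closure *thunk)
    {
        for (size_t i = 0; i < thunk->n_ptrs; i++)
            push_closure(thunk->payload[i]);
    }
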
=====================================
rts/Threads.c
=====================================
@@ -712,7 +712,7 @@ threadStackUnderflow (Capability *cap, StgTSO *tso)
barf("threadStackUnderflow: not enough space for return values");
}
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
// ensure that values that we copy into the new stack are marked
// for the nonmoving collector. Note that these values won't
// necessarily form a full closure so we need to handle them
=====================================
rts/Updates.h
=====================================
@@ -44,7 +44,7 @@
W_ bd; \
\
OVERWRITING_CLOSURE(p1); \
- if (nonmoving_write_barrier_enabled != 0) { \
+ IF_WRITE_BARRIER_ENABLED { \
ccall updateRemembSetPushThunk_(BaseReg, p1 "ptr"); \
} \
StgInd_indirectee(p1) = p2; \
@@ -73,7 +73,7 @@ INLINE_HEADER void updateWithIndirection (Capability *cap,
/* not necessarily true: ASSERT( !closure_IND(p1) ); */
/* occurs in RaiseAsync.c:raiseAsync() */
OVERWRITING_CLOSURE(p1);
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
updateRemembSetPushThunk(cap, (StgThunk*)p1);
}
((StgInd *)p1)->indirectee = p2;
=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -459,8 +459,6 @@ bool check_in_nonmoving_heap(StgClosure *p) {
*/
void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
{
- // TODO: Eliminate this conditional once it's folded into codegen
- if (!nonmoving_write_barrier_enabled) return;
const StgInfoTable *info;
do {
info = get_volatile_itbl((StgClosure *) thunk);
@@ -517,14 +515,11 @@ void updateRemembSetPushThunkEager(Capability *cap,
void updateRemembSetPushThunk_(StgRegTable *reg, StgThunk *p)
{
- // TODO: Eliminate this conditional once it's folded into codegen
- if (!nonmoving_write_barrier_enabled) return;
updateRemembSetPushThunk(regTableToCapability(reg), p);
}
void updateRemembSetPushClosure(Capability *cap, StgClosure *p)
{
- if (!nonmoving_write_barrier_enabled) return;
if (!check_in_nonmoving_heap(p)) return;
MarkQueue *queue = &cap->upd_rem_set.queue;
push_closure(queue, p, NULL);
@@ -578,8 +573,6 @@ STATIC_INLINE void finish_upd_rem_set_mark(StgClosure *p)
void updateRemembSetPushTSO(Capability *cap, StgTSO *tso)
{
- // TODO: Eliminate this conditional once it's folded into codegen
- if (!nonmoving_write_barrier_enabled) return;
if (needs_upd_rem_set_mark((StgClosure *) tso)) {
debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: TSO %p", tso);
mark_tso(&cap->upd_rem_set.queue, tso);
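This is the "redundant checks" half of the commit: updateRemembSetPushThunk, updateRemembSetPushThunk_, updateRemembSetPushClosure and updateRemembSetPushTSO no longer test nonmoving_write_barrier_enabled themselves, because callers are now expected to guard the call (RTS_UNLIKELY in C, IF_WRITE_BARRIER_ENABLED in C--). A schematic of the resulting caller/callee contract, in illustrative C with invented names:

    /* Schematic of the caller/callee contract after this patch (illustrative). */
    static int  write_barrier_enabled;              /* stand-in for the RTS flag */
    static void push_closure(void *p) { (void)p; }  /* stand-in for the remembered set */

    /* Callee: assumes the barrier is known to be on; no re-check inside. */
    static void remember(void *old)
    {
        push_closure(old);
    }

    /* Caller: the single, prediction-hinted check lives at the call site. */
    static void overwrite_field(void **slot, void *new_val)
    {
        if (__builtin_expect(write_barrier_enabled, 0))
            remember(*slot);
        *slot = new_val;
    }
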
=====================================
rts/sm/Storage.c
=====================================
@@ -430,7 +430,7 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
// reference should be in SRTs
ASSERT(orig_info_tbl->layout.payload.ptrs == 0);
// Because the payload is empty we just push the SRT
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
StgThunkInfoTable *thunk_info = itbl_to_thunk_itbl(orig_info_tbl);
if (thunk_info->i.srt) {
updateRemembSetPushClosure(cap, GET_SRT(thunk_info));
@@ -1140,7 +1140,7 @@ dirty_MUT_VAR(StgRegTable *reg, StgMutVar *mvar, StgClosure *old)
if (mvar->header.info == &stg_MUT_VAR_CLEAN_info) {
mvar->header.info = &stg_MUT_VAR_DIRTY_info;
recordClosureMutated(cap, (StgClosure *) mvar);
- if (nonmoving_write_barrier_enabled != 0) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled != 0)) {
updateRemembSetPushClosure_(reg, old);
}
}
@@ -1159,7 +1159,7 @@ dirty_TVAR(Capability *cap, StgTVar *p,
if (p->header.info == &stg_TVAR_CLEAN_info) {
p->header.info = &stg_TVAR_DIRTY_info;
recordClosureMutated(cap,(StgClosure*)p);
- if (nonmoving_write_barrier_enabled != 0) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled != 0)) {
updateRemembSetPushClosure(cap, old);
}
}
@@ -1176,7 +1176,7 @@ setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
if (tso->dirty == 0) {
tso->dirty = 1;
recordClosureMutated(cap,(StgClosure*)tso);
- if (nonmoving_write_barrier_enabled)
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
updateRemembSetPushClosure(cap, (StgClosure *) tso->_link);
}
tso->_link = target;
@@ -1188,7 +1188,7 @@ setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
if (tso->dirty == 0) {
tso->dirty = 1;
recordClosureMutated(cap,(StgClosure*)tso);
- if (nonmoving_write_barrier_enabled)
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
updateRemembSetPushClosure(cap, (StgClosure *) tso->block_info.prev);
}
tso->block_info.prev = target;
@@ -1202,7 +1202,7 @@ dirty_TSO (Capability *cap, StgTSO *tso)
recordClosureMutated(cap,(StgClosure*)tso);
}
- if (nonmoving_write_barrier_enabled)
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
updateRemembSetPushTSO(cap, tso);
}
@@ -1211,7 +1211,7 @@ dirty_STACK (Capability *cap, StgStack *stack)
{
// First push to upd_rem_set before we set stack->dirty since
// the nonmoving collector may already be marking the stack.
- if (nonmoving_write_barrier_enabled)
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
updateRemembSetPushStack(cap, stack);
if (! (stack->dirty & STACK_DIRTY)) {
@@ -1236,7 +1236,7 @@ void
update_MVAR(StgRegTable *reg, StgClosure *p, StgClosure *old_val)
{
Capability *cap = regTableToCapability(reg);
- if (nonmoving_write_barrier_enabled) {
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
StgMVar *mvar = (StgMVar *) p;
updateRemembSetPushClosure(cap, old_val);
updateRemembSetPushClosure(cap, (StgClosure *) mvar->head);
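The Storage.c changes all follow the same snapshot-style pattern: on a clean-to-dirty transition the object is recorded on the mutable list as before, and, only while the nonmoving collector's mark is running, the old referent is additionally pushed to the update remembered set so the snapshot the mark started from stays reachable. A compressed sketch of dirty_MUT_VAR's shape, with invented types standing in for StgMutVar and the capability:

    /* Compressed sketch of the dirty_MUT_VAR pattern (invented types, not the RTS). */
    #include <stdbool.h>

    struct mutvar { bool dirty; void *var; };

    static bool write_barrier_enabled;                          /* stand-in for the RTS flag */
    static void record_mutated(struct mutvar *mv) { (void)mv; } /* moving GC's mutable list */
    static void push_closure(void *p)             { (void)p; }  /* update remembered set */

    static void dirty_mut_var(struct mutvar *mv, void *old)
    {
        if (!mv->dirty) {                  /* only on the first write since the last GC */
            mv->dirty = true;
            record_mutated(mv);
            if (__builtin_expect(write_barrier_enabled, 0))
                push_closure(old);         /* remember the value being overwritten */
        }
    }
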
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/commit/90e0ece0f88d13cf703cc2831f7f9b772b81b4ed