[Git][ghc/ghc][wip/tsan/storage] 2 commits: rts/SpinLock: Separate out slow path
Ben Gamari
gitlab at gitlab.haskell.org
Fri Oct 30 18:02:24 UTC 2020
Ben Gamari pushed to branch wip/tsan/storage at Glasgow Haskell Compiler / GHC
Commits:
105d43db by Ben Gamari at 2020-10-30T14:02:19-04:00
rts/SpinLock: Separate out slow path
Not only is this in general a good idea, but it turns out that GCC
unrolls the retry loop, resulting in massive code bloat in critical
parts of the RTS (e.g. `evacuate`).
- - - - -
f7b45cde by Ben Gamari at 2020-10-30T14:02:19-04:00
rts: Use relaxed ordering on spinlock counters
- - - - -
4 changed files:
- includes/rts/SpinLock.h
- includes/stg/SMP.h
- + rts/SpinLock.c
- rts/rts.cabal.in
Changes:
=====================================
includes/rts/SpinLock.h
=====================================
@@ -39,19 +39,14 @@ typedef struct SpinLock_
#define IF_PROF_SPIN(x)
#endif
+void acquire_spin_lock_slow_path(SpinLock * p);
+
// acquire spin lock
INLINE_HEADER void ACQUIRE_SPIN_LOCK(SpinLock * p)
{
- do {
- for (uint32_t i = 0; i < SPIN_COUNT; i++) {
- StgWord32 r = cas((StgVolatilePtr)&(p->lock), 1, 0);
- if (r != 0) return;
- IF_PROF_SPIN(__atomic_fetch_add(&p->spin, 1, __ATOMIC_RELAXED));
- busy_wait_nop();
- }
- IF_PROF_SPIN(__atomic_fetch_add(&p->yield, 1, __ATOMIC_RELAXED));
- yieldThread();
- } while (1);
+ StgWord32 r = cas((StgVolatilePtr)&(p->lock), 1, 0);
+ if (RTS_UNLIKELY(r == 0))
+ acquire_spin_lock_slow_path(p);
}
// release spin lock
=====================================
includes/stg/SMP.h
=====================================
@@ -440,6 +440,7 @@ load_load_barrier(void) {
// Relaxed atomic operations.
#define RELAXED_LOAD(ptr) __atomic_load_n(ptr, __ATOMIC_RELAXED)
#define RELAXED_STORE(ptr,val) __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
+#define RELAXED_ADD(ptr,val) __atomic_add_fetch(ptr, val, __ATOMIC_RELAXED)
// Acquire/release atomic operations
#define ACQUIRE_LOAD(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
@@ -466,6 +467,7 @@ EXTERN_INLINE void load_load_barrier () {} /* nothing */
// Relaxed atomic operations
#define RELAXED_LOAD(ptr) *ptr
#define RELAXED_STORE(ptr,val) *ptr = val
+#define RELAXED_ADD(ptr,val) *ptr += val
// Acquire/release atomic operations
#define ACQUIRE_LOAD(ptr) *ptr
=====================================
rts/SpinLock.c
=====================================
@@ -0,0 +1,41 @@
+/* ----------------------------------------------------------------------------
+ *
+ * (c) The GHC Team, 2006-2009
+ *
+ * Spin locks
+ *
+ * These are simple spin-only locks as opposed to Mutexes which
+ * probably spin for a while before blocking in the kernel. We use
+ * these when we are sure that all our threads are actively running on
+ * a CPU, eg. in the GC.
+ *
+ * TODO: measure whether we really need these, or whether Mutexes
+ * would do (and be a bit safer if a CPU becomes loaded).
+ *
+ * Do not #include this file directly: #include "Rts.h" instead.
+ *
+ * To understand the structure of the RTS headers, see the wiki:
+ * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/source-tree/includes
+ *
+ * -------------------------------------------------------------------------- */
+
+#include "PosixSource.h"
+#include "Rts.h"
+
+#if defined(THREADED_RTS)
+
+void acquire_spin_lock_slow_path(SpinLock * p)
+{
+ do {
+ for (uint32_t i = 0; i < SPIN_COUNT; i++) {
+ StgWord32 r = cas((StgVolatilePtr)&(p->lock), 1, 0);
+ if (r != 0) return;
+ IF_PROF_SPIN(RELAXED_ADD(&p->spin, 1));
+ busy_wait_nop();
+ }
+ IF_PROF_SPIN(RELAXED_ADD(&p->yield, 1));
+ yieldThread();
+ } while (1);
+}
+
+#endif
=====================================
rts/rts.cabal.in
=====================================
@@ -462,6 +462,7 @@ library
STM.c
Schedule.c
Sparks.c
+ SpinLock.c
StableName.c
StablePtr.c
StaticPtrTable.c
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/f538641f2319370c88fd3dee3770d5f8ac970ccc...f7b45cde43f47f94b77411477aabdb56f8f63d66
--
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/f538641f2319370c88fd3dee3770d5f8ac970ccc...f7b45cde43f47f94b77411477aabdb56f8f63d66
You're receiving this email because of your account on gitlab.haskell.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mail.haskell.org/pipermail/ghc-commits/attachments/20201030/e3bc979e/attachment-0001.html>
More information about the ghc-commits
mailing list