[Git][ghc/ghc][wip/T22012] 2 commits: testsuite: Add simple test exercising C11 atomics in GHCi
Ben Gamari (@bgamari)
gitlab at gitlab.haskell.org
Fri Sep 1 01:49:28 UTC 2023
Ben Gamari pushed to branch wip/T22012 at Glasgow Haskell Compiler / GHC
Commits:
1b7f1cde by Ben Gamari at 2023-08-31T21:49:16-04:00
testsuite: Add simple test exercising C11 atomics in GHCi
See #22012.
- - - - -
ab659ad9 by Ben Gamari at 2023-08-31T21:49:16-04:00
rts/RtsSymbols: Add AArch64 outline atomic operations
Fixes #22012 by adding the symbols described in
https://github.com/llvm/llvm-project/blob/main/llvm/docs/Atomics.rst#libcalls-atomic.
Ultimately this would be better addressed by #22011, but this is a first
step in the right direction and fixes the immediate symptom.
Note that we dropped the `__aarch64_cas16` operations as these are not
provided by all platforms' compilers. Also, we don't link directly against the
libgcc/compiler-rt definitions but rather provide our own wrappers to
work around broken toolchains (e.g. https://bugs.gentoo.org/868018).
Generated via https://gitlab.haskell.org/ghc/ghc/-/snippets/5733.
- - - - -
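For context on the fix above (an illustration, not part of the commit): on a
toolchain where `-moutline-atomics` is in effect, an ordinary C11 atomic
operation compiles to a call into one of the `__aarch64_*` libcalls rather
than to inline atomic instructions, which is why GHCi's RTS linker must be
able to resolve those symbols. A minimal sketch, assuming gcc or clang
targeting AArch64; the function name is illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    /* With -moutline-atomics in effect, this fetch-add is typically
     * emitted as a call to __aarch64_ldadd4_acq_rel rather than as
     * inline LSE/LL-SC instructions, so the RTS linker must be able to
     * resolve that symbol when loading object code into GHCi. */
    uint32_t bump(_Atomic uint32_t *counter) {
        return atomic_fetch_add_explicit(counter, 1, memory_order_acq_rel);
    }
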
8 changed files:
- configure.ac
- + m4/fp_armv8_outline_atomics.m4
- + rts/ARMOutlineAtomicsSymbols.h
- rts/RtsSymbols.c
- + testsuite/tests/rts/T22012.hs
- + testsuite/tests/rts/T22012.stdout
- + testsuite/tests/rts/T22012_c.c
- testsuite/tests/rts/all.T
Changes:
=====================================
configure.ac
=====================================
@@ -1120,6 +1120,10 @@ AC_DEFINE_UNQUOTED([RTS_LINKER_USE_MMAP], [$RtsLinkerUseMmap],
GHC_ADJUSTORS_METHOD([Target])
AC_SUBST([UseLibffiForAdjustors])
+dnl ** ARM outline atomics
+dnl --------------------------------------------------------------
+FP_ARM_OUTLINE_ATOMICS
+
dnl ** IPE data compression
dnl --------------------------------------------------------------
FP_FIND_LIBZSTD
=====================================
m4/fp_armv8_outline_atomics.m4
=====================================
@@ -0,0 +1,30 @@
+# FP_ARM_OUTLINE_ATOMICS
+# ----------
+#
+# Note [ARM outline atomics and the RTS linker]
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Sets HAVE_ARM_OUTLINE_ATOMICS depending upon whether the target compiler
+# provides ARMv8's outline atomics symbols. If so, we ensure that the
+# runtime system linker's symbol table includes these symbols, since code
+# generated by the C compiler may include references to them.
+#
+# This is surprisingly tricky as not all implementations provide all symbols.
+# For instance:
+#
+# - some compilers don't include 128-bit atomics
+# - some (misconfigured?) toolchains don't define certain _sync operations
+# (see https://bugs.gentoo.org/868018)
+#
+# For this reason we do not link directly against the symbols provided by
+# compiler-rt/libgcc. Instead, we provide our own wrappers (defined in
+# rts/ARMOutlineAtomicsSymbols.h), which should compile to equivalent code.
+# This is all horrible.
+#
+
+AC_DEFUN([FP_ARM_OUTLINE_ATOMICS], [
+ AC_CHECK_FUNC(
+ [__aarch64_ldadd1_acq],
+    [AC_DEFINE([HAVE_ARM_OUTLINE_ATOMICS], [1], [Define if the toolchain provides ARMv8 outline atomics])]
+ )
+])
+
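For reference (a sketch of autoconf's usual behaviour, not the literal
generated file): AC_CHECK_FUNC tests for the symbol purely at link time, by
compiling and linking a conftest program of roughly this shape:

    /* Declared with a dummy prototype; only whether the symbol resolves
     * at link time matters, not its real signature. */
    char __aarch64_ldadd1_acq(void);

    int main(void) {
        return (int) __aarch64_ldadd1_acq();
    }

If the link succeeds, HAVE_ARM_OUTLINE_ATOMICS is defined and the RTS picks
up ARMOutlineAtomicsSymbols.h below.
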
=====================================
rts/ARMOutlineAtomicsSymbols.h
=====================================
@@ -0,0 +1,710 @@
+/*
+ * Declarations and RTS symbol table entries for the outline atomics
+ * symbols provided by some ARMv8 compilers.
+ *
+ * See Note [ARM outline atomics and the RTS linker] in m4/fp_armv8_outline_atomics.m4.
+ *
+ * See #22012.
+ */
+
+#include <stdint.h>
+#include <stdatomic.h>
+
+uint8_t ghc___aarch64_cas1_relax(uint8_t old, uint8_t new, uint8_t* p);
+uint8_t ghc___aarch64_cas1_relax(uint8_t old, uint8_t new, uint8_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_relaxed, memory_order_relaxed); return old;
+}
+
+uint8_t ghc___aarch64_cas1_acq(uint8_t old, uint8_t new, uint8_t* p);
+uint8_t ghc___aarch64_cas1_acq(uint8_t old, uint8_t new, uint8_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acquire, memory_order_acquire); return old;
+}
+
+uint8_t ghc___aarch64_cas1_acq_rel(uint8_t old, uint8_t new, uint8_t* p);
+uint8_t ghc___aarch64_cas1_acq_rel(uint8_t old, uint8_t new, uint8_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acq_rel, memory_order_acquire); return old;
+}
+
+uint8_t ghc___aarch64_cas1_sync(uint8_t old, uint8_t new, uint8_t* p);
+uint8_t ghc___aarch64_cas1_sync(uint8_t old, uint8_t new, uint8_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_seq_cst, memory_order_seq_cst); return old;
+}
+
+uint16_t ghc___aarch64_cas2_relax(uint16_t old, uint16_t new, uint16_t* p);
+uint16_t ghc___aarch64_cas2_relax(uint16_t old, uint16_t new, uint16_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_relaxed, memory_order_relaxed); return old;
+}
+
+uint16_t ghc___aarch64_cas2_acq(uint16_t old, uint16_t new, uint16_t* p);
+uint16_t ghc___aarch64_cas2_acq(uint16_t old, uint16_t new, uint16_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acquire, memory_order_acquire); return old;
+}
+
+uint16_t ghc___aarch64_cas2_acq_rel(uint16_t old, uint16_t new, uint16_t* p);
+uint16_t ghc___aarch64_cas2_acq_rel(uint16_t old, uint16_t new, uint16_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acq_rel, memory_order_acquire); return old;
+}
+
+uint16_t ghc___aarch64_cas2_sync(uint16_t old, uint16_t new, uint16_t* p);
+uint16_t ghc___aarch64_cas2_sync(uint16_t old, uint16_t new, uint16_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_seq_cst, memory_order_seq_cst); return old;
+}
+
+uint32_t ghc___aarch64_cas4_relax(uint32_t old, uint32_t new, uint32_t* p);
+uint32_t ghc___aarch64_cas4_relax(uint32_t old, uint32_t new, uint32_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_relaxed, memory_order_relaxed); return old;
+}
+
+uint32_t ghc___aarch64_cas4_acq(uint32_t old, uint32_t new, uint32_t* p);
+uint32_t ghc___aarch64_cas4_acq(uint32_t old, uint32_t new, uint32_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acquire, memory_order_acquire); return old;
+}
+
+uint32_t ghc___aarch64_cas4_acq_rel(uint32_t old, uint32_t new, uint32_t* p);
+uint32_t ghc___aarch64_cas4_acq_rel(uint32_t old, uint32_t new, uint32_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acq_rel, memory_order_acquire); return old;
+}
+
+uint32_t ghc___aarch64_cas4_sync(uint32_t old, uint32_t new, uint32_t* p);
+uint32_t ghc___aarch64_cas4_sync(uint32_t old, uint32_t new, uint32_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_seq_cst, memory_order_seq_cst); return old;
+}
+
+uint64_t ghc___aarch64_cas8_relax(uint64_t old, uint64_t new, uint64_t* p);
+uint64_t ghc___aarch64_cas8_relax(uint64_t old, uint64_t new, uint64_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_relaxed, memory_order_relaxed); return old;
+}
+
+uint64_t ghc___aarch64_cas8_acq(uint64_t old, uint64_t new, uint64_t* p);
+uint64_t ghc___aarch64_cas8_acq(uint64_t old, uint64_t new, uint64_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acquire, memory_order_acquire); return old;
+}
+
+uint64_t ghc___aarch64_cas8_acq_rel(uint64_t old, uint64_t new, uint64_t* p);
+uint64_t ghc___aarch64_cas8_acq_rel(uint64_t old, uint64_t new, uint64_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_acq_rel, memory_order_acquire); return old;
+}
+
+uint64_t ghc___aarch64_cas8_sync(uint64_t old, uint64_t new, uint64_t* p);
+uint64_t ghc___aarch64_cas8_sync(uint64_t old, uint64_t new, uint64_t* p) {
+ atomic_compare_exchange_strong_explicit(p, &old, new, memory_order_seq_cst, memory_order_seq_cst); return old;
+}
+
+uint8_t ghc___aarch64_swp1_relax(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_swp1_relax(uint8_t v, uint8_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_relaxed);
+}
+
+uint8_t ghc___aarch64_swp1_acq(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_swp1_acq(uint8_t v, uint8_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acquire);
+}
+
+uint8_t ghc___aarch64_swp1_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_swp1_rel(uint8_t v, uint8_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_release);
+}
+
+uint8_t ghc___aarch64_swp1_acq_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_swp1_acq_rel(uint8_t v, uint8_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acq_rel);
+}
+
+uint8_t ghc___aarch64_swp1_sync(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_swp1_sync(uint8_t v, uint8_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_seq_cst);
+}
+
+uint16_t ghc___aarch64_swp2_relax(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_swp2_relax(uint16_t v, uint16_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_relaxed);
+}
+
+uint16_t ghc___aarch64_swp2_acq(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_swp2_acq(uint16_t v, uint16_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acquire);
+}
+
+uint16_t ghc___aarch64_swp2_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_swp2_rel(uint16_t v, uint16_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_release);
+}
+
+uint16_t ghc___aarch64_swp2_acq_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_swp2_acq_rel(uint16_t v, uint16_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acq_rel);
+}
+
+uint16_t ghc___aarch64_swp2_sync(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_swp2_sync(uint16_t v, uint16_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_seq_cst);
+}
+
+uint32_t ghc___aarch64_swp4_relax(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_swp4_relax(uint32_t v, uint32_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_relaxed);
+}
+
+uint32_t ghc___aarch64_swp4_acq(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_swp4_acq(uint32_t v, uint32_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acquire);
+}
+
+uint32_t ghc___aarch64_swp4_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_swp4_rel(uint32_t v, uint32_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_release);
+}
+
+uint32_t ghc___aarch64_swp4_acq_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_swp4_acq_rel(uint32_t v, uint32_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acq_rel);
+}
+
+uint32_t ghc___aarch64_swp4_sync(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_swp4_sync(uint32_t v, uint32_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_seq_cst);
+}
+
+uint64_t ghc___aarch64_swp8_relax(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_swp8_relax(uint64_t v, uint64_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_relaxed);
+}
+
+uint64_t ghc___aarch64_swp8_acq(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_swp8_acq(uint64_t v, uint64_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acquire);
+}
+
+uint64_t ghc___aarch64_swp8_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_swp8_rel(uint64_t v, uint64_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_release);
+}
+
+uint64_t ghc___aarch64_swp8_acq_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_swp8_acq_rel(uint64_t v, uint64_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_acq_rel);
+}
+
+uint64_t ghc___aarch64_swp8_sync(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_swp8_sync(uint64_t v, uint64_t* p) {
+ return atomic_exchange_explicit(p, v, memory_order_seq_cst);
+}
+
+uint8_t ghc___aarch64_ldadd1_relax(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldadd1_relax(uint8_t v, uint8_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_relaxed);
+}
+
+uint8_t ghc___aarch64_ldadd1_acq(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldadd1_acq(uint8_t v, uint8_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acquire);
+}
+
+uint8_t ghc___aarch64_ldadd1_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldadd1_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_release);
+}
+
+uint8_t ghc___aarch64_ldadd1_acq_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldadd1_acq_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acq_rel);
+}
+
+uint8_t ghc___aarch64_ldadd1_sync(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldadd1_sync(uint8_t v, uint8_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
+}
+
+uint16_t ghc___aarch64_ldadd2_relax(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldadd2_relax(uint16_t v, uint16_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_relaxed);
+}
+
+uint16_t ghc___aarch64_ldadd2_acq(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldadd2_acq(uint16_t v, uint16_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acquire);
+}
+
+uint16_t ghc___aarch64_ldadd2_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldadd2_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_release);
+}
+
+uint16_t ghc___aarch64_ldadd2_acq_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldadd2_acq_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acq_rel);
+}
+
+uint16_t ghc___aarch64_ldadd2_sync(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldadd2_sync(uint16_t v, uint16_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
+}
+
+uint32_t ghc___aarch64_ldadd4_relax(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldadd4_relax(uint32_t v, uint32_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_relaxed);
+}
+
+uint32_t ghc___aarch64_ldadd4_acq(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldadd4_acq(uint32_t v, uint32_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acquire);
+}
+
+uint32_t ghc___aarch64_ldadd4_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldadd4_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_release);
+}
+
+uint32_t ghc___aarch64_ldadd4_acq_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldadd4_acq_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acq_rel);
+}
+
+uint32_t ghc___aarch64_ldadd4_sync(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldadd4_sync(uint32_t v, uint32_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
+}
+
+uint64_t ghc___aarch64_ldadd8_relax(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldadd8_relax(uint64_t v, uint64_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_relaxed);
+}
+
+uint64_t ghc___aarch64_ldadd8_acq(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldadd8_acq(uint64_t v, uint64_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acquire);
+}
+
+uint64_t ghc___aarch64_ldadd8_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldadd8_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_release);
+}
+
+uint64_t ghc___aarch64_ldadd8_acq_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldadd8_acq_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_acq_rel);
+}
+
+uint64_t ghc___aarch64_ldadd8_sync(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldadd8_sync(uint64_t v, uint64_t* p) {
+ return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
+}
+
+uint8_t ghc___aarch64_ldclr1_relax(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldclr1_relax(uint8_t v, uint8_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_relaxed);
+}
+
+uint8_t ghc___aarch64_ldclr1_acq(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldclr1_acq(uint8_t v, uint8_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acquire);
+}
+
+uint8_t ghc___aarch64_ldclr1_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldclr1_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_release);
+}
+
+uint8_t ghc___aarch64_ldclr1_acq_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldclr1_acq_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acq_rel);
+}
+
+uint8_t ghc___aarch64_ldclr1_sync(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldclr1_sync(uint8_t v, uint8_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_seq_cst);
+}
+
+uint16_t ghc___aarch64_ldclr2_relax(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldclr2_relax(uint16_t v, uint16_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_relaxed);
+}
+
+uint16_t ghc___aarch64_ldclr2_acq(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldclr2_acq(uint16_t v, uint16_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acquire);
+}
+
+uint16_t ghc___aarch64_ldclr2_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldclr2_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_release);
+}
+
+uint16_t ghc___aarch64_ldclr2_acq_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldclr2_acq_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acq_rel);
+}
+
+uint16_t ghc___aarch64_ldclr2_sync(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldclr2_sync(uint16_t v, uint16_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_seq_cst);
+}
+
+uint32_t ghc___aarch64_ldclr4_relax(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldclr4_relax(uint32_t v, uint32_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_relaxed);
+}
+
+uint32_t ghc___aarch64_ldclr4_acq(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldclr4_acq(uint32_t v, uint32_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acquire);
+}
+
+uint32_t ghc___aarch64_ldclr4_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldclr4_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_release);
+}
+
+uint32_t ghc___aarch64_ldclr4_acq_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldclr4_acq_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acq_rel);
+}
+
+uint32_t ghc___aarch64_ldclr4_sync(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldclr4_sync(uint32_t v, uint32_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_seq_cst);
+}
+
+uint64_t ghc___aarch64_ldclr8_relax(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldclr8_relax(uint64_t v, uint64_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_relaxed);
+}
+
+uint64_t ghc___aarch64_ldclr8_acq(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldclr8_acq(uint64_t v, uint64_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acquire);
+}
+
+uint64_t ghc___aarch64_ldclr8_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldclr8_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_release);
+}
+
+uint64_t ghc___aarch64_ldclr8_acq_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldclr8_acq_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_acq_rel);
+}
+
+uint64_t ghc___aarch64_ldclr8_sync(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldclr8_sync(uint64_t v, uint64_t* p) {
+ return atomic_fetch_and_explicit(p, v, memory_order_seq_cst);
+}
+
+uint8_t ghc___aarch64_ldeor1_relax(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldeor1_relax(uint8_t v, uint8_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_relaxed);
+}
+
+uint8_t ghc___aarch64_ldeor1_acq(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldeor1_acq(uint8_t v, uint8_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acquire);
+}
+
+uint8_t ghc___aarch64_ldeor1_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldeor1_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_release);
+}
+
+uint8_t ghc___aarch64_ldeor1_acq_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldeor1_acq_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acq_rel);
+}
+
+uint8_t ghc___aarch64_ldeor1_sync(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldeor1_sync(uint8_t v, uint8_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_seq_cst);
+}
+
+uint16_t ghc___aarch64_ldeor2_relax(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldeor2_relax(uint16_t v, uint16_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_relaxed);
+}
+
+uint16_t ghc___aarch64_ldeor2_acq(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldeor2_acq(uint16_t v, uint16_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acquire);
+}
+
+uint16_t ghc___aarch64_ldeor2_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldeor2_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_release);
+}
+
+uint16_t ghc___aarch64_ldeor2_acq_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldeor2_acq_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acq_rel);
+}
+
+uint16_t ghc___aarch64_ldeor2_sync(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldeor2_sync(uint16_t v, uint16_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_seq_cst);
+}
+
+uint32_t ghc___aarch64_ldeor4_relax(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldeor4_relax(uint32_t v, uint32_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_relaxed);
+}
+
+uint32_t ghc___aarch64_ldeor4_acq(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldeor4_acq(uint32_t v, uint32_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acquire);
+}
+
+uint32_t ghc___aarch64_ldeor4_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldeor4_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_release);
+}
+
+uint32_t ghc___aarch64_ldeor4_acq_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldeor4_acq_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acq_rel);
+}
+
+uint32_t ghc___aarch64_ldeor4_sync(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldeor4_sync(uint32_t v, uint32_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_seq_cst);
+}
+
+uint64_t ghc___aarch64_ldeor8_relax(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldeor8_relax(uint64_t v, uint64_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_relaxed);
+}
+
+uint64_t ghc___aarch64_ldeor8_acq(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldeor8_acq(uint64_t v, uint64_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acquire);
+}
+
+uint64_t ghc___aarch64_ldeor8_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldeor8_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_release);
+}
+
+uint64_t ghc___aarch64_ldeor8_acq_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldeor8_acq_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_acq_rel);
+}
+
+uint64_t ghc___aarch64_ldeor8_sync(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldeor8_sync(uint64_t v, uint64_t* p) {
+ return atomic_fetch_xor_explicit(p, v, memory_order_seq_cst);
+}
+
+uint8_t ghc___aarch64_ldset1_relax(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldset1_relax(uint8_t v, uint8_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_relaxed);
+}
+
+uint8_t ghc___aarch64_ldset1_acq(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldset1_acq(uint8_t v, uint8_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acquire);
+}
+
+uint8_t ghc___aarch64_ldset1_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldset1_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_release);
+}
+
+uint8_t ghc___aarch64_ldset1_acq_rel(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldset1_acq_rel(uint8_t v, uint8_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acq_rel);
+}
+
+uint8_t ghc___aarch64_ldset1_sync(uint8_t v, uint8_t* p);
+uint8_t ghc___aarch64_ldset1_sync(uint8_t v, uint8_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_seq_cst);
+}
+
+uint16_t ghc___aarch64_ldset2_relax(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldset2_relax(uint16_t v, uint16_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_relaxed);
+}
+
+uint16_t ghc___aarch64_ldset2_acq(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldset2_acq(uint16_t v, uint16_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acquire);
+}
+
+uint16_t ghc___aarch64_ldset2_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldset2_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_release);
+}
+
+uint16_t ghc___aarch64_ldset2_acq_rel(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldset2_acq_rel(uint16_t v, uint16_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acq_rel);
+}
+
+uint16_t ghc___aarch64_ldset2_sync(uint16_t v, uint16_t* p);
+uint16_t ghc___aarch64_ldset2_sync(uint16_t v, uint16_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_seq_cst);
+}
+
+uint32_t ghc___aarch64_ldset4_relax(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldset4_relax(uint32_t v, uint32_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_relaxed);
+}
+
+uint32_t ghc___aarch64_ldset4_acq(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldset4_acq(uint32_t v, uint32_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acquire);
+}
+
+uint32_t ghc___aarch64_ldset4_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldset4_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_release);
+}
+
+uint32_t ghc___aarch64_ldset4_acq_rel(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldset4_acq_rel(uint32_t v, uint32_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acq_rel);
+}
+
+uint32_t ghc___aarch64_ldset4_sync(uint32_t v, uint32_t* p);
+uint32_t ghc___aarch64_ldset4_sync(uint32_t v, uint32_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_seq_cst);
+}
+
+uint64_t ghc___aarch64_ldset8_relax(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldset8_relax(uint64_t v, uint64_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_relaxed);
+}
+
+uint64_t ghc___aarch64_ldset8_acq(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldset8_acq(uint64_t v, uint64_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acquire);
+}
+
+uint64_t ghc___aarch64_ldset8_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldset8_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_release);
+}
+
+uint64_t ghc___aarch64_ldset8_acq_rel(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldset8_acq_rel(uint64_t v, uint64_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_acq_rel);
+}
+
+uint64_t ghc___aarch64_ldset8_sync(uint64_t v, uint64_t* p);
+uint64_t ghc___aarch64_ldset8_sync(uint64_t v, uint64_t* p) {
+ return atomic_fetch_or_explicit(p, v, memory_order_seq_cst);
+}
+
+
+#define RTS_ARM_OUTLINE_ATOMIC_SYMBOLS \
+ SymI_HasProto_redirect(__aarch64_cas1_relax, ghc___aarch64_cas1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas1_acq, ghc___aarch64_cas1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas1_acq_rel, ghc___aarch64_cas1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas1_sync, ghc___aarch64_cas1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas2_relax, ghc___aarch64_cas2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas2_acq, ghc___aarch64_cas2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas2_acq_rel, ghc___aarch64_cas2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas2_sync, ghc___aarch64_cas2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas4_relax, ghc___aarch64_cas4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas4_acq, ghc___aarch64_cas4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas4_acq_rel, ghc___aarch64_cas4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas4_sync, ghc___aarch64_cas4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas8_relax, ghc___aarch64_cas8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas8_acq, ghc___aarch64_cas8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas8_acq_rel, ghc___aarch64_cas8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_cas8_sync, ghc___aarch64_cas8_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp1_relax, ghc___aarch64_swp1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp1_acq, ghc___aarch64_swp1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp1_rel, ghc___aarch64_swp1_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp1_acq_rel, ghc___aarch64_swp1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp1_sync, ghc___aarch64_swp1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp2_relax, ghc___aarch64_swp2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp2_acq, ghc___aarch64_swp2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp2_rel, ghc___aarch64_swp2_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp2_acq_rel, ghc___aarch64_swp2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp2_sync, ghc___aarch64_swp2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp4_relax, ghc___aarch64_swp4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp4_acq, ghc___aarch64_swp4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp4_rel, ghc___aarch64_swp4_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp4_acq_rel, ghc___aarch64_swp4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp4_sync, ghc___aarch64_swp4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp8_relax, ghc___aarch64_swp8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp8_acq, ghc___aarch64_swp8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp8_rel, ghc___aarch64_swp8_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp8_acq_rel, ghc___aarch64_swp8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_swp8_sync, ghc___aarch64_swp8_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd1_relax, ghc___aarch64_ldadd1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd1_acq, ghc___aarch64_ldadd1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd1_rel, ghc___aarch64_ldadd1_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd1_acq_rel, ghc___aarch64_ldadd1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd1_sync, ghc___aarch64_ldadd1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd2_relax, ghc___aarch64_ldadd2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd2_acq, ghc___aarch64_ldadd2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd2_rel, ghc___aarch64_ldadd2_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd2_acq_rel, ghc___aarch64_ldadd2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd2_sync, ghc___aarch64_ldadd2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd4_relax, ghc___aarch64_ldadd4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd4_acq, ghc___aarch64_ldadd4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd4_rel, ghc___aarch64_ldadd4_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd4_acq_rel, ghc___aarch64_ldadd4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd4_sync, ghc___aarch64_ldadd4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd8_relax, ghc___aarch64_ldadd8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd8_acq, ghc___aarch64_ldadd8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd8_rel, ghc___aarch64_ldadd8_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd8_acq_rel, ghc___aarch64_ldadd8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldadd8_sync, ghc___aarch64_ldadd8_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr1_relax, ghc___aarch64_ldclr1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr1_acq, ghc___aarch64_ldclr1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr1_rel, ghc___aarch64_ldclr1_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr1_acq_rel, ghc___aarch64_ldclr1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr1_sync, ghc___aarch64_ldclr1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr2_relax, ghc___aarch64_ldclr2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr2_acq, ghc___aarch64_ldclr2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr2_rel, ghc___aarch64_ldclr2_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr2_acq_rel, ghc___aarch64_ldclr2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr2_sync, ghc___aarch64_ldclr2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr4_relax, ghc___aarch64_ldclr4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr4_acq, ghc___aarch64_ldclr4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr4_rel, ghc___aarch64_ldclr4_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr4_acq_rel, ghc___aarch64_ldclr4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr4_sync, ghc___aarch64_ldclr4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr8_relax, ghc___aarch64_ldclr8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr8_acq, ghc___aarch64_ldclr8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr8_rel, ghc___aarch64_ldclr8_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr8_acq_rel, ghc___aarch64_ldclr8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldclr8_sync, ghc___aarch64_ldclr8_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor1_relax, ghc___aarch64_ldeor1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor1_acq, ghc___aarch64_ldeor1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor1_rel, ghc___aarch64_ldeor1_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor1_acq_rel, ghc___aarch64_ldeor1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor1_sync, ghc___aarch64_ldeor1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor2_relax, ghc___aarch64_ldeor2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor2_acq, ghc___aarch64_ldeor2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor2_rel, ghc___aarch64_ldeor2_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor2_acq_rel, ghc___aarch64_ldeor2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor2_sync, ghc___aarch64_ldeor2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor4_relax, ghc___aarch64_ldeor4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor4_acq, ghc___aarch64_ldeor4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor4_rel, ghc___aarch64_ldeor4_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor4_acq_rel, ghc___aarch64_ldeor4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor4_sync, ghc___aarch64_ldeor4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor8_relax, ghc___aarch64_ldeor8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor8_acq, ghc___aarch64_ldeor8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor8_rel, ghc___aarch64_ldeor8_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor8_acq_rel, ghc___aarch64_ldeor8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldeor8_sync, ghc___aarch64_ldeor8_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset1_relax, ghc___aarch64_ldset1_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset1_acq, ghc___aarch64_ldset1_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset1_rel, ghc___aarch64_ldset1_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset1_acq_rel, ghc___aarch64_ldset1_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset1_sync, ghc___aarch64_ldset1_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset2_relax, ghc___aarch64_ldset2_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset2_acq, ghc___aarch64_ldset2_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset2_rel, ghc___aarch64_ldset2_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset2_acq_rel, ghc___aarch64_ldset2_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset2_sync, ghc___aarch64_ldset2_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset4_relax, ghc___aarch64_ldset4_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset4_acq, ghc___aarch64_ldset4_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset4_rel, ghc___aarch64_ldset4_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset4_acq_rel, ghc___aarch64_ldset4_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset4_sync, ghc___aarch64_ldset4_sync, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset8_relax, ghc___aarch64_ldset8_relax, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset8_acq, ghc___aarch64_ldset8_acq, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset8_rel, ghc___aarch64_ldset8_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset8_acq_rel, ghc___aarch64_ldset8_acq_rel, STRENGTH_STRONG, SYM_TYPE_CODE) \
+ SymI_HasProto_redirect(__aarch64_ldset8_sync, ghc___aarch64_ldset8_sync, STRENGTH_STRONG, SYM_TYPE_CODE)
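A subtlety worth spelling out (explanatory, outside the commit): the
`__aarch64_casN` libcalls return the value observed in memory, which is why
each `cas` wrapper above returns `old`. C11's
atomic_compare_exchange_strong_explicit writes the observed value back into
its expected argument on failure and leaves it untouched (and equal to the
observed value) on success, so `old` holds the observed value either way. A
self-contained illustration of that contract:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        _Atomic uint32_t x = 5;
        uint32_t expected = 7;   /* wrong guess, so the CAS fails... */
        atomic_compare_exchange_strong_explicit(&x, &expected, 9,
            memory_order_relaxed, memory_order_relaxed);
        assert(expected == 5);   /* ...and 'expected' now holds the value
                                    that was actually in memory */
        return 0;
    }
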
=====================================
rts/RtsSymbols.c
=====================================
@@ -970,6 +970,13 @@ extern char **environ;
#define RTS_LIBGCC_SYMBOLS
#endif
+// Symbols for AArch64's outline atomics (see ARMOutlineAtomicsSymbols.h).
+#if defined(HAVE_ARM_OUTLINE_ATOMICS)
+#include "ARMOutlineAtomicsSymbols.h"
+#else
+#define RTS_ARM_OUTLINE_ATOMIC_SYMBOLS
+#endif
+
// Symbols defined by libc
#define RTS_LIBC_SYMBOLS \
SymI_HasProto_redirect(atexit, atexit, STRENGTH_STRONG, SYM_TYPE_CODE) /* See Note [Strong symbols] */ \
@@ -1017,6 +1024,7 @@ RTS_LIBC_SYMBOLS
RTS_LIBGCC_SYMBOLS
RTS_FINI_ARRAY_SYMBOLS
RTS_LIBFFI_SYMBOLS
+RTS_ARM_OUTLINE_ATOMIC_SYMBOLS
#undef SymI_NeedsProto
#undef SymI_NeedsDataProto
@@ -1058,6 +1066,7 @@ RtsSymbolVal rtsSyms[] = {
RTS_LIBGCC_SYMBOLS
RTS_FINI_ARRAY_SYMBOLS
RTS_LIBFFI_SYMBOLS
+ RTS_ARM_OUTLINE_ATOMIC_SYMBOLS
SymI_HasDataProto(nonmoving_write_barrier_enabled)
#if defined(darwin_HOST_OS) && defined(i386_HOST_ARCH)
// dyld stub code contains references to this,
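Background on the pattern used in RtsSymbols.c (a generic sketch of the
X-macro technique with illustrative names, not GHC's actual macro
definitions): the same symbol list is expanded twice, once after defining the
SymI_* macros to emit prototypes and once to emit rtsSyms[] table entries.
In miniature:

    #include <stdio.h>

    /* One symbol list, expanded twice with different definitions of
     * SYM; all names here are illustrative. */
    #define MY_SYMBOLS \
        SYM(foo)       \
        SYM(bar)

    /* Expansion 1: prototypes. */
    #define SYM(name) static void name(void);
    MY_SYMBOLS
    #undef SYM

    /* Definitions so the sketch links standalone. */
    static void foo(void) {}
    static void bar(void) {}

    /* Expansion 2: name/address table entries, mirroring rtsSyms[]. */
    typedef struct { const char *label; void (*addr)(void); } SymbolEntry;
    #define SYM(name) { #name, &name },
    static const SymbolEntry table[] = { MY_SYMBOLS { NULL, NULL } };
    #undef SYM

    int main(void) {
        for (const SymbolEntry *e = table; e->label != NULL; e++)
            printf("%s -> %p\n", e->label, (void *) e->addr);
        return 0;
    }
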
=====================================
testsuite/tests/rts/T22012.hs
=====================================
@@ -0,0 +1,17 @@
+-- Ensure that C11 atomics, which may be implemented as function calls on ARMv8
+-- (cf. `-moutline-atomics`), work in GHCi.
+--
+-- See #22012.
+--
+-- See Note [ARM outline atomics and the RTS linker] in m4/fp_armv8_outline_atomics.m4.
+
+{-# LANGUAGE ForeignFunctionInterface #-}
+
+module Main where
+
+import Foreign.C.Types
+
+foreign import ccall unsafe "test" c_test :: IO ()
+
+main = c_test
+
=====================================
testsuite/tests/rts/T22012.stdout
=====================================
@@ -0,0 +1,11 @@
+# CAS
+success=1
+old=42
+x=43
+# Swap
+x=2
+y=43
+# Fetch-Add
+x=4
+y=2
+
=====================================
testsuite/tests/rts/T22012_c.c
=====================================
@@ -0,0 +1,26 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+
+void test (void) {
+ _Atomic uint32_t x = 42;
+ uint32_t y = 42;
+
+ bool success = atomic_compare_exchange_strong(&x, &y, 43);
+ printf("# CAS\n");
+ printf("success=%u\n", (int) success);
+ printf("old=%u\n", y);
+ printf("x=%u\n", x);
+
+ printf("# Swap\n");
+ y = atomic_exchange(&x, 2);
+ printf("x=%u\n", x);
+ printf("y=%u\n", y);
+
+ printf("# Fetch-Add\n");
+ y = atomic_fetch_add(&x, 2);
+ printf("x=%u\n", x);
+ printf("y=%u\n", y);
+}
+
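The expected output in T22012.stdout follows directly from these operations:
the CAS succeeds (y held 42, the value in x), leaving x=43 and y=42; the swap
stores 2 and returns the previous 43; and atomic_fetch_add returns the
pre-add value 2 while leaving x=4. Note also that the convenience forms used
here are defined by C11 as the `_explicit` variants with
memory_order_seq_cst (an equivalence, not a change to the test):

    #include <stdatomic.h>
    #include <stdint.h>

    /* The convenience form and the _explicit form with
     * memory_order_seq_cst are equivalent per C11. */
    void equivalent(_Atomic uint32_t *x) {
        atomic_fetch_add(x, 2);
        atomic_fetch_add_explicit(x, 2, memory_order_seq_cst);
    }

On an outline-atomics toolchain, these seq_cst operations are what end up
calling into the `__aarch64_*` wrappers registered above.
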
=====================================
testsuite/tests/rts/all.T
=====================================
@@ -581,6 +581,8 @@ test('decodeMyStack_emptyListForMissingFlag',
, js_broken(22261) # cloneMyStack# not yet implemented
], compile_and_run, [''])
+test('T22012', extra_ways(['ghci']), compile_and_run, ['T22012_c.c'])
+
# Skip for JS platform as the JS RTS is always single threaded
test('T22795a', [only_ways(['normal']), js_skip, req_ghc_with_threaded_rts], compile_and_run, ['-threaded'])
test('T22795b', [only_ways(['normal']), js_skip], compile_and_run, ['-single-threaded'])
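Here extra_ways(['ghci']) runs T22012 in the GHCi way in addition to the
default compiled ways, which is what actually exercises the RTS linker path
that this patch fixes.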
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/d636cf400bc82669dec5fe536e12b41e887f13b4...ab659ad9a0201eb86029c6e119672a83fcb2d95d