author	Rich Felker <dalias@aerifal.cx>	2016-01-21 19:50:55 +0000
committer	Rich Felker <dalias@aerifal.cx>	2016-01-21 19:50:55 +0000
commit	aa0db4b5d08ff6ac180a93678d8fd1799569a530 (patch)
tree	0523962a13eda32b0d35dfe147bb53df2c8bf232 /arch/aarch64/atomic_arch.h
parent	61b1e75f7d8004461f2e18f171c26c2f545eed32 (diff)
download	musl-aa0db4b5d08ff6ac180a93678d8fd1799569a530.tar.gz
overhaul aarch64 atomics for new atomics framework
Diffstat (limited to 'arch/aarch64/atomic_arch.h')
-rw-r--r--	arch/aarch64/atomic_arch.h	199
1 file changed, 25 insertions(+), 174 deletions(-)
diff --git a/arch/aarch64/atomic_arch.h b/arch/aarch64/atomic_arch.h
index 0755534f..84499e2c 100644
--- a/arch/aarch64/atomic_arch.h
+++ b/arch/aarch64/atomic_arch.h
@@ -1,19 +1,28 @@
-#define a_ctz_64 a_ctz_64
-static inline int a_ctz_64(uint64_t x)
+#define a_ll a_ll
+static inline int a_ll(volatile int *p)
{
- __asm__(
- " rbit %0, %1\n"
- " clz %0, %0\n"
- : "=r"(x) : "r"(x));
- return x;
+ int v;
+ __asm__ __volatile__ ("ldxr %0, %1" : "=r"(v) : "Q"(*p));
+ return v;
+}
+
+#define a_sc a_sc
+static inline int a_sc(volatile int *p, int v)
+{
+ int r;
+ __asm__ __volatile__ ("stxr %w0,%1,%2" : "=&r"(r) : "r"(v), "Q"(*p) : "memory");
+ return !r;
}

#define a_barrier a_barrier
static inline void a_barrier()
{
- __asm__ __volatile__("dmb ish");
+ __asm__ __volatile__ ("dmb ish" : : : "memory");
}

+#define a_pre_llsc a_barrier
+#define a_post_llsc a_barrier
+
#define a_cas_p a_cas_p
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
@@ -28,175 +37,17 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
" mov %0,%1\n"
"1: dmb ish\n"
: "=&r"(old)
- : "r"(t), "r"(s), "Q"(*(long*)p)
- : "memory", "cc");
- return old;
-}
-
-#define a_cas a_cas
-static inline int a_cas(volatile int *p, int t, int s)
-{
- int old;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " cmp %w0,%w1\n"
- " b.ne 1f\n"
- " stxr %w0,%w2,%3\n"
- " cbnz %w0,1b\n"
- " mov %w0,%w1\n"
- "1: dmb ish\n"
- : "=&r"(old)
- : "r"(t), "r"(s), "Q"(*p)
+ : "r"(t), "r"(s), "Q"(*(void *volatile *)p)
: "memory", "cc");
return old;
}

-#define a_swap a_swap
-static inline int a_swap(volatile int *x, int v)
-{
- int old, tmp;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " stxr %w1,%w2,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(old), "=&r"(tmp)
- : "r"(v), "Q"(*x)
- : "memory", "cc" );
- return old;
-}
-
-#define a_fetch_add a_fetch_add
-static inline int a_fetch_add(volatile int *x, int v)
-{
- int old, tmp;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " add %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(old), "=&r"(tmp)
- : "r"(v), "Q"(*x)
- : "memory", "cc" );
- return old-v;
-}
-
-#define a_inc a_inc
-static inline void a_inc(volatile int *x)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%2\n"
- " add %w0,%w0,#1\n"
- " stxr %w1,%w0,%2\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "Q"(*x)
- : "memory", "cc" );
-}
-
-#define a_dec a_dec
-static inline void a_dec(volatile int *x)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%2\n"
- " sub %w0,%w0,#1\n"
- " stxr %w1,%w0,%2\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "Q"(*x)
- : "memory", "cc" );
-}
-
-#define a_and_64 a_and_64
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %0,%3\n"
- " and %0,%0,%2\n"
- " stxr %w1,%0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-#define a_and a_and
-static inline void a_and(volatile int *p, int v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " and %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-#define a_or_64 a_or_64
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %0,%3\n"
- " orr %0,%0,%2\n"
- " stxr %w1,%0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-#define a_or_l a_or_l
-static inline void a_or_l(volatile void *p, long v)
-{
- return a_or_64(p, v);
-}
-
-#define a_or a_or
-static inline void a_or(volatile int *p, int v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " orr %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-#define a_store a_store
-static inline void a_store(volatile int *p, int x)
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
{
- __asm__ __volatile__(
- " dmb ish\n"
- " str %w1,%0\n"
- " dmb ish\n"
- : "=m"(*p)
- : "r"(x)
- : "memory", "cc" );
+ __asm__(
+ " rbit %0, %1\n"
+ " clz %0, %0\n"
+ : "=r"(x) : "r"(x));
+ return x;
}
-
-#define a_spin a_barrier
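
The per-operation assembly removed above is meant to be supplied instead by the shared atomics framework, which can build a_cas, a_swap, a_fetch_add and the rest on top of the a_ll/a_sc pair bracketed by a_pre_llsc/a_post_llsc. A minimal sketch of such a composition, assuming a_ll performs the load-exclusive and a_sc returns nonzero on a successful store-exclusive as defined in this header; the framework's actual code (src/internal/atomic.h) may differ in detail:

#ifndef a_cas
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	int old;
	a_pre_llsc();                      /* full barrier before the LL/SC loop */
	do old = a_ll(p);                  /* load-exclusive the current value */
	while (old == t && !a_sc(p, s));   /* retry only if the store-exclusive fails */
	a_post_llsc();                     /* full barrier after the loop */
	return old;                        /* caller treats old == t as success */
}
#endif

With only a_ll, a_sc, a_barrier and a_cas_p provided by the arch header, the other operations (swap, fetch-add, and/or, store) can be generated the same way, which is what allows this patch to delete them here.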