author     Rich Felker <dalias@aerifal.cx>    2016-01-21 19:08:54 +0000
committer  Rich Felker <dalias@aerifal.cx>    2016-01-21 19:08:54 +0000
commit     1315596b510189b5159e742110b504177bdd4932 (patch)
tree       27159b7b95b944671454b11f36ee13308241f4b5 /arch/sh/atomic_arch.h
parent     ce3e24eaae91e7a90f87eb7f1edea8df5942de11 (diff)
download   musl-1315596b510189b5159e742110b504177bdd4932.tar.gz
refactor internal atomic.h
rather than having each arch provide its own atomic.h, there is a new shared atomic.h in src/internal which pulls arch-specific definitions from arch/$(ARCH)/atomic_arch.h. the latter can be extremely minimal, defining only a_cas or new ll/sc type primitives which the shared atomic.h will use to construct everything else.

this commit avoids making heavy changes to the individual archs' atomic implementations. definitions which are identical or near-identical to what the new shared atomic.h would produce have been removed, but otherwise the changes made are just hooking up the arch-specific files to the new infrastructure. major changes to take advantage of the new system will come in subsequent commits.
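To illustrate the layering described above, the fragment below is a rough sketch of the kind of generic fallback the shared src/internal/atomic.h can provide when an arch defines only a_cas. It is an illustrative cas-retry-loop sketch, not the verbatim musl header.

/* Illustrative sketch, not the actual src/internal/atomic.h: if
 * atomic_arch.h supplies only a_cas(p,t,s) (atomically replace *p
 * with s if *p == t, returning the old value), the shared header
 * can synthesize the remaining operations as cas retry loops. */
#ifndef a_swap
static inline int a_swap(volatile int *p, int v)
{
	int old;
	do old = *p;                      /* snapshot current value */
	while (a_cas(p, old, v) != old);  /* retry if *p changed underneath us */
	return old;
}
#endif

#ifndef a_fetch_add
static inline int a_fetch_add(volatile int *p, int v)
{
	int old;
	do old = *p;
	while (a_cas(p, old, old+v) != old);
	return old;
}
#endif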
Diffstat (limited to 'arch/sh/atomic_arch.h')
-rw-r--r--    arch/sh/atomic_arch.h    96
1 file changed, 96 insertions, 0 deletions
diff --git a/arch/sh/atomic_arch.h b/arch/sh/atomic_arch.h
new file mode 100644
index 00000000..2ac77246
--- /dev/null
+++ b/arch/sh/atomic_arch.h
@@ -0,0 +1,96 @@
+#define LLSC_CLOBBERS "r0", "t", "memory"
+#define LLSC_START(mem) "synco\n" \
+ "0: movli.l @" mem ", r0\n"
+#define LLSC_END(mem) \
+ "1: movco.l r0, @" mem "\n" \
+ " bf 0b\n" \
+ " synco\n"
+
+static inline int __sh_cas_llsc(volatile int *p, int t, int s)
+{
+ int old;
+ __asm__ __volatile__(
+ LLSC_START("%1")
+ " mov r0, %0\n"
+ " cmp/eq %0, %2\n"
+ " bf 1f\n"
+ " mov %3, r0\n"
+ LLSC_END("%1")
+ : "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS);
+ return old;
+}
+
+static inline int __sh_swap_llsc(volatile int *x, int v)
+{
+ int old;
+ __asm__ __volatile__(
+ LLSC_START("%1")
+ " mov r0, %0\n"
+ " mov %2, r0\n"
+ LLSC_END("%1")
+ : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
+ return old;
+}
+
+static inline int __sh_fetch_add_llsc(volatile int *x, int v)
+{
+ int old;
+ __asm__ __volatile__(
+ LLSC_START("%1")
+ " mov r0, %0\n"
+ " add %2, r0\n"
+ LLSC_END("%1")
+ : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
+ return old;
+}
+
+static inline void __sh_store_llsc(volatile int *p, int x)
+{
+ __asm__ __volatile__(
+ " synco\n"
+ " mov.l %1, @%0\n"
+ " synco\n"
+ : : "r"(p), "r"(x) : "memory");
+}
+
+static inline void __sh_and_llsc(volatile int *x, int v)
+{
+ __asm__ __volatile__(
+ LLSC_START("%0")
+ " and %1, r0\n"
+ LLSC_END("%0")
+ : : "r"(x), "r"(v) : LLSC_CLOBBERS);
+}
+
+static inline void __sh_or_llsc(volatile int *x, int v)
+{
+ __asm__ __volatile__(
+ LLSC_START("%0")
+ " or %1, r0\n"
+ LLSC_END("%0")
+ : : "r"(x), "r"(v) : LLSC_CLOBBERS);
+}
+
+#ifdef __SH4A__
+#define a_cas(p,t,s) __sh_cas_llsc(p,t,s)
+#define a_swap(x,v) __sh_swap_llsc(x,v)
+#define a_fetch_add(x,v) __sh_fetch_add_llsc(x, v)
+#define a_store(x,v) __sh_store_llsc(x, v)
+#define a_and(x,v) __sh_and_llsc(x, v)
+#define a_or(x,v) __sh_or_llsc(x, v)
+#else
+
+int __sh_cas(volatile int *, int, int);
+int __sh_swap(volatile int *, int);
+int __sh_fetch_add(volatile int *, int);
+void __sh_store(volatile int *, int);
+void __sh_and(volatile int *, int);
+void __sh_or(volatile int *, int);
+
+#define a_cas(p,t,s) __sh_cas(p,t,s)
+#define a_swap(x,v) __sh_swap(x,v)
+#define a_fetch_add(x,v) __sh_fetch_add(x, v)
+#define a_store(x,v) __sh_store(x, v)
+#define a_and(x,v) __sh_and(x, v)
+#define a_or(x,v) __sh_or(x, v)
+#endif
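For context on how these definitions get consumed, the fragment below is a hypothetical caller (not part of this commit or of musl) exercising the a_cas, a_store, and a_fetch_add macros with the signatures established here.

/* Hypothetical usage sketch, not code from this commit: a minimal
 * spinlock and a reference-count drop built on the a_* interface
 * that atomic_arch.h plugs into. */
static volatile int lock;

static void lock_take(void)
{
	/* spin until we atomically flip lock from 0 to 1;
	 * a_cas returns the old value, so 0 means we got it */
	while (a_cas(&lock, 0, 1));
}

static void lock_drop(void)
{
	a_store(&lock, 0);  /* a_store includes the needed barriers (synco on SH4A) */
}

static int ref_dec(volatile int *cnt)
{
	/* a_fetch_add returns the previous value; 1 means this was the last reference */
	return a_fetch_add(cnt, -1) == 1;
}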