path: root/arch/aarch64/syscall_arch.h
author     Szabolcs Nagy <nsz@port70.net>             2015-03-10 21:18:41 +0000
committer  Rich Felker <dalias@aerifal.cx>            2015-03-11 20:12:35 -0400
commit     01ef3dd9c5fa7a56aa370f244dd08e05c73010f5 (patch)
tree       8fe90d994a43124ff309d3af4c185e28b2b1d6ff /arch/aarch64/syscall_arch.h
parent     f4e4632abfa8297db1485e132bb15b9ef6c32a1b (diff)
download   musl-01ef3dd9c5fa7a56aa370f244dd08e05c73010f5.tar.gz
add aarch64 port
This adds complete aarch64 target support, including the big-endian subarch. Some of the long double math functions are known to be broken; otherwise the interfaces should be fully functional, but at this point consider this port experimental. Initial work on this port was done by Sireesh Tripurari and Kevin Bortis.
Diffstat (limited to 'arch/aarch64/syscall_arch.h')
-rw-r--r--  arch/aarch64/syscall_arch.h  80
1 file changed, 80 insertions, 0 deletions
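
The header added below encodes the Linux aarch64 syscall calling convention: the syscall number goes in x8, up to six arguments in x0 through x5, the kernel is entered with "svc 0", and the result comes back in x0. As a rough standalone illustration only (not part of the commit), the same pattern can be exercised outside musl, assuming an aarch64 Linux host and SYS_getpid from <sys/syscall.h>:

#include <stdio.h>
#include <sys/syscall.h>

/* Same shape as the header's __syscall0: number in x8, result in x0. */
static long raw_syscall0(long n)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0");
	__asm__ __volatile__ ("svc 0" : "=r"(x0) : "r"(x8) : "memory", "cc");
	return x0;
}

int main(void)
{
	long pid = raw_syscall0(SYS_getpid);  /* getpid takes no arguments */
	printf("getpid() via raw svc 0: %ld\n", pid);
	return 0;
}

The "memory" and "cc" clobbers mirror the __asm_syscall macro in the new header: the kernel may read or write user memory during the call, so the compiler must not cache values across the svc instruction.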
diff --git a/arch/aarch64/syscall_arch.h b/arch/aarch64/syscall_arch.h
new file mode 100644
index 00000000..ec7cc785
--- /dev/null
+++ b/arch/aarch64/syscall_arch.h
@@ -0,0 +1,80 @@
+#define __SYSCALL_LL_E(x) \
+((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
+((union { long long ll; long l[2]; }){ .ll = x }).l[1]
+#define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
+
+long (__syscall)(long, ...);
+
+#define __asm_syscall(...) do { \
+ __asm__ __volatile__ ( "svc 0" \
+ : "=r"(x0) : __VA_ARGS__ : "memory", "cc"); \
+ return x0; \
+ } while (0)
+
+static inline long __syscall0(long n)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0");
+ __asm_syscall("r"(x8));
+}
+
+static inline long __syscall1(long n, long a)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ __asm_syscall("r"(x8), "0"(x0));
+}
+
+static inline long __syscall2(long n, long a, long b)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ register long x1 __asm__("x1") = b;
+ __asm_syscall("r"(x8), "0"(x0), "r"(x1));
+}
+
+static inline long __syscall3(long n, long a, long b, long c)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ register long x1 __asm__("x1") = b;
+ register long x2 __asm__("x2") = c;
+ __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2));
+}
+
+static inline long __syscall4(long n, long a, long b, long c, long d)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ register long x1 __asm__("x1") = b;
+ register long x2 __asm__("x2") = c;
+ register long x3 __asm__("x3") = d;
+ __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3));
+}
+
+static inline long __syscall5(long n, long a, long b, long c, long d, long e)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ register long x1 __asm__("x1") = b;
+ register long x2 __asm__("x2") = c;
+ register long x3 __asm__("x3") = d;
+ register long x4 __asm__("x4") = e;
+ __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4));
+}
+
+static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
+{
+ register long x8 __asm__("x8") = n;
+ register long x0 __asm__("x0") = a;
+ register long x1 __asm__("x1") = b;
+ register long x2 __asm__("x2") = c;
+ register long x3 __asm__("x3") = d;
+ register long x4 __asm__("x4") = e;
+ register long x5 __asm__("x5") = f;
+ __asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5));
+}
+
+#define VDSO_USEFUL
+#define VDSO_CGT_SYM "__kernel_clock_gettime"
+#define VDSO_CGT_VER "LINUX_2.6.39"
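
The three VDSO_* macros at the end tell musl's clock_gettime to first try the kernel-provided vDSO entry point "__kernel_clock_gettime" (symbol version "LINUX_2.6.39") before falling back to a real svc 0 trap. As a hedged, standalone sketch (again not part of the commit), the vDSO's presence can be observed through the auxiliary vector:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR is the base address of the vDSO ELF object the
	 * kernel maps into every process, or 0 if none is mapped. */
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);
	if (vdso_base)
		printf("vDSO mapped at %#lx\n", vdso_base);
	else
		printf("no vDSO; clock_gettime falls back to svc 0\n");
	return 0;
}

When the vDSO is present, musl resolves the versioned symbol named by VDSO_CGT_SYM/VDSO_CGT_VER inside that mapping and calls it directly, avoiding a kernel entry on hot paths such as clock_gettime.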