summaryrefslogtreecommitdiff
path: root/arch/x32/syscall_arch.h
diff options
context:
space:
mode:
authorrofl0r <retnyg@gmx.net>2014-01-07 22:53:38 +0100
committerrofl0r <retnyg@gmx.net>2014-02-23 11:09:16 +0100
commit664cd341921007cea52c8891f27ce35927dca378 (patch)
treeb6aa7594609a40616db8458f879dfdb0dddf4b7c /arch/x32/syscall_arch.h
parent323272db175204b951f119dae4bd99ef05e20f13 (diff)
downloadmusl-664cd341921007cea52c8891f27ce35927dca378.tar.gz
x32 port (diff against vanilla x86_64)
Diffstat (limited to 'arch/x32/syscall_arch.h')
-rw-r--r--arch/x32/syscall_arch.h93
1 file changed, 76 insertions, 17 deletions
diff --git a/arch/x32/syscall_arch.h b/arch/x32/syscall_arch.h
index a85c440c..57e76896 100644
--- a/arch/x32/syscall_arch.h
+++ b/arch/x32/syscall_arch.h
@@ -1,29 +1,45 @@
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)
/* Widen a syscall argument to 64 bits for the kernel interface.
 * The 1?(X):0ULL trick: for integer X the ternary's common type is at
 * least unsigned long long (sizeof >= 8), so integers take the
 * sign-preserving (long long) branch; only sub-8-byte operands --
 * i.e. pointers on this ILP32-on-x86_64 ABI -- are zero-extended via
 * unsigned long.  Fully parenthesized for macro hygiene so uses like
 * __scc(x) == y parse as intended. */
#define __scc(X) ((sizeof(1?(X):0ULL) < 8) ? (unsigned long) (X) : (long long) (X))
#define syscall_arg_t long long
/* Userspace timespec layout on x32: 64-bit tv_sec, plain long tv_nsec. */
struct __timespec { long long tv_sec; long tv_nsec; };
/* Kernel's layout: both fields 64-bit; the stubs below convert to it. */
struct __timespec_kernel { long long tv_sec; long long tv_nsec; };
/* Reinterpret a widened syscall arg as a userspace timespec pointer. */
#define __tsc(X) ((struct __timespec*)(unsigned long)(X))
/* Zero-argument syscall.  rcx/r11 are clobbered by the syscall
 * instruction itself; "memory" stops the compiler from caching
 * values across the kernel boundary. */
static __inline long __syscall0(long long n)
{
	unsigned long rax;
	__asm__ __volatile__ ("syscall"
			      : "=a"(rax)
			      : "a"(n)
			      : "rcx", "r11", "memory");
	return rax;
}
/* One-argument syscall; the argument travels in rdi per the
 * x86-64 kernel calling convention. */
static __inline long __syscall1(long long n, long long a1)
{
	unsigned long rax;
	__asm__ __volatile__ ("syscall"
			      : "=a"(rax)
			      : "a"(n), "D"(a1)
			      : "rcx", "r11", "memory");
	return rax;
}
-static __inline long __syscall2(long n, long a1, long a2)
+static __inline long __syscall2(long long n, long long a1, long long a2)
{
unsigned long ret;
+ struct __timespec *ts2 = 0;
+ switch (n) {
+ case SYS_nanosleep:
+ if(a1) a1 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(a1)->tv_sec, .tv_nsec = __tsc(a1)->tv_nsec});
+ break;
+ case SYS_clock_settime:
+ if(a2) a2 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(a2)->tv_sec, .tv_nsec = __tsc(a2)->tv_nsec});
+ }
__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2)
- : "rcx", "r11", "memory");
+ : "rcx", "r11", "memory");
return ret;
}
/* Three-argument syscall (args in rdi, rsi, rdx).  The "d"(a3)
 * constraint line, lost at a diff-hunk boundary, is restored to
 * match the sibling stubs. */
static __inline long __syscall3(long long n, long long a1, long long a2, long long a3)
{
	unsigned long ret;
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
						  "d"(a3) : "rcx", "r11", "memory");
	return ret;
}
-static __inline long __syscall4(long n, long a1, long a2, long a3, long a4)
+static __inline long __syscall4(long long n, long long a1, long long a2, long long a3,
+ long long a4)
{
unsigned long ret;
- register long r10 __asm__("r10") = a4;
+ register long long r10 __asm__("r10") = a4;
+ switch (n) {
+ case SYS_futex:
+ if((a2 & (~128 /* FUTEX_PRIVATE_FLAG */)) == 0 /* FUTEX_WAIT */) {
+ if(r10) r10 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(r10)->tv_sec, .tv_nsec = __tsc(r10)->tv_nsec});
+ }
+ break;
+ case SYS_utimensat:
+ if(a3) a3 = (unsigned long) ((struct __timespec_kernel[2]) {
+ [0] = {.tv_sec = __tsc(a3)[0].tv_sec, .tv_nsec = __tsc(a3)[0].tv_nsec},
+ [1] = {.tv_sec = __tsc(a3)[1].tv_sec, .tv_nsec = __tsc(a3)[1].tv_nsec},
+ });
+ break;
+ case SYS_clock_nanosleep:
+ case SYS_rt_sigtimedwait: case SYS_ppoll:
+ if(a3) a3 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(a3)->tv_sec, .tv_nsec = __tsc(a3)->tv_nsec});
+ }
__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
- "d"(a3), "r"(r10): "rcx", "r11", "memory");
+ "d"(a3), "r"(r10): "rcx", "r11", "memory");
return ret;
}
-static __inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
+static __inline long __syscall5(long long n, long long a1, long long a2, long long a3,
+ long long a4, long long a5)
{
unsigned long ret;
- register long r10 __asm__("r10") = a4;
- register long r8 __asm__("r8") = a5;
+ register long long r10 __asm__("r10") = a4;
+ register long long r8 __asm__("r8") = a5;
+ switch (n) {
+ case SYS_futex:
+ if((a2 & (~128 /* FUTEX_PRIVATE_FLAG */)) == 0 /* FUTEX_WAIT */) {
+ if(r10) r10 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(r10)->tv_sec, .tv_nsec = __tsc(r10)->tv_nsec});
+ }
+ break;
+ case SYS_mq_timedsend: case SYS_mq_timedreceive:
+ if(r8) r8 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(r8)->tv_sec, .tv_nsec = __tsc(r8)->tv_nsec});
+ }
__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
- "d"(a3), "r"(r10), "r"(r8) : "rcx", "r11", "memory");
+ "d"(a3), "r"(r10), "r"(r8) : "rcx", "r11", "memory");
return ret;
}
-static __inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6)
+static __inline long __syscall6(long long n, long long a1, long long a2, long long a3,
+ long long a4, long long a5, long long a6)
{
unsigned long ret;
- register long r10 __asm__("r10") = a4;
- register long r8 __asm__("r8") = a5;
- register long r9 __asm__("r9") = a6;
+ register long long r10 __asm__("r10") = a4;
+ register long long r8 __asm__("r8") = a5;
+ register long long r9 __asm__("r9") = a6;
+ switch (n) {
+ case SYS_futex:
+ if((a2 & (~128 /* FUTEX_PRIVATE_FLAG */)) == 0 /* FUTEX_WAIT */) {
+ if(r10) r10 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(r10)->tv_sec, .tv_nsec = __tsc(r10)->tv_nsec});
+ }
+ break;
+ case SYS_pselect6:
+ if(r8) r8 = (unsigned long) (&(struct __timespec_kernel) {
+ .tv_sec = __tsc(r8)->tv_sec, .tv_nsec = __tsc(r8)->tv_nsec});
+ }
__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
- "d"(a3), "r"(r10), "r"(r8), "r"(r9) : "rcx", "r11", "memory");
+ "d"(a3), "r"(r10), "r"(r8), "r"(r9) : "rcx", "r11", "memory");
return ret;
}