From 208eb584efbf995e0c5d92f76d5f4c08ae0054b4 Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Sat, 8 Sep 2012 22:43:14 -0400
Subject: syscall organization overhaul

now public syscall.h only exposes __NR_* and SYS_* constants and the
variadic syscall function. no macros or inline functions, no
__syscall_ret or other internal details, no 16-/32-bit legacy syscall
renaming, etc.

this logic has all been moved to src/internal/syscall.h with the
arch-specific parts in arch/$(ARCH)/syscall_arch.h, and the amount of
arch-specific stuff has been reduced to a minimum.

changes still need to be reviewed/double-checked. minimal testing on
i386 and mips has already been performed.
---
 arch/x86_64/syscall_arch.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 arch/x86_64/syscall_arch.h

(limited to 'arch/x86_64/syscall_arch.h')

diff --git a/arch/x86_64/syscall_arch.h b/arch/x86_64/syscall_arch.h
new file mode 100644
index 00000000..1c4b48a2
--- /dev/null
+++ b/arch/x86_64/syscall_arch.h
@@ -0,0 +1,64 @@
+#define __SYSCALL_LL_E(x) (x)
+#define __SYSCALL_LL_O(x) (x)
+
+#define __SYSCALL_SSLEN 8
+
+static __inline long __syscall0(long n)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n) : "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall1(long n, long a1)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1) : "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall2(long n, long a1, long a2)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2)
+			      : "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall3(long n, long a1, long a2, long a3)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
+			      "d"(a3) : "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall4(long n, long a1, long a2, long a3, long a4)
+{
+	unsigned long ret;
+	register long r10 __asm__("r10") = a4;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
+			      "d"(a3), "r"(r10): "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
+{
+	unsigned long ret;
+	register long r10 __asm__("r10") = a4;
+	register long r8 __asm__("r8") = a5;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
+			      "d"(a3), "r"(r10), "r"(r8) : "rcx", "r11", "memory");
+	return ret;
+}
+
+static __inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6)
+{
+	unsigned long ret;
+	register long r10 __asm__("r10") = a4;
+	register long r8 __asm__("r8") = a5;
+	register long r9 __asm__("r9") = a6;
+	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
+			      "d"(a3), "r"(r10), "r"(r8), "r"(r9) : "rcx", "r11", "memory");
+	return ret;
+}
--
cgit v1.2.1
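
A minimal usage sketch (not part of the patch): the program below calls the
__syscall3 wrapper from the header added above directly on x86_64 Linux. The
"syscall_arch.h" include path, the hard-coded SYS_write value, and the
checked() helper are illustrative assumptions; in musl proper, error decoding
is done by the internal __syscall_ret helper that this commit keeps out of
the public syscall.h.

/* Sketch only: exercise the inline __syscall3 wrapper on x86_64 Linux.
 * Assumes the header above is reachable as "syscall_arch.h". */
#include <errno.h>
#include "syscall_arch.h"

#define SYS_write 1	/* raw x86_64 syscall number for write(2) */

/* Linux reports failure by returning -errno in [-4095, -1]; decode it
 * here the way an internal helper such as __syscall_ret would. */
static long checked(long r)
{
	if (r < 0 && r > -4096) {
		errno = (int)-r;
		return -1;
	}
	return r;
}

int main(void)
{
	static const char msg[] = "hello from a raw syscall\n";
	long n = checked(__syscall3(SYS_write, 1, (long)msg, sizeof msg - 1));
	return n < 0 ? 1 : 0;
}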