| author | Rich Felker <dalias@aerifal.cx> | 2019-04-10 18:34:38 -0400 | 
|---|---|---|
| committer | Rich Felker <dalias@aerifal.cx> | 2019-04-10 18:34:38 -0400 | 
| commit | 6aeb9c6703670649ee09b3c8575fb428168bb75c (patch) | |
| tree | 673b8bf44593bce265ae7abba07d13d07b307459 | |
| parent | f76d51a1cca1236efc50ee60c0bad3354079b854 (diff) | |
| download | musl-6aeb9c6703670649ee09b3c8575fb428168bb75c.tar.gz | |
use inline syscalls for powerpc (32-bit)
the inline syscall code is copied directly from powerpc64. the
register clobber lists may be broader than necessary on both; if that
turns out to be the case, it can be fixed later.
| -rw-r--r-- | arch/powerpc/syscall_arch.h | 86 |
|---|---|---|

1 file changed, 84 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/powerpc/syscall_arch.h b/arch/powerpc/syscall_arch.h
index 004060e6..e26a3c34 100644
--- a/arch/powerpc/syscall_arch.h
+++ b/arch/powerpc/syscall_arch.h
@@ -3,7 +3,89 @@
 ((union { long long ll; long l[2]; }){ .ll = x }).l[1]
 #define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
 
-#undef SYSCALL_NO_INLINE
-#define SYSCALL_NO_INLINE
+static inline long __syscall0(long n)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3");
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "=r"(r3)
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall1(long n, long a)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3)
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall2(long n, long a, long b)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	register long r4 __asm__("r4") = b;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3), "+r"(r4)
+	:: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall3(long n, long a, long b, long c)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	register long r4 __asm__("r4") = b;
+	register long r5 __asm__("r5") = c;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
+	:: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall4(long n, long a, long b, long c, long d)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	register long r4 __asm__("r4") = b;
+	register long r5 __asm__("r5") = c;
+	register long r6 __asm__("r6") = d;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6)
+	:: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall5(long n, long a, long b, long c, long d, long e)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	register long r4 __asm__("r4") = b;
+	register long r5 __asm__("r5") = c;
+	register long r6 __asm__("r6") = d;
+	register long r7 __asm__("r7") = e;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7)
+	:: "memory", "cr0", "r8", "r9", "r10", "r11", "r12");
+	return r3;
+}
+
+static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
+{
+	register long r0 __asm__("r0") = n;
+	register long r3 __asm__("r3") = a;
+	register long r4 __asm__("r4") = b;
+	register long r5 __asm__("r5") = c;
+	register long r6 __asm__("r6") = d;
+	register long r7 __asm__("r7") = e;
+	register long r8 __asm__("r8") = f;
+	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
+	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7), "+r"(r8)
+	:: "memory", "cr0", "r9", "r10", "r11", "r12");
+	return r3;
+}
 
 #define SYSCALL_FADVISE_6_ARG
```
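For context on where the negated value ends up: musl's generic return-path helper maps a `-errno` result onto the usual -1-with-errno convention. The sketch below mirrors src/internal/syscall_ret.c (lightly commented; the 4096 cutoff reflects the kernel's reserved errno range):

```c
#include <errno.h>

/* Results in the top 4096 values of the unsigned range encode -errno;
 * anything else is a genuine (possibly large) success value. */
long __syscall_ret(unsigned long r)
{
	if (r > -4096UL) {
		errno = -r;
		return -1;
	}
	return r;
}
```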
