-rw-r--r--  arch/powerpc/bits/hwcap.h                 2
-rw-r--r--  arch/powerpc/syscall_arch.h              14
-rw-r--r--  arch/powerpc64/bits/hwcap.h               2
-rw-r--r--  arch/powerpc64/syscall_arch.h            14
-rw-r--r--  include/elf.h                             2
-rw-r--r--  include/netinet/in.h                      3
-rw-r--r--  include/sys/stat.h                       10
-rw-r--r--  ldso/dlstart.c                            2
-rw-r--r--  src/include/sys/auxv.h                    2
-rw-r--r--  src/setjmp/riscv32/longjmp.S             30
-rw-r--r--  src/setjmp/riscv32/setjmp.S              30
-rw-r--r--  src/setjmp/riscv64/longjmp.S             30
-rw-r--r--  src/setjmp/riscv64/setjmp.S              30
-rw-r--r--  src/stdio/vfprintf.c                     14
-rw-r--r--  src/thread/aarch64/__set_thread_area.c   27
-rw-r--r--  src/thread/aarch64/__set_thread_area.s    7
-rw-r--r--  src/thread/aarch64/clone.s                2
-rw-r--r--  src/thread/s390x/__tls_get_offset.s      14
18 files changed, 148 insertions, 87 deletions
diff --git a/arch/powerpc/bits/hwcap.h b/arch/powerpc/bits/hwcap.h
index 803de9b5..12981623 100644
--- a/arch/powerpc/bits/hwcap.h
+++ b/arch/powerpc/bits/hwcap.h
@@ -41,3 +41,5 @@
#define PPC_FEATURE2_DARN 0x00200000
#define PPC_FEATURE2_SCV 0x00100000
#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000
+#define PPC_FEATURE2_ARCH_3_1 0x00040000
+#define PPC_FEATURE2_MMA 0x00020000
diff --git a/arch/powerpc/syscall_arch.h b/arch/powerpc/syscall_arch.h
index 54c885cb..fe893af4 100644
--- a/arch/powerpc/syscall_arch.h
+++ b/arch/powerpc/syscall_arch.h
@@ -9,7 +9,7 @@ static inline long __syscall0(long n)
register long r3 __asm__("r3");
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "=r"(r3)
- :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -19,7 +19,7 @@ static inline long __syscall1(long n, long a)
register long r3 __asm__("r3") = a;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3)
- :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -30,7 +30,7 @@ static inline long __syscall2(long n, long a, long b)
register long r4 __asm__("r4") = b;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4)
- :: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -42,7 +42,7 @@ static inline long __syscall3(long n, long a, long b, long c)
register long r5 __asm__("r5") = c;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
- :: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -55,7 +55,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
register long r6 __asm__("r6") = d;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6)
- :: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -69,7 +69,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
register long r7 __asm__("r7") = e;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7)
- :: "memory", "cr0", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -84,7 +84,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, lo
register long r8 __asm__("r8") = f;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7), "+r"(r8)
- :: "memory", "cr0", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
diff --git a/arch/powerpc64/bits/hwcap.h b/arch/powerpc64/bits/hwcap.h
index 803de9b5..12981623 100644
--- a/arch/powerpc64/bits/hwcap.h
+++ b/arch/powerpc64/bits/hwcap.h
@@ -41,3 +41,5 @@
#define PPC_FEATURE2_DARN 0x00200000
#define PPC_FEATURE2_SCV 0x00100000
#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000
+#define PPC_FEATURE2_ARCH_3_1 0x00040000
+#define PPC_FEATURE2_MMA 0x00020000
diff --git a/arch/powerpc64/syscall_arch.h b/arch/powerpc64/syscall_arch.h
index 7d34fbe4..4c5d3ae9 100644
--- a/arch/powerpc64/syscall_arch.h
+++ b/arch/powerpc64/syscall_arch.h
@@ -7,7 +7,7 @@ static inline long __syscall0(long n)
register long r3 __asm__("r3");
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "=r"(r3)
- :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -17,7 +17,7 @@ static inline long __syscall1(long n, long a)
register long r3 __asm__("r3") = a;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3)
- :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -28,7 +28,7 @@ static inline long __syscall2(long n, long a, long b)
register long r4 __asm__("r4") = b;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4)
- :: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -40,7 +40,7 @@ static inline long __syscall3(long n, long a, long b, long c)
register long r5 __asm__("r5") = c;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
- :: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -53,7 +53,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
register long r6 __asm__("r6") = d;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6)
- :: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -67,7 +67,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
register long r7 __asm__("r7") = e;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7)
- :: "memory", "cr0", "r8", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
@@ -82,7 +82,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, lo
register long r8 __asm__("r8") = f;
__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7), "+r"(r8)
- :: "memory", "cr0", "r9", "r10", "r11", "r12");
+ :: "memory", "cr0", "r9", "r10", "r11", "r12", "ctr", "xer");
return r3;
}
diff --git a/include/elf.h b/include/elf.h
index 8b622f63..d6ae539a 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -1040,6 +1040,8 @@ typedef struct {
#define AT_RANDOM 25
#define AT_HWCAP2 26
+#define AT_HWCAP3 29
+#define AT_HWCAP4 30
#define AT_EXECFN 31
diff --git a/include/netinet/in.h b/include/netinet/in.h
index fb628b61..60bbaa75 100644
--- a/include/netinet/in.h
+++ b/include/netinet/in.h
@@ -132,7 +132,8 @@ uint16_t ntohs(uint16_t);
#define IN6_IS_ADDR_V4COMPAT(a) \
(((uint32_t *) (a))[0] == 0 && ((uint32_t *) (a))[1] == 0 && \
- ((uint32_t *) (a))[2] == 0 && ((uint8_t *) (a))[15] > 1)
+ ((uint32_t *) (a))[2] == 0 && \
+ !IN6_IS_ADDR_UNSPECIFIED(a) && !IN6_IS_ADDR_LOOPBACK(a))
#define IN6_IS_ADDR_MC_NODELOCAL(a) \
(IN6_IS_ADDR_MULTICAST(a) && ((((uint8_t *) (a))[1] & 0xf) == 0x1))
diff --git a/include/sys/stat.h b/include/sys/stat.h
index c924ce2f..4f7dc2b1 100644
--- a/include/sys/stat.h
+++ b/include/sys/stat.h
@@ -123,6 +123,8 @@ int lchmod(const char *, mode_t);
#define STATX_MNT_ID 0x1000U
#define STATX_DIOALIGN 0x2000U
#define STATX_MNT_ID_UNIQUE 0x4000U
+#define STATX_SUBVOL 0x8000U
+#define STATX_WRITE_ATOMIC 0x10000U
#define STATX_ATTR_COMPRESSED 0x4
#define STATX_ATTR_IMMUTABLE 0x10
@@ -133,6 +135,7 @@ int lchmod(const char *, mode_t);
#define STATX_ATTR_MOUNT_ROOT 0x2000
#define STATX_ATTR_VERITY 0x100000
#define STATX_ATTR_DAX 0x200000
+#define STATX_ATTR_WRITE_ATOMIC 0x400000
struct statx_timestamp {
int64_t tv_sec;
@@ -164,7 +167,12 @@ struct statx {
uint32_t stx_dio_mem_align;
uint32_t stx_dio_offset_align;
uint64_t stx_subvol;
- uint64_t __pad1[11];
+ uint32_t stx_atomic_write_unit_min;
+ uint32_t stx_atomic_write_unit_max;
+ uint32_t stx_atomic_write_segments_max;
+ uint32_t __pad1[1];
+ uint64_t __pad2[9];
+
};
int statx(int, const char *__restrict, int, unsigned, struct statx *__restrict);
diff --git a/ldso/dlstart.c b/ldso/dlstart.c
index 259f5e18..4aac42bc 100644
--- a/ldso/dlstart.c
+++ b/ldso/dlstart.c
@@ -45,7 +45,7 @@ hidden void _dlstart_c(size_t *sp, size_t *dynv)
/* If dynv is null, the entry point was started from loader
* that is not fdpic-aware. We can assume normal fixed-
* displacement ELF loading was performed, but when ldso was
- * run as a command, finding the Ehdr is a heursitic: we
+ * run as a command, finding the Ehdr is a heuristic: we
* have to assume Phdrs start in the first 4k of the file. */
base = aux[AT_BASE];
if (!base) base = aux[AT_PHDR] & -4096;
diff --git a/src/include/sys/auxv.h b/src/include/sys/auxv.h
index 9358a4a5..63c5bfe9 100644
--- a/src/include/sys/auxv.h
+++ b/src/include/sys/auxv.h
@@ -5,6 +5,6 @@
#include <features.h>
-hidden unsigned long __getauxval(unsigned long);
+unsigned long __getauxval(unsigned long);
#endif
diff --git a/src/setjmp/riscv32/longjmp.S b/src/setjmp/riscv32/longjmp.S
index f9cb3318..b4e5458d 100644
--- a/src/setjmp/riscv32/longjmp.S
+++ b/src/setjmp/riscv32/longjmp.S
@@ -23,18 +23,24 @@ longjmp:
lw ra, 52(a0)
#ifndef __riscv_float_abi_soft
- fld fs0, 56(a0)
- fld fs1, 64(a0)
- fld fs2, 72(a0)
- fld fs3, 80(a0)
- fld fs4, 88(a0)
- fld fs5, 96(a0)
- fld fs6, 104(a0)
- fld fs7, 112(a0)
- fld fs8, 120(a0)
- fld fs9, 128(a0)
- fld fs10, 136(a0)
- fld fs11, 144(a0)
+#ifdef __riscv_float_abi_double
+#define FLX fld
+#else
+#define FLX flw
+#endif
+
+ FLX fs0, 56(a0)
+ FLX fs1, 64(a0)
+ FLX fs2, 72(a0)
+ FLX fs3, 80(a0)
+ FLX fs4, 88(a0)
+ FLX fs5, 96(a0)
+ FLX fs6, 104(a0)
+ FLX fs7, 112(a0)
+ FLX fs8, 120(a0)
+ FLX fs9, 128(a0)
+ FLX fs10, 136(a0)
+ FLX fs11, 144(a0)
#endif
seqz a0, a1
diff --git a/src/setjmp/riscv32/setjmp.S b/src/setjmp/riscv32/setjmp.S
index 8a75cf55..5a1a41ef 100644
--- a/src/setjmp/riscv32/setjmp.S
+++ b/src/setjmp/riscv32/setjmp.S
@@ -23,18 +23,24 @@ setjmp:
sw ra, 52(a0)
#ifndef __riscv_float_abi_soft
- fsd fs0, 56(a0)
- fsd fs1, 64(a0)
- fsd fs2, 72(a0)
- fsd fs3, 80(a0)
- fsd fs4, 88(a0)
- fsd fs5, 96(a0)
- fsd fs6, 104(a0)
- fsd fs7, 112(a0)
- fsd fs8, 120(a0)
- fsd fs9, 128(a0)
- fsd fs10, 136(a0)
- fsd fs11, 144(a0)
+#ifdef __riscv_float_abi_double
+#define FSX fsd
+#else
+#define FSX fsw
+#endif
+
+ FSX fs0, 56(a0)
+ FSX fs1, 64(a0)
+ FSX fs2, 72(a0)
+ FSX fs3, 80(a0)
+ FSX fs4, 88(a0)
+ FSX fs5, 96(a0)
+ FSX fs6, 104(a0)
+ FSX fs7, 112(a0)
+ FSX fs8, 120(a0)
+ FSX fs9, 128(a0)
+ FSX fs10, 136(a0)
+ FSX fs11, 144(a0)
#endif
li a0, 0
diff --git a/src/setjmp/riscv64/longjmp.S b/src/setjmp/riscv64/longjmp.S
index 41e2d210..982475c7 100644
--- a/src/setjmp/riscv64/longjmp.S
+++ b/src/setjmp/riscv64/longjmp.S
@@ -23,18 +23,24 @@ longjmp:
ld ra, 104(a0)
#ifndef __riscv_float_abi_soft
- fld fs0, 112(a0)
- fld fs1, 120(a0)
- fld fs2, 128(a0)
- fld fs3, 136(a0)
- fld fs4, 144(a0)
- fld fs5, 152(a0)
- fld fs6, 160(a0)
- fld fs7, 168(a0)
- fld fs8, 176(a0)
- fld fs9, 184(a0)
- fld fs10, 192(a0)
- fld fs11, 200(a0)
+#ifdef __riscv_float_abi_double
+#define FLX fld
+#else
+#define FLX flw
+#endif
+
+ FLX fs0, 112(a0)
+ FLX fs1, 120(a0)
+ FLX fs2, 128(a0)
+ FLX fs3, 136(a0)
+ FLX fs4, 144(a0)
+ FLX fs5, 152(a0)
+ FLX fs6, 160(a0)
+ FLX fs7, 168(a0)
+ FLX fs8, 176(a0)
+ FLX fs9, 184(a0)
+ FLX fs10, 192(a0)
+ FLX fs11, 200(a0)
#endif
seqz a0, a1
diff --git a/src/setjmp/riscv64/setjmp.S b/src/setjmp/riscv64/setjmp.S
index 51249672..0795bf7d 100644
--- a/src/setjmp/riscv64/setjmp.S
+++ b/src/setjmp/riscv64/setjmp.S
@@ -23,18 +23,24 @@ setjmp:
sd ra, 104(a0)
#ifndef __riscv_float_abi_soft
- fsd fs0, 112(a0)
- fsd fs1, 120(a0)
- fsd fs2, 128(a0)
- fsd fs3, 136(a0)
- fsd fs4, 144(a0)
- fsd fs5, 152(a0)
- fsd fs6, 160(a0)
- fsd fs7, 168(a0)
- fsd fs8, 176(a0)
- fsd fs9, 184(a0)
- fsd fs10, 192(a0)
- fsd fs11, 200(a0)
+#ifdef __riscv_float_abi_double
+#define FSX fsd
+#else
+#define FSX fsw
+#endif
+
+ FSX fs0, 112(a0)
+ FSX fs1, 120(a0)
+ FSX fs2, 128(a0)
+ FSX fs3, 136(a0)
+ FSX fs4, 144(a0)
+ FSX fs5, 152(a0)
+ FSX fs6, 160(a0)
+ FSX fs7, 168(a0)
+ FSX fs8, 176(a0)
+ FSX fs9, 184(a0)
+ FSX fs10, 192(a0)
+ FSX fs11, 200(a0)
#endif
li a0, 0
diff --git a/src/stdio/vfprintf.c b/src/stdio/vfprintf.c
index 76733997..514a44dd 100644
--- a/src/stdio/vfprintf.c
+++ b/src/stdio/vfprintf.c
@@ -180,11 +180,13 @@ typedef char compiler_defines_long_double_incorrectly[9-(int)sizeof(long double)
static int fmt_fp(FILE *f, long double y, int w, int p, int fl, int t, int ps)
{
- int bufsize = (ps==BIGLPRE)
- ? (LDBL_MANT_DIG+28)/29 + 1 + // mantissa expansion
- (LDBL_MAX_EXP+LDBL_MANT_DIG+28+8)/9 // exponent expansion
- : (DBL_MANT_DIG+28)/29 + 1 +
- (DBL_MAX_EXP+DBL_MANT_DIG+28+8)/9;
+ int max_mant_dig = (ps==BIGLPRE) ? LDBL_MANT_DIG : DBL_MANT_DIG;
+ int max_exp = (ps==BIGLPRE) ? LDBL_MAX_EXP : DBL_MAX_EXP;
+ /* One slot for 29 bits left of radix point, a slot for every 29-21=8
+ * bits right of the radix point, and one final zero slot. */
+ int max_mant_slots = 1 + (max_mant_dig-29+7)/8 + 1;
+ int max_exp_slots = (max_exp+max_mant_dig+28+8)/9;
+ int bufsize = max_mant_slots + max_exp_slots;
uint32_t big[bufsize];
uint32_t *a, *d, *r, *z;
int e2=0, e, i, j, l;
@@ -266,7 +268,7 @@ static int fmt_fp(FILE *f, long double y, int w, int p, int fl, int t, int ps)
if (y) y *= 0x1p28, e2-=28;
if (e2<0) a=r=z=big;
- else a=r=z=big+sizeof(big)/sizeof(*big) - LDBL_MANT_DIG - 1;
+ else a=r=z=big+sizeof(big)/sizeof(*big) - max_mant_slots - 1;
do {
*z = y;
diff --git a/src/thread/aarch64/__set_thread_area.c b/src/thread/aarch64/__set_thread_area.c
new file mode 100644
index 00000000..2ec788e8
--- /dev/null
+++ b/src/thread/aarch64/__set_thread_area.c
@@ -0,0 +1,27 @@
+#include <elf.h>
+#include "libc.h"
+
+#define BITRANGE(a,b) (2*(1UL<<(b))-(1UL<<(a)))
+
+int __set_thread_area(void *p)
+{
+ __asm__ __volatile__ ("msr tpidr_el0,%0" : : "r"(p) : "memory");
+
+ /* Mask off hwcap bits for SME and unknown future features. This is
+ * necessary because SME is not safe to use without libc support for
+ * it, and we do not (yet) have such support. */
+ for (size_t *v = libc.auxv; *v; v+=2) {
+ if (v[0]==AT_HWCAP) {
+ v[1] &= ~BITRANGE(42,63); /* 42-47 are SME */
+ } else if (v[0]==AT_HWCAP2) {
+ v[1] &= ~(BITRANGE(23,30)
+ | BITRANGE(37,42)
+ | BITRANGE(57,62));
+ } else if (v[0]==AT_HWCAP3 || v[0]==AT_HWCAP4) {
+ v[0] = AT_IGNORE;
+ v[1] = 0;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/thread/aarch64/__set_thread_area.s b/src/thread/aarch64/__set_thread_area.s
deleted file mode 100644
index fd0df34b..00000000
--- a/src/thread/aarch64/__set_thread_area.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global __set_thread_area
-.hidden __set_thread_area
-.type __set_thread_area,@function
-__set_thread_area:
- msr tpidr_el0,x0
- mov w0,#0
- ret
diff --git a/src/thread/aarch64/clone.s b/src/thread/aarch64/clone.s
index 9ac272bd..aff8155b 100644
--- a/src/thread/aarch64/clone.s
+++ b/src/thread/aarch64/clone.s
@@ -24,7 +24,7 @@ __clone:
// parent
ret
// child
-1: mov fp, 0
+1: mov x29, 0
ldp x1,x0,[sp],#16
blr x1
mov x8,#93 // SYS_exit
diff --git a/src/thread/s390x/__tls_get_offset.s b/src/thread/s390x/__tls_get_offset.s
index 405f118b..056c9110 100644
--- a/src/thread/s390x/__tls_get_offset.s
+++ b/src/thread/s390x/__tls_get_offset.s
@@ -1,17 +1,17 @@
.global __tls_get_offset
.type __tls_get_offset,%function
__tls_get_offset:
- ear %r0, %a0
- sllg %r0, %r0, 32
- ear %r0, %a1
+ ear %r3, %a0
+ sllg %r3, %r3, 32
+ ear %r3, %a1
la %r1, 0(%r2, %r12)
- lg %r3, 0(%r1)
- sllg %r4, %r3, 3
- lg %r5, 8(%r0)
+ lg %r0, 0(%r1)
+ sllg %r4, %r0, 3
+ lg %r5, 8(%r3)
lg %r2, 0(%r4, %r5)
ag %r2, 8(%r1)
- sgr %r2, %r0
+ sgr %r2, %r3
br %r14