author    Rich Felker <dalias@aerifal.cx>    2018-10-16 14:08:01 -0400
committer Rich Felker <dalias@aerifal.cx>    2018-10-16 14:11:46 -0400
commit    a4a3e4dbc086eb58e5cf6118480ef4825788e231 (patch)
tree      25d6ffb2cedf301bf69306e87b36aa5b3a68dbcb /arch/or1k
parent    7f01a734feddaabf366bc644c926e675656cab62 (diff)
make thread-pointer-loading asm non-volatile
this will allow the compiler to cache and reuse the result, meaning we no longer have to take care not to load it more than once for the sake of archs where the load may be expensive.

depends on commit 1c84c99913bf1cd47b866ed31e665848a0da84a2 for correctness, since otherwise the compiler could hoist loads during stage 3 of dynamic linking before the initial thread-pointer setup.
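as a rough sketch (not part of the patch) of what this enables: with the volatile qualifier removed, an output-only asm with no inputs and no side effects becomes eligible for common subexpression elimination, so repeated calls to __pthread_self() in one function can share a single copy of r10. the surrounding function and the tid field below are hypothetical, and the register binding assumes an or1k target.

    /* illustration only; assumes an or1k toolchain and a simplified
       struct pthread with a hypothetical tid member */
    struct pthread { int tid; };

    static inline struct pthread *__pthread_self()
    {
    	register char *tp __asm__("r10");
    	__asm__ ("" : "=r" (tp) );	/* non-volatile: result may be cached */
    	return (struct pthread *) (tp - sizeof(struct pthread));
    }

    int both_tids(void)
    {
    	/* the compiler may emit a single read of r10 for both calls,
    	   since the asm has no inputs, no clobbers, and is not volatile */
    	return __pthread_self()->tid + __pthread_self()->tid;
    }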
Diffstat (limited to 'arch/or1k')
-rw-r--r--  arch/or1k/pthread_arch.h  |  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/or1k/pthread_arch.h b/arch/or1k/pthread_arch.h
index 521b9c53..1b806f89 100644
--- a/arch/or1k/pthread_arch.h
+++ b/arch/or1k/pthread_arch.h
@@ -3,10 +3,10 @@ static inline struct pthread *__pthread_self()
 {
 #ifdef __clang__
 	char *tp;
-	__asm__ __volatile__ ("l.ori %0, r10, 0" : "=r" (tp) );
+	__asm__ ("l.ori %0, r10, 0" : "=r" (tp) );
 #else
 	register char *tp __asm__("r10");
-	__asm__ __volatile__ ("" : "=r" (tp) );
+	__asm__ ("" : "=r" (tp) );
 #endif
 	return (struct pthread *) (tp - sizeof(struct pthread));
 }