diff options
author     Rich Felker <dalias@aerifal.cx>   2015-08-16 18:15:18 +0000
committer  Rich Felker <dalias@aerifal.cx>   2015-08-16 18:15:18 +0000
commit     5a9c8c05a5a0cdced4122589184fd795b761bb4a (patch)
tree       34b9f909c151de0d24ec29ef194815a5fa4daede /arch
parent     c13f2af1fe1856e36dd1b2773cac05d5d72641dc (diff)
download   musl-5a9c8c05a5a0cdced4122589184fd795b761bb4a.tar.gz
mitigate performance regression in libc-internal locks on x86_64
commit 3c43c0761e1725fd5f89a9c028cbf43250abb913 fixed missing
synchronization in the atomic store operation for i386 and x86_64, but
opted to use mfence for the barrier on x86_64 where it's always
available. however, in practice mfence is significantly slower than
the barrier approach used on i386 (a nop-like lock orl operation).
this commit changes x86_64 (and x32) to use the faster barrier.
Diffstat (limited to 'arch')
-rw-r--r--  arch/x32/atomic.h     | 2 +-
-rw-r--r--  arch/x86_64/atomic.h  | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
index 2ab1f7a2..7690183d 100644
--- a/arch/x32/atomic.h
+++ b/arch/x32/atomic.h
@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_store(volatile int *p, int x)
 {
-	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
+	__asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 2ab1f7a2..7690183d 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_store(volatile int *p, int x)
 {
-	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
+	__asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()