author:    Rich Felker <dalias@aerifal.cx>  2020-05-22 17:45:47 -0400
committer: Rich Felker <dalias@aerifal.cx>  2020-05-22 17:45:47 -0400
commit:    8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 (patch)
tree:      356aea3c8d201c577158b163eace2e0014e03fed /src/malloc
parent:    f12888e9eb9eed60cc266b899dcafecb4752964a (diff)
download:  musl-8d81ba8c0bc6fe31136cb15c9c82ef4c24965040.tar.gz
restore lock-skipping for processes that return to single-threaded state
the design used here relies on the barrier provided by the first lock operation after the process returns to single-threaded state to synchronize with actions by the last thread that exited. by storing the intent to change modes in the same object used to detect whether locking is needed, it's possible to avoid an extra (possibly costly) memory load after the lock is taken.
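For readers following the log only, here is a minimal standalone sketch of the same idea, using C11 atomics in place of musl's internal a_swap/__wait primitives; the names need_locks_demo, demo_lock, demo_unlock and the main() driver are illustrative assumptions, not musl code:

#include <stdatomic.h>
#include <stdio.h>

/* Tri-state flag, mirroring the role of libc.need_locks in the patch:
 *   1 -> multiple threads exist, locking is required
 *   0 -> single-threaded, the atomic lock path is skipped entirely
 *  -1 -> the process just returned to single-threaded state; take the
 *        lock once more so its barrier synchronizes with the thread
 *        that exited, then store 0 so future calls skip the lock */
static volatile int need_locks_demo = 1;

static atomic_int demo_lk;

static void demo_lock(atomic_int *lk)
{
    int need_locks = need_locks_demo;  /* one load, reused after the lock */
    if (need_locks) {
        /* spin until the lock is free; the acquire barrier here is the
         * synchronization point the commit message refers to */
        while (atomic_exchange_explicit(lk, 1, memory_order_acquire))
            ;
        /* -1 means this lock operation consumed the barrier: demote the
         * flag to 0 without a second load of the shared object */
        if (need_locks < 0) need_locks_demo = 0;
    }
}

static void demo_unlock(atomic_int *lk)
{
    /* release only if the lock was actually taken; it is skipped
     * entirely while the process is single-threaded */
    if (atomic_load_explicit(lk, memory_order_relaxed))
        atomic_store_explicit(lk, 0, memory_order_release);
}

int main(void)
{
    need_locks_demo = -1;   /* pretend the last extra thread just exited */

    demo_lock(&demo_lk);    /* takes the lock once, then clears the flag */
    demo_unlock(&demo_lk);

    demo_lock(&demo_lk);    /* atomic path is now skipped entirely */
    demo_unlock(&demo_lk);

    printf("need_locks_demo = %d\n", need_locks_demo);  /* prints 0 */
    return 0;
}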
Diffstat (limited to 'src/malloc')
-rw-r--r--  src/malloc/malloc.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 2553a62e..a803d4c9 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -26,8 +26,11 @@ int __malloc_replaced;
 
 static inline void lock(volatile int *lk)
 {
-	if (libc.threaded)
+	int need_locks = libc.need_locks;
+	if (need_locks) {
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
 }
 
 static inline void unlock(volatile int *lk)
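Not shown here, since this diffstat view is limited to src/malloc: the counterpart change that stores the "intent to change modes" would live outside the allocator, in the thread-exit path, which would set libc.need_locks to -1 when the process drops back to a single thread (and thread creation would set it back to 1); the lock() above then converts that -1 to 0 once its barrier has synchronized with the exited thread.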