path: root/src/thread/__lock.c
author    Rich Felker <dalias@aerifal.cx>  2013-09-20 02:00:27 -0400
committer Rich Felker <dalias@aerifal.cx>  2013-09-20 02:00:27 -0400
commit    e803829e6b087c0ed91adc11f87185109bc59b31 (patch)
tree      ad2fdda58fbba2c542785d9cc8babcd5f7d001ec /src/thread/__lock.c
parent    d8e283df58eb8bff1aa2f8a99347e294c7f67cb9 (diff)
download  musl-e803829e6b087c0ed91adc11f87185109bc59b31.tar.gz
fix potential deadlock bug in libc-internal locking logic
if a multithreaded program became non-multithreaded (i.e. all other threads exited) while one thread held an internal lock, the remaining thread would fail to release the lock. if the program then became multithreaded again at a later time, any further attempts to obtain the lock would deadlock permanently.

the underlying cause is that the value of libc.threads_minus_1 at unlock time might not match the value at lock time. one solution would be returning a flag to the caller indicating whether the lock was taken and needs to be unlocked, but there is a simpler solution: using the lock itself as such a flag.

note that this flag is not needed anyway for correctness; if the lock is not held, the unlock code is harmless. however, the memory synchronization properties associated with a_store are costly on some archs, so it's best to avoid executing the unlock code when it is unnecessary.
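To make the failure sequence concrete, here is a minimal, self-contained C sketch. It is not musl source: the diff below shows that the pre-patch __lock/__unlock contained no thread-count test themselves, so the sketch assumes (as the message implies) that the pre-patch call sites skipped both the lock and the unlock paths when libc.threads_minus_1 was zero. The names lock_old/unlock_old are hypothetical, a_swap/a_store are emulated with GCC atomic builtins, and the __wait/__wake futex calls are elided.

/* minimal sketch of the deadlock scenario (not musl source); assumes the
 * pre-patch thread-count gate sat around both the lock and unlock paths */
#include <stdio.h>

static volatile int threads_minus_1;  /* stand-in for libc.threads_minus_1 */
static volatile int lock[2];          /* lock word + waiter count, as in musl */

static int a_swap(volatile int *p, int v)
{
	return __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
}
static void a_store(volatile int *p, int v)
{
	__atomic_store_n(p, v, __ATOMIC_SEQ_CST);
}

static void lock_old(volatile int *l)
{
	if (threads_minus_1)
		while (a_swap(l, 1));  /* __wait(l, l+1, 1, 1) elided */
}
static void unlock_old(volatile int *l)
{
	if (threads_minus_1)  /* stale: may be 0 now even if it was 1 at lock time */
		a_store(l, 0);  /* skipped below, so the lock word stays 1 */
}

int main(void)
{
	threads_minus_1 = 1;  /* a second thread exists */
	lock_old(lock);       /* lock word becomes 1 */
	threads_minus_1 = 0;  /* the other thread exits while the lock is held */
	unlock_old(lock);     /* gate sees 0: a_store never runs */
	threads_minus_1 = 1;  /* the program becomes multithreaded again */
	printf("lock word = %d; the next lock_old() would spin forever\n", lock[0]);
	return 0;
}

With the patch applied, __unlock keys off the lock word l[0] instead of the thread count, so the release runs exactly when the lock was actually taken, regardless of how the thread count changed in between.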
Diffstat (limited to 'src/thread/__lock.c')
-rw-r--r--  src/thread/__lock.c | 9 ++++++---
1 file changed, 6 insertions, 3 deletions
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 2f345ae7..0874c04a 100644
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -2,11 +2,14 @@
 
 void __lock(volatile int *l)
 {
-	while (a_swap(l, 1)) __wait(l, l+1, 1, 1);
+	if (libc.threads_minus_1)
+		while (a_swap(l, 1)) __wait(l, l+1, 1, 1);
 }
 
 void __unlock(volatile int *l)
 {
-	a_store(l, 0);
-	if (l[1]) __wake(l, 1, 1);
+	if (l[0]) {
+		a_store(l, 0);
+		if (l[1]) __wake(l, 1, 1);
+	}
 }