author		Rich Felker <dalias@aerifal.cx>	2015-04-10 02:27:52 -0400
committer	Rich Felker <dalias@aerifal.cx>	2015-04-10 02:27:52 -0400
commit		f08ab9e61a147630497198fe3239149275c0a3f4
tree		65f0898637a5306485e665ec95c753b99f4e3740
parent		4e98cce1c529a304d7b55b5455078b9532f93e9b
redesign and simplify vmlock system
this global lock allows certain unlock-type primitives to exclude mmap/munmap operations which could change the identity of virtual addresses while references to them still exist.

the original design mistakenly assumed mmap/munmap would conversely need to exclude the same operations which exclude mmap/munmap, so the vmlock was implemented as a sort of 'symmetric recursive rwlock'. this turned out to be unnecessary. commit 25d12fc0fc51f1fae0f85b4649a6463eb805aa8f already shortened the interval during which mmap/munmap held their side of the lock, but left the inappropriate lock design and some inefficiency.

the new design uses a separate function, __vm_wait, which does not hold any lock itself and only waits for lock users which were already present when it was called to release the lock. this is sufficient because of the way operations that need to be excluded are sequenced: the "unlock-type" operations using the vmlock need only block mmap/munmap operations that are precipitated by (and thus sequenced after) the atomic-unlock they perform while holding the vmlock. this allows for a spectacular lack of synchronization in the __vm_wait function itself.
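for illustration, a minimal sketch of what the three primitives can look like under this description. the two-slot counter/waiter layout and the musl-internal helpers (a_inc, a_fetch_add, __wait, __wake) are assumptions here, not necessarily the exact contents of the new vmlock.c:

#include "pthread_impl.h"

/* vmlock[0] counts live holders of the vm lock; vmlock[1] counts
 * threads blocked in __vm_wait. (assumed layout) */
static volatile int vmlock[2];

/* taken by unlock-type primitives around their atomic unlock. */
void __vm_lock(void)
{
	a_inc(vmlock);
}

void __vm_unlock(void)
{
	/* wake waiters only when the last holder leaves. */
	if (a_fetch_add(vmlock, -1) == 1 && vmlock[1])
		__wake(vmlock, -1, 1);
}

/* called from mmap/munmap-type operations: acquire nothing, just
 * futex-wait until the holder count reaches zero. holders arriving
 * after the count is observed are not excluded, which is fine: any
 * munmap they could precipitate is sequenced after their unlock. */
void __vm_wait(void)
{
	int tmp;
	while ((tmp = vmlock[0]))
		__wait(vmlock, vmlock+1, tmp, 1);
}

the caller side then reduces to a bare __vm_wait() before the unmapping operation; since no lock is held across the syscall, mmap/munmap never need to publish their own side of a lock, which is where the simplification comes from.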
Diffstat (limited to 'src/thread/pthread_mutex_unlock.c')
 src/thread/pthread_mutex_unlock.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index a7f39c7f..02da92a9 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -1,8 +1,5 @@
 #include "pthread_impl.h"
 
-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);
-
 int __pthread_mutex_unlock(pthread_mutex_t *m)
 {
 	pthread_t self;
@@ -19,7 +16,7 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
 			return m->_m_count--, 0;
 		if (!priv) {
 			self->robust_list.pending = &m->_m_next;
-			__vm_lock_impl(+1);
+			__vm_lock();
 		}
 		volatile void *prev = m->_m_prev;
 		volatile void *next = m->_m_next;
@@ -30,7 +27,7 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
 	cont = a_swap(&m->_m_lock, (type & 8) ? 0x40000000 : 0);
 	if (type != PTHREAD_MUTEX_NORMAL && !priv) {
 		self->robust_list.pending = 0;
-		__vm_unlock_impl();
+		__vm_unlock();
 	}
 	if (waiters || cont<0)
 		__wake(&m->_m_lock, 1, priv);