From f08ab9e61a147630497198fe3239149275c0a3f4 Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Fri, 10 Apr 2015 02:27:52 -0400
Subject: redesign and simplify vmlock system

this global lock allows certain unlock-type primitives to exclude
mmap/munmap operations which could change the identity of virtual
addresses while references to them still exist.

the original design mistakenly assumed mmap/munmap would conversely
need to exclude the same operations which exclude mmap/munmap, so the
vmlock was implemented as a sort of 'symmetric recursive rwlock'. this
turned out to be unnecessary.

commit 25d12fc0fc51f1fae0f85b4649a6463eb805aa8f already shortened the
interval during which mmap/munmap held their side of the lock, but left
the inappropriate lock design and some inefficiency.

the new design uses a separate function, __vm_wait, which does not hold
any lock itself and only waits for lock users which were already
present when it was called to release the lock. this is sufficient
because of the way operations that need to be excluded are sequenced:
the "unlock-type" operations using the vmlock need only block
mmap/munmap operations that are precipitated by (and thus sequenced
after) the atomic-unlock they perform while holding the vmlock.

this allows for a spectacular lack of synchronization in the __vm_wait
function itself.
---
 src/thread/vmlock.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
index 125c6dc9..0a69b3e3 100644
--- a/src/thread/vmlock.c
+++ b/src/thread/vmlock.c
@@ -2,21 +2,20 @@
 
 static volatile int vmlock[2];
 
-void __vm_lock(int inc)
+void __vm_wait()
 {
-	for (;;) {
-		int v = vmlock[0];
-		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
-		else if (a_cas(vmlock, v, v+inc)==v) break;
-	}
+	int tmp;
+	while ((tmp=vmlock[0]))
+		__wait(vmlock, vmlock+1, tmp, 1);
 }
 
-void __vm_unlock(void)
+void __vm_lock(pthread_t self)
 {
-	int inc = vmlock[0]>0 ? -1 : 1;
-	if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
-		__wake(vmlock, -1, 1);
+	a_inc(vmlock);
 }
 
-weak_alias(__vm_lock, __vm_lock_impl);
-weak_alias(__vm_unlock, __vm_unlock_impl);
+void __vm_unlock(pthread_t self)
+{
+	if (a_fetch_add(vmlock, -1)==1 && vmlock[1])
+		__wake(vmlock, -1, 1);
+}
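
For readers less familiar with the musl internals above, here is a
minimal, self-contained C11 model of the pairing the commit message
describes. Everything in it is hypothetical illustration: vmusers,
robust_unlock and unmap_region are stand-in names, and plain C11
atomics replace musl's a_inc/a_fetch_add and its __wait/__wake futex
wrappers (which also track a waiter count in vmlock[1]).

/* Self-contained C11 sketch of the vmlock protocol; illustrative
 * names only, not musl's actual code. */
#include <stdatomic.h>
#include <stddef.h>

static atomic_int vmusers; /* unlock-type operations in flight */

/* mmap/munmap side: wait for the users already present at the time
 * of the call to drain; no lock is taken and no state is published,
 * so a plain atomic load is the only synchronization needed. */
static void vm_wait(void)
{
	while (atomic_load(&vmusers))
		; /* musl futex-waits here instead of spinning */
}

static void vm_lock(void)   { atomic_fetch_add(&vmusers, 1); }
static void vm_unlock(void) { atomic_fetch_sub(&vmusers, 1); }

/* An "unlock-type" primitive: the atomic store that hands the user
 * lock to other threads happens while the vmlock is held, so any
 * munmap that store precipitates is sequenced after vm_lock and is
 * therefore seen by a subsequent vm_wait. */
static void robust_unlock(atomic_int *userlock)
{
	vm_lock();
	atomic_store(userlock, 0); /* other threads may now free/unmap */
	vm_unlock();
}

/* The operation being excluded: once vm_wait returns, no unlock-type
 * operation that could still reference addr is in flight, so changing
 * the identity of the mapping is safe. */
static void unmap_region(void *addr, size_t len)
{
	vm_wait();
	/* munmap(addr, len) would go here */
	(void)addr; (void)len;
}

The asymmetry is the point of the redesign: the mmap/munmap side never
writes the counter, so unmap operations do not serialize against one
another, and __vm_wait needs no atomic read-modify-write at all.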