summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
author	Rich Felker <dalias@aerifal.cx>	2011-09-28 18:57:18 -0400
committer	Rich Felker <dalias@aerifal.cx>	2011-09-28 18:57:18 -0400
commit	9cee9307e3b8ed31e772d564d1dc53e6d888acba (patch)
tree	90355be0615b94ba2cd5ddf7581173862ae7a063 /src
parent	95b147967216a057d51a1b970da777486be716e3 (diff)
download	musl-9cee9307e3b8ed31e772d564d1dc53e6d888acba.tar.gz
improve pshared barriers
Eliminate the sequence number field and instead use the counter as the futex. Because of the way the lock is held, sequence numbers are completely useless, and this frees up a field in the barrier structure to be used as a waiter count for the count futex, which lets us avoid some syscalls in the best case. As of now, self-synchronized destruction and unmapping should be fully safe. Before any thread can return from the barrier, all threads in the barrier have obtained the vm lock, and each holds a shared lock on the barrier. The barrier memory is not inspected after the shared lock count reaches 0, nor after the vm lock is released.
Diffstat (limited to 'src')
-rw-r--r--	src/internal/pthread_impl.h	2
-rw-r--r--	src/thread/pthread_barrier_wait.c	24
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/src/internal/pthread_impl.h b/src/internal/pthread_impl.h
index 82d62426..dd3863fc 100644
--- a/src/internal/pthread_impl.h
+++ b/src/internal/pthread_impl.h
@@ -77,7 +77,7 @@ struct __timer {
#define _b_waiters __u.__i[1]
#define _b_limit __u.__i[2]
#define _b_count __u.__i[3]
-#define _b_seq __u.__i[4]
+#define _b_waiters2 __u.__i[4]
#define _b_inst __u.__p[4]
#include "pthread_arch.h"
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index 9b1edbf9..71f7b5fe 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -21,7 +21,6 @@ void __vm_unlock(void)
static int pshared_barrier_wait(pthread_barrier_t *b)
{
int limit = (b->_b_limit & INT_MAX) + 1;
- int seq;
int ret = 0;
int v, w;
@@ -30,33 +29,36 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
while ((v=a_cas(&b->_b_lock, 0, limit)))
__wait(&b->_b_lock, &b->_b_waiters, v, 0);
- seq = b->_b_seq;
-
+ /* Wait for <limit> threads to get to the barrier */
if (++b->_b_count == limit) {
+ a_store(&b->_b_count, 0);
ret = PTHREAD_BARRIER_SERIAL_THREAD;
- b->_b_seq++;
- __wake(&b->_b_seq, -1, 0);
+ if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
} else {
a_store(&b->_b_lock, 0);
if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
- __wait(&b->_b_seq, 0, seq, 0);
+ while ((v=b->_b_count)>0)
+ __wait(&b->_b_count, &b->_b_waiters2, v, 0);
}
__vm_lock(+1);
- if (a_fetch_add(&b->_b_count, -1)==1) {
- b->_b_seq++;
- __wake(&b->_b_seq, -1, 0);
+ /* Ensure all threads have a vm lock before proceeding */
+ if (a_fetch_add(&b->_b_count, -1)==1-limit) {
+ a_store(&b->_b_count, 0);
+ if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
} else {
- __wait(&b->_b_seq, 0, seq+1, 0);
+ while ((v=b->_b_count))
+ __wait(&b->_b_count, &b->_b_waiters2, v, 0);
}
/* Perform a recursive unlock suitable for self-sync'd destruction */
do {
v = b->_b_lock;
w = b->_b_waiters;
- } while (a_cas(&b->_b_lock, v, v-1 & INT_MAX) != v);
+ } while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);
+ /* Wake a thread waiting to reuse or destroy the barrier */
if (v==INT_MIN+1 || (v==1 && w))
__wake(&b->_b_lock, 1, 0);