diff options
authorRich Felker <>2014-08-25 15:58:19 -0400
committerRich Felker <>2014-08-25 15:58:19 -0400
commitb8a9c90e4fe29821f2ef40796c07c48bd8fac8cc (patch)
parentea818ea8340c13742a4f41e6077f732291aea4bc (diff)
sanitize number of spins in userspace before futex wait
The previous spin limit of 10000 was utterly unreasonable. Empirically, it could consume up to 200000 cycles, whereas a failed futex wait (EAGAIN) typically takes 1000 cycles or less, and even a true wait/wake round seems much less expensive. The new counts (100 for the general wait, 200 in the barrier) were chosen simply to be in a reasonable range without having adverse effects on the casual micro-benchmark tests I have been running. They may still be too high from the standpoint of not wasting CPU cycles, but at least they are a lot better than before. Rigorous testing across different architectures and CPU models should be performed at some point to determine whether further adjustments should be made.
2 files changed, 2 insertions, 2 deletions
diff --git a/src/thread/__wait.c b/src/thread/__wait.c
index 3d0d9204..c1d6b61c 100644
--- a/src/thread/__wait.c
+++ b/src/thread/__wait.c
@@ -2,7 +2,7 @@
void __wait(volatile int *addr, volatile int *waiters, int val, int priv)
- int spins=10000;
+ int spins=100;
if (priv) priv = 128;
while (spins--) {
if (*addr==val) a_spin();
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index 4a964fe3..e15abb84 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -79,7 +79,7 @@ int pthread_barrier_wait(pthread_barrier_t *b)
/* First thread to enter the barrier becomes the "instance owner" */
if (!inst) {
struct instance new_inst = { 0 };
- int spins = 10000;
+ int spins = 200;
b->_b_inst = inst = &new_inst;
a_store(&b->_b_lock, 0);
if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);