-rw-r--r--  src/internal/libc.h         | 1 +
-rw-r--r--  src/malloc/malloc.c         | 5 ++++-
-rw-r--r--  src/thread/__lock.c         | 4 +++-
-rw-r--r--  src/thread/pthread_create.c | 8 ++++----
4 files changed, 12 insertions(+), 6 deletions(-)
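
In outline, the patch replaces the boolean libc.threaded test in the lock
fast paths with a three-state flag, so a process whose extra threads have
all exited can go back to skipping locks. A reader's summary of the
protocol (not part of the commit itself):

/*
 * need_locks ==  0   single-threaded: lock paths may be skipped
 * need_locks ==  1   multithreaded: locks are required
 * need_locks == -1   just returned to single-threaded state; the next
 *                    lock operation must still perform the atomic (to
 *                    synchronize with the exiting thread), then stores
 *                    0 to re-enable skipping
 */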
diff --git a/src/internal/libc.h b/src/internal/libc.h
index d47f58e0..619bba86 100644
--- a/src/internal/libc.h
+++ b/src/internal/libc.h
@@ -21,6 +21,7 @@ struct __libc {
 	char can_do_threads;
 	char threaded;
 	char secure;
+	volatile signed char need_locks;
 	int threads_minus_1;
 	size_t *auxv;
 	struct tls_module *tls_head;
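
The field is volatile because the last exiting thread stores to it
asynchronously, and "signed char" is spelled out because plain char is
unsigned on some ABIs (e.g. ARM, AArch64), which would break the -1
sentinel. Each lock path below then follows the same read-once pattern;
a minimal sketch (the surrounding function and take_lock are hypothetical
placeholders, not musl code):

static void lock_path_sketch(volatile int *lk)
{
	/* Load the flag exactly once so the skip test and the reset
	 * below agree on one snapshot, even if an exiting thread
	 * stores to libc.need_locks concurrently. */
	int need_locks = libc.need_locks;
	if (!need_locks) return;   /* 0: single-threaded, skip lock */
	take_lock(lk);             /* stands in for the a_swap/a_cas loop */
	/* -1: the last other thread just exited; this atomic
	 * acquisition synchronized with its unlocks, so future calls
	 * may skip again. */
	if (need_locks < 0) libc.need_locks = 0;
}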
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 2553a62e..a803d4c9 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -26,8 +26,11 @@ int __malloc_replaced;
 
 static inline void lock(volatile int *lk)
 {
-	if (libc.threaded)
+	int need_locks = libc.need_locks;
+	if (need_locks) {
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
 }
 
 static inline void unlock(volatile int *lk)
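
Note where the reset lands: inside the if (need_locks) block, after the
a_swap loop has acquired the lock. The acquisition pairs with the exiting
thread's lock releases, so by the time 0 is stored, everything that
thread did under malloc's locks is visible. A reader's sketch of the two
sides (a simplified timeline, not a literal listing):

/* exiting thread T                 surviving thread S
 *
 * lock(bin); ... unlock(bin);      int nl = libc.need_locks;  // may read -1
 * ...                              while (a_swap(lk, 1)) ...  // still atomic
 * libc.need_locks = -1;            // the atomic is a barrier: T's unlocks
 * SYS_exit                         // are now visible to S
 *                                  if (nl < 0) libc.need_locks = 0;
 */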
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 5b9b144e..60eece49 100644
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -18,9 +18,11 @@
 
 void __lock(volatile int *l)
 {
-	if (!libc.threaded) return;
+	int need_locks = libc.need_locks;
+	if (!need_locks) return;
 	/* fast path: INT_MIN for the lock, +1 for the congestion */
 	int current = a_cas(l, 0, INT_MIN + 1);
+	if (need_locks < 0) libc.need_locks = 0;
 	if (!current) return;
 	/* A first spin loop, for medium congestion. */
 	for (unsigned i = 0; i < 10; ++i) {
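
Here the flag is cleared right after the a_cas, even when the fast path
loses (current != 0) and the function falls through to the spin and futex
phases. That appears sufficient because musl's atomics act as full
barriers, so the first atomic already provides the synchronization the -1
state was waiting for; that reading is mine, not stated in the patch. For
orientation, the internal calling convention (LOCK/UNLOCK in
pthread_impl.h wrap these functions); the caller below is hypothetical:

static volatile int lk[1];   /* bit INT_MIN = locked, low bits = congestion */

static void with_lock(void)
{
	__lock(lk);     /* effectively a no-op while need_locks == 0 */
	/* ... critical section ... */
	__unlock(lk);   /* safe either way: it only releases when the
	                 * lock word actually shows a held lock */
}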
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 6a3b0c21..6bdfb44f 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *result)
 	 * until the lock is released, which only happens after SYS_exit
 	 * has been called, via the exit futex address pointing at the lock.
 	 * This needs to happen after any possible calls to LOCK() that might
-	 * skip locking if libc.threads_minus_1 is zero. */
-	libc.threads_minus_1--;
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
 	self->next->prev = self->prev;
 	self->prev->next = self->next;
 	self->prev = self->next = self;
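
The exiting thread reports -1 rather than 0 because its own teardown is
not yet observable to survivors; it also still holds the thread list lock
at this point, which is only released when the kernel wakes the exit
futex at SYS_exit. A compressed sketch of the exit-side ordering
(comments mine, simplified from __pthread_exit):

/*
 * __tl_lock();                   // taken earlier in __pthread_exit
 * if (!--libc.threads_minus_1)
 *         libc.need_locks = -1;  // after any LOCK() this thread may skip
 * ...unlink self from the thread list...
 * __syscall(SYS_exit, 0);        // kernel clears the tid word and wakes
 *                                // the exit futex, releasing the lock
 */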
@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
 	__tl_lock();
-	libc.threads_minus_1++;
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
 	/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		new->next->prev = new;
 		new->prev->next = new;
 	} else {
-		libc.threads_minus_1--;
+		if (!--libc.threads_minus_1) libc.need_locks = 0;
 	}
 	__tl_unlock();
 	__restore_sigs(&set);
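
On the create side the increment happens before __clone, so there is a
window where need_locks is 1 but the new thread does not exist yet; the
only cost is that locks are taken unnecessarily. On clone failure the
count is rolled back under the still-held thread list lock and the flag
goes straight to 0: the new thread never ran, so there is nothing to
synchronize with. A hypothetical end-to-end demo of the 1 -> 2 -> 1
thread lifecycle this patch optimizes:

#include <pthread.h>
#include <stdlib.h>

static void *fn(void *arg) { return arg; }

int main(void)
{
	free(malloc(64));            /* need_locks == 0: no atomic taken */

	pthread_t t;
	if (pthread_create(&t, 0, fn, 0))   /* sets need_locks = 1 */
		return 1;
	pthread_join(t, 0);          /* last other thread exits: flag -> -1 */

	free(malloc(64));            /* first lock consumes -1, stores 0 */
	free(malloc(64));            /* locks skipped again */
	return 0;
}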