author		Rich Felker <dalias@aerifal.cx>	2019-09-06 16:17:44 -0400
committer	Rich Felker <dalias@aerifal.cx>	2019-09-06 16:17:44 -0400
commit		8a544ee3a2a75af278145b09531177cab4939b41 (patch)
tree		19d8be4deb3afc672e7e7b56befe387b80d021b8 /src/thread
parent		022f27d541af68f047d367ad9d93f941135555c1 (diff)
download	musl-8a544ee3a2a75af278145b09531177cab4939b41.tar.gz
synchronously clean up pthread_create failure due to scheduling errors
previously, when pthread_create failed due to inability to set explicit scheduling according to the requested attributes, the nascent thread was detached and made responsible for its own cleanup via the standard pthread_exit code path. this left it consuming resources potentially well after pthread_create returned, in a way that the application could not see or mitigate, and unnecessarily exposed its existence to the rest of the implementation via the global thread list.

instead, attempt explicit scheduling early and reuse the failure path for __clone failure if it fails. the nascent thread's exit futex is not needed for unlocking the thread list, since the thread calling pthread_create holds the thread list lock the whole time, so it can be repurposed to ensure the thread has finished exiting. no pthread_exit is needed, and freeing the stack, if needed, can happen just as it would if __clone failed.
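To make the mechanism concrete, here is a self-contained sketch of the same synchronization pattern written against raw clone(2) and futex(2) rather than musl's internals. It is an illustration, not musl code: every identifier is invented, error handling is minimal, and it assumes Linux with a libc that provides the clone() wrapper. It demonstrates the point the message makes: the creator publishes its scheduling verdict in a control word, and a thread that must die redirects its clear_child_tid pointer at that word with SYS_set_tid_address, so the kernel's exit-time store-and-wake tells the creator exactly when the thread is gone and its resources can be reclaimed.

/* Illustrative sketch only -- not musl source. */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define STACK_SIZE (64*1024)

/* Control word: 1 = verdict pending, 2 = child parked waiting for verdict,
 * 3 = scheduling failed (child must exit), 0 = go ahead / child fully gone. */
static atomic_int control = 1;

static void futex_wait(void *addr, int val)
{
	syscall(SYS_futex, addr, FUTEX_WAIT, val, 0, 0, 0);
}

static void futex_wake(void *addr)
{
	syscall(SYS_futex, addr, FUTEX_WAKE, 1, 0, 0, 0);
}

static int child(void *arg)
{
	/* Announce that we started before the verdict arrived (1 -> 2),
	 * then sleep until the creator publishes it. */
	int expect = 1;
	if (atomic_compare_exchange_strong(&control, &expect, 2))
		while (atomic_load(&control) == 2)
			futex_wait(&control, 2);

	if (atomic_load(&control)) {
		/* Verdict 3: setup failed. Point the kernel's clear_child_tid
		 * at the control word; when this thread exits, the kernel
		 * stores 0 there and does a FUTEX_WAKE on it. */
		syscall(SYS_set_tid_address, &control);
		return 0;        /* thread exits without ever running */
	}
	/* Verdict 0: the real start routine would run here. */
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	int flags = CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
	            CLONE_THREAD|CLONE_SYSVSEM;
	if (!stack || clone(child, stack + STACK_SIZE, flags, 0) < 0)
		return 1;

	/* Pretend sched_setscheduler failed: publish verdict 3, and wake
	 * the child if it is already parked on the control word. */
	if (atomic_exchange(&control, 3) == 2)
		futex_wake(&control);

	/* Wait for the kernel's exit-time clear of the control word; after
	 * that the child can no longer touch its stack, so freeing it and
	 * returning an error to the caller is safe. */
	while (atomic_load(&control) == 3)
		futex_wait(&control, 3);

	free(stack);
	puts("failed thread cleaned up synchronously");
	return 0;
}

In the patched pthread_create below, the same two waits appear: the __wake/__wait pair on args->control carries the verdict handshake, and the kernel's clear of args->control, registered by the nascent thread via SYS_set_tid_address in start(), stands in for the exit futex.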
Diffstat (limited to 'src/thread')
-rw-r--r--	src/thread/pthread_create.c	31
1 file changed, 18 insertions, 13 deletions
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index edcdf041..5d00d765 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -184,8 +184,8 @@ static int start(void *p)
 		if (a_cas(&args->control, 1, 2)==1)
 			__wait(&args->control, 0, 2, 1);
 		if (args->control) {
-			__pthread_self()->detach_state = DT_DETACHED;
-			__pthread_exit(0);
+			__syscall(SYS_set_tid_address, &args->control);
+			return 0;
 		}
 	}
 	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
@@ -339,8 +339,21 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	libc.threads_minus_1++;
 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
-	/* If clone succeeded, new thread must be linked on the thread
-	 * list before unlocking it, even if scheduling may still fail. */
+	/* All clone failures translate to EAGAIN. If explicit scheduling
+	 * was requested, attempt it before unlocking the thread list so
+	 * that the failed thread is never exposed and so that we can
+	 * clean up all transient resource usage before returning. */
+	if (ret < 0) {
+		ret = -EAGAIN;
+	} else if (attr._a_sched) {
+		ret = __syscall(SYS_sched_setscheduler,
+			new->tid, attr._a_policy, &attr._a_prio);
+		if (a_swap(&args->control, ret ? 3 : 0)==2)
+			__wake(&args->control, 1, 1);
+		if (ret)
+			__wait(&args->control, 0, 3, 0);
+	}
+
 	if (ret >= 0) {
 		new->next = self->next;
 		new->prev = self;
@@ -355,15 +368,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 
 	if (ret < 0) {
 		if (map) __munmap(map, size);
-		return EAGAIN;
-	}
-
-	if (attr._a_sched) {
-		int ret = -__syscall(SYS_sched_setscheduler, new->tid,
-			attr._a_policy, &attr._a_prio);
-		if (a_swap(&args->control, ret ? 3 : 0)==2)
-			__wake(&args->control, 1, 1);
-		if (ret) return ret;
+		return -ret;
 	}
 
 	*res = new;
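A note on the futex helpers appearing in the hunks above: __wait and __wake are musl-internal primitives. The following is a simplified model with signatures inferred from the call sites in this diff; the real implementations also spin before sleeping, track a waiter count, and cope with kernels lacking FUTEX_PRIVATE, so treat this as a reading aid rather than musl's code.

/* Simplified model of the __wait/__wake helpers used above; signatures are
 * inferred from the call sites in this diff, details are omitted. */
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void model_wait(volatile int *addr, volatile int *waiters, int val, int priv)
{
	int op = FUTEX_WAIT | (priv ? FUTEX_PRIVATE_FLAG : 0);
	if (waiters) __atomic_add_fetch(waiters, 1, __ATOMIC_SEQ_CST);
	while (*addr == val)              /* sleep until *addr changes */
		syscall(SYS_futex, addr, op, val, 0, 0, 0);
	if (waiters) __atomic_sub_fetch(waiters, 1, __ATOMIC_SEQ_CST);
}

static void model_wake(volatile void *addr, int cnt, int priv)
{
	int op = FUTEX_WAKE | (priv ? FUTEX_PRIVATE_FLAG : 0);
	if (cnt < 0) cnt = INT_MAX;       /* negative count: wake all waiters */
	syscall(SYS_futex, addr, op, cnt, 0, 0, 0);
}

Read with this model, the new __wait(&args->control, 0, 3, 0) in pthread_create sleeps until args->control stops being 3, which happens when the kernel clears the word as the nascent thread exits; priv is 0 there presumably because the kernel's exit-time wake is not a private-futex wake, so a private wait would not be woken by it.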