Diffstat (limited to 'src/thread')
-rw-r--r--  src/thread/__lock.c                          |  4
-rw-r--r--  src/thread/i386/__set_thread_area.s          |  1
-rw-r--r--  src/thread/loongarch64/__set_thread_area.s   |  7
-rw-r--r--  src/thread/loongarch64/__unmapself.s         |  7
-rw-r--r--  src/thread/loongarch64/clone.s               | 29
-rw-r--r--  src/thread/loongarch64/syscall_cp.s          | 29
-rw-r--r--  src/thread/pthread_atfork.c                  |  8
-rw-r--r--  src/thread/pthread_attr_get.c                |  2
-rw-r--r--  src/thread/pthread_cancel.c                  |  9
-rw-r--r--  src/thread/pthread_cond_timedwait.c          | 14
-rw-r--r--  src/thread/pthread_create.c                  | 73
-rw-r--r--  src/thread/pthread_detach.c                  |  8
-rw-r--r--  src/thread/pthread_getname_np.c              | 25
-rw-r--r--  src/thread/pthread_getschedparam.c           |  3
-rw-r--r--  src/thread/pthread_key_create.c              |  8
-rw-r--r--  src/thread/pthread_kill.c                    |  6
-rw-r--r--  src/thread/pthread_mutex_destroy.c           |  6
-rw-r--r--  src/thread/pthread_mutexattr_setprotocol.c   | 19
-rw-r--r--  src/thread/pthread_mutexattr_setrobust.c     | 20
-rw-r--r--  src/thread/pthread_setname_np.c              |  2
-rw-r--r--  src/thread/pthread_setschedparam.c           |  3
-rw-r--r--  src/thread/pthread_setschedprio.c            |  3
-rw-r--r--  src/thread/riscv32/__set_thread_area.s       |  6
-rw-r--r--  src/thread/riscv32/__unmapself.s             |  7
-rw-r--r--  src/thread/riscv32/clone.s                   | 34
-rw-r--r--  src/thread/riscv32/syscall_cp.s              | 29
-rw-r--r--  src/thread/s390x/clone.s                     |  6
-rw-r--r--  src/thread/s390x/syscall_cp.s                |  2
-rw-r--r--  src/thread/sem_getvalue.c                    |  3
-rw-r--r--  src/thread/sem_open.c                        | 15
-rw-r--r--  src/thread/sem_post.c                        | 12
-rw-r--r--  src/thread/sem_timedwait.c                   | 10
-rw-r--r--  src/thread/sem_trywait.c                     |  6
-rw-r--r--  src/thread/synccall.c                        | 13
-rw-r--r--  src/thread/vmlock.c                          |  2

35 files changed, 347 insertions(+), 84 deletions(-)
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 45557c88..60eece49 100644
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -18,9 +18,11 @@
void __lock(volatile int *l)
{
- if (!libc.threads_minus_1) return;
+ int need_locks = libc.need_locks;
+ if (!need_locks) return;
/* fast path: INT_MIN for the lock, +1 for the congestion */
int current = a_cas(l, 0, INT_MIN + 1);
+ if (need_locks < 0) libc.need_locks = 0;
if (!current) return;
/* A first spin loop, for medium congestion. */
for (unsigned i = 0; i < 10; ++i) {
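Why the flag has three states: libc.need_locks is 1 while more than one thread exists, 0 when locking may be skipped, and -1 transiently after the last other thread exits. The -1 forces each lock user through one more synchronizing atomic before the process is treated as single-threaded again. A minimal standalone model of that life cycle (names mirror the diff; a_cas is musl's atomic compare-and-swap, declared here as an assumption):

    #include <limits.h>

    /* a_cas: atomic compare-and-swap returning the old value, as provided
     * by musl's arch-specific atomic.h (declared here as an assumption). */
    int a_cas(volatile int *p, int t, int s);

    static volatile int need_locks;  /* 1, 0, or -1 as described above */
    static int threads_minus_1;

    static void on_thread_create(void) { if (!threads_minus_1++) need_locks = 1; }
    static void on_thread_exit(void)   { if (!--threads_minus_1) need_locks = -1; }

    static void lock_sketch(volatile int *l)
    {
        int nl = need_locks;
        if (!nl) return;                    /* single-threaded: skip locking */
        int cur = a_cas(l, 0, INT_MIN + 1); /* INT_MIN = lock, +1 = congestion */
        /* The atomic read-modify-write above synchronizes with the exited
         * thread's final stores, so locking may now be skipped for good. */
        if (nl < 0) need_locks = 0;
        if (!cur) return;
        /* ... spin/futex contended path as in the original ... */
    }

The create/exit transitions shown here correspond to the threads_minus_1 hunks in pthread_create.c further down.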
diff --git a/src/thread/i386/__set_thread_area.s b/src/thread/i386/__set_thread_area.s
index c2c21dd5..aa6852be 100644
--- a/src/thread/i386/__set_thread_area.s
+++ b/src/thread/i386/__set_thread_area.s
@@ -28,6 +28,7 @@ __set_thread_area:
ret
2:
mov %ebx,%ecx
+ xor %eax,%eax
xor %ebx,%ebx
xor %edx,%edx
mov %ebx,(%esp)
diff --git a/src/thread/loongarch64/__set_thread_area.s b/src/thread/loongarch64/__set_thread_area.s
new file mode 100644
index 00000000..021307fc
--- /dev/null
+++ b/src/thread/loongarch64/__set_thread_area.s
@@ -0,0 +1,7 @@
+.global __set_thread_area
+.hidden __set_thread_area
+.type __set_thread_area,@function
+__set_thread_area:
+ move $tp, $a0
+ move $a0, $zero
+ jr $ra
diff --git a/src/thread/loongarch64/__unmapself.s b/src/thread/loongarch64/__unmapself.s
new file mode 100644
index 00000000..719ad056
--- /dev/null
+++ b/src/thread/loongarch64/__unmapself.s
@@ -0,0 +1,7 @@
+.global __unmapself
+.type __unmapself, @function
+__unmapself:
+ li.d $a7, 215 # call munmap
+ syscall 0
+ li.d $a7, 93 # call exit
+ syscall 0
diff --git a/src/thread/loongarch64/clone.s b/src/thread/loongarch64/clone.s
new file mode 100644
index 00000000..a165b365
--- /dev/null
+++ b/src/thread/loongarch64/clone.s
@@ -0,0 +1,29 @@
+#__clone(func, stack, flags, arg, ptid, tls, ctid)
+# a0, a1, a2, a3, a4, a5, a6
+# sys_clone(flags, stack, ptid, ctid, tls)
+# a0, a1, a2, a3, a4
+
+.global __clone
+.hidden __clone
+.type __clone,@function
+__clone:
+ bstrins.d $a1, $zero, 3, 0 # align stack down to 16 bytes
+ # Save function pointer and argument pointer on new thread stack
+ addi.d $a1, $a1, -16
+ st.d $a0, $a1, 0 # save function pointer
+ st.d $a3, $a1, 8 # save argument pointer
+ or $a0, $a2, $zero
+ or $a2, $a4, $zero
+ or $a3, $a6, $zero
+ or $a4, $a5, $zero
+ ori $a7, $zero, 220
+ syscall 0 # call clone
+
+ beqz $a0, 1f # branch if child process
+ jirl $zero, $ra, 0 # parent process return
+1:
+ ld.d $t8, $sp, 0 # function pointer
+ ld.d $a0, $sp, 8 # argument pointer
+ jirl $ra, $t8, 0 # call the user's function
+ ori $a7, $zero, 93
+ syscall 0 # child process exit
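A user-space analogue of this contract, using the public Linux clone(2) wrapper rather than musl's internal __clone (illustration only; a glibc-style wrapper is assumed): the child starts in func(arg) on the caller-supplied stack and exits with its return value, exactly what the code after label 1: arranges.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
        printf("child: %s\n", (const char *)arg);
        return 0;                    /* becomes the child's exit status */
    }

    int main(void)
    {
        size_t sz = 256 * 1024;
        char *stack = malloc(sz);
        if (!stack) return 1;
        /* Pass the TOP of the stack; it grows down, as in the asm above. */
        int tid = clone(child_fn, stack + sz, CLONE_VM | SIGCHLD, "hello");
        if (tid < 0) { perror("clone"); return 1; }
        waitpid(tid, 0, 0);
        free(stack);
        return 0;
    }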
diff --git a/src/thread/loongarch64/syscall_cp.s b/src/thread/loongarch64/syscall_cp.s
new file mode 100644
index 00000000..c057a97b
--- /dev/null
+++ b/src/thread/loongarch64/syscall_cp.s
@@ -0,0 +1,29 @@
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.global __syscall_cp_asm
+.hidden __syscall_cp_asm
+.type __syscall_cp_asm,@function
+
+__syscall_cp_asm:
+__cp_begin:
+ ld.w $a0, $a0, 0
+ bnez $a0, __cp_cancel
+ move $t8, $a1 # save the system call number
+ move $a0, $a2
+ move $a1, $a3
+ move $a2, $a4
+ move $a3, $a5
+ move $a4, $a6
+ move $a5, $a7
+ move $a7, $t8
+ syscall 0
+__cp_end:
+ jr $ra
+__cp_cancel:
+ la.local $t8, __cancel
+ jr $t8
diff --git a/src/thread/pthread_atfork.c b/src/thread/pthread_atfork.c
index 76497401..26d32543 100644
--- a/src/thread/pthread_atfork.c
+++ b/src/thread/pthread_atfork.c
@@ -1,7 +1,13 @@
#include <pthread.h>
+#include <errno.h>
#include "libc.h"
#include "lock.h"
+#define malloc __libc_malloc
+#define calloc undef
+#define realloc undef
+#define free undef
+
static struct atfork_funcs {
void (*prepare)(void);
void (*parent)(void);
@@ -34,7 +40,7 @@ void __fork_handler(int who)
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
struct atfork_funcs *new = malloc(sizeof *new);
- if (!new) return -1;
+ if (!new) return ENOMEM;
LOCK(lock);
new->next = funcs;
diff --git a/src/thread/pthread_attr_get.c b/src/thread/pthread_attr_get.c
index 4aa5afdb..f12ff442 100644
--- a/src/thread/pthread_attr_get.c
+++ b/src/thread/pthread_attr_get.c
@@ -70,7 +70,7 @@ int pthread_condattr_getpshared(const pthread_condattr_t *restrict a, int *restr
int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *restrict a, int *restrict protocol)
{
- *protocol = PTHREAD_PRIO_NONE;
+ *protocol = a->__attr / 8U % 2;
return 0;
}
int pthread_mutexattr_getpshared(const pthread_mutexattr_t *restrict a, int *restrict pshared)
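The new getprotocol result is no longer hard-wired: a->__attr / 8U % 2 extracts bit 3 of the attribute word, the flag that pthread_mutexattr_setprotocol sets with |= 8 (see its hunk below), and in musl PTHREAD_PRIO_NONE and PTHREAD_PRIO_INHERIT are 0 and 1, so the extracted bit is already the protocol value. A quick equivalence check:

    #include <assert.h>

    int main(void)
    {
        /* __attr / 8U % 2 isolates bit 3, i.e. the PI flag. */
        for (unsigned attr = 0; attr < 256; attr++)
            assert(attr / 8U % 2 == ((attr >> 3) & 1));
        return 0;
    }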
diff --git a/src/thread/pthread_cancel.c b/src/thread/pthread_cancel.c
index 2f9d5e97..139a6fc8 100644
--- a/src/thread/pthread_cancel.c
+++ b/src/thread/pthread_cancel.c
@@ -56,7 +56,12 @@ static void cancel_handler(int sig, siginfo_t *si, void *ctx)
_sigaddset(&uc->uc_sigmask, SIGCANCEL);
- if (self->cancelasync || pc >= (uintptr_t)__cp_begin && pc < (uintptr_t)__cp_end) {
+ if (self->cancelasync) {
+ pthread_sigmask(SIG_SETMASK, &uc->uc_sigmask, 0);
+ __cancel();
+ }
+
+ if (pc >= (uintptr_t)__cp_begin && pc < (uintptr_t)__cp_end) {
uc->uc_mcontext.MC_PC = (uintptr_t)__cp_cancel;
#ifdef CANCEL_GOT
uc->uc_mcontext.MC_GOT = CANCEL_GOT;
@@ -77,7 +82,7 @@ void __testcancel()
static void init_cancellation()
{
struct sigaction sa = {
- .sa_flags = SA_SIGINFO | SA_RESTART,
+ .sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK,
.sa_sigaction = cancel_handler
};
memset(&sa.sa_mask, -1, _NSIG/8);
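The handler rewrite separates the two cancellation modes: asynchronous cancellation now unwinds immediately, while deferred cancellation only redirects the PC when it lies inside the [__cp_begin, __cp_end) window emitted by syscall_cp.s (see the loongarch64 and riscv32 versions in this diff). SA_ONSTACK makes the handler honor an alternate signal stack. At the API level the deferred case looks like this (runnable demo, not musl internals):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) sleep(1);   /* sleep() is a cancellation point */
    }

    int main(void)
    {
        pthread_t t;
        void *res;
        pthread_create(&t, 0, worker, 0);
        sleep(1);
        pthread_cancel(t);   /* delivered via SIGCANCEL to the handler above */
        pthread_join(t, &res);
        printf("canceled: %d\n", res == PTHREAD_CANCELED);
        return 0;
    }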
diff --git a/src/thread/pthread_cond_timedwait.c b/src/thread/pthread_cond_timedwait.c
index d1501240..6b761455 100644
--- a/src/thread/pthread_cond_timedwait.c
+++ b/src/thread/pthread_cond_timedwait.c
@@ -146,14 +146,18 @@ relock:
if (oldstate == WAITING) goto done;
- if (!node.next) a_inc(&m->_m_waiters);
+ if (!node.next && !(m->_m_type & 8))
+ a_inc(&m->_m_waiters);
/* Unlock the barrier that's holding back the next waiter, and
* either wake it or requeue it to the mutex. */
- if (node.prev)
- unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & 128);
- else
- a_dec(&m->_m_waiters);
+ if (node.prev) {
+ int val = m->_m_lock;
+ if (val>0) a_cas(&m->_m_lock, val, val|0x80000000);
+ unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & (8|128));
+ } else if (!(m->_m_type & 8)) {
+ a_dec(&m->_m_waiters);
+ }
/* Since a signal was consumed, cancellation is not permitted. */
if (e == ECANCELED) e = 0;
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 5f491092..087f6206 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -69,15 +69,29 @@ _Noreturn void __pthread_exit(void *result)
__pthread_tsd_run_dtors();
+ __block_app_sigs(&set);
+
+ /* This atomic potentially competes with a concurrent pthread_detach
+ * call; the loser is responsible for freeing thread resources. */
+ int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+
+ if (state==DT_DETACHED && self->map_base) {
+ /* Since __unmapself bypasses the normal munmap code path,
+ * explicitly wait for vmlock holders first. This must be
+ * done before any locks are taken, to avoid lock ordering
+ * issues that could lead to deadlock. */
+ __vm_wait();
+ }
+
/* Access to target the exiting thread with syscalls that use
* its kernel tid is controlled by killlock. For detached threads,
* any use past this point would have undefined behavior, but for
- * joinable threads it's a valid usage that must be handled. */
+ * joinable threads it's a valid usage that must be handled.
+ * Signals must be blocked since pthread_kill must be AS-safe. */
LOCK(self->killlock);
- /* The thread list lock must be AS-safe, and thus requires
- * application signals to be blocked before it can be taken. */
- __block_app_sigs(&set);
+ /* The thread list lock must be AS-safe, and thus depends on
+ * application signals being blocked above. */
__tl_lock();
/* If this is the only thread in the list, don't proceed with
@@ -85,19 +99,23 @@ _Noreturn void __pthread_exit(void *result)
* signal state to prepare for exit to call atexit handlers. */
if (self->next == self) {
__tl_unlock();
- __restore_sigs(&set);
UNLOCK(self->killlock);
+ self->detach_state = state;
+ __restore_sigs(&set);
exit(0);
}
- /* At this point we are committed to thread termination. Unlink
- * the thread from the list. This change will not be visible
- * until the lock is released, which only happens after SYS_exit
- * has been called, via the exit futex address pointing at the lock. */
- libc.threads_minus_1--;
- self->next->prev = self->prev;
- self->prev->next = self->next;
- self->prev = self->next = self;
+ /* At this point we are committed to thread termination. */
+
+ /* After the kernel thread exits, its tid may be reused. Clear it
+ * to prevent inadvertent use and inform functions that would use
+ * it that it's no longer available. At this point the killlock
+ * may be released, since functions that use it will consistently
+ * see the thread as having exited. Release it now so that no
+ * remaining locks (except thread list) are held if we end up
+ * resetting need_locks below. */
+ self->tid = 0;
+ UNLOCK(self->killlock);
/* Process robust list in userspace to handle non-pshared mutexes
* and the detached thread case where the robust list head will
@@ -121,9 +139,15 @@ _Noreturn void __pthread_exit(void *result)
__do_orphaned_stdio_locks();
__dl_thread_cleanup();
- /* This atomic potentially competes with a concurrent pthread_detach
- * call; the loser is responsible for freeing thread resources. */
- int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+ /* Last, unlink thread from the list. This change will not be visible
+ * until the lock is released, which only happens after SYS_exit
+ * has been called, via the exit futex address pointing at the lock.
+ * This needs to happen after any possible calls to LOCK() that might
+ * skip locking if process appears single-threaded. */
+ if (!--libc.threads_minus_1) libc.need_locks = -1;
+ self->next->prev = self->prev;
+ self->prev->next = self->next;
+ self->prev = self->next = self;
if (state==DT_DETACHED && self->map_base) {
/* Detached threads must block even implementation-internal
@@ -136,24 +160,15 @@ _Noreturn void __pthread_exit(void *result)
if (self->robust_list.off)
__syscall(SYS_set_robust_list, 0, 3*sizeof(long));
- /* Since __unmapself bypasses the normal munmap code path,
- * explicitly wait for vmlock holders first. */
- __vm_wait();
-
/* The following call unmaps the thread's stack mapping
* and then exits without touching the stack. */
__unmapself(self->map_base, self->map_size);
}
/* Wake any joiner. */
+ a_store(&self->detach_state, DT_EXITED);
__wake(&self->detach_state, 1, 1);
- /* After the kernel thread exits, its tid may be reused. Clear it
- * to prevent inadvertent use and inform functions that would use
- * it that it's no longer available. */
- self->tid = 0;
- UNLOCK(self->killlock);
-
for (;;) __syscall(SYS_exit, 0);
}
@@ -310,7 +325,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
new->detach_state = DT_JOINABLE;
}
new->robust_list.head = &new->robust_list.head;
- new->CANARY = self->CANARY;
+ new->canary = self->canary;
new->sysinfo = self->sysinfo;
/* Setup argument structure for the new thread on its stack.
@@ -336,7 +351,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
__tl_lock();
- libc.threads_minus_1++;
+ if (!libc.threads_minus_1++) libc.need_locks = 1;
ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -360,7 +375,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
new->next->prev = new;
new->prev->next = new;
} else {
- libc.threads_minus_1--;
+ if (!--libc.threads_minus_1) libc.need_locks = 0;
}
__tl_unlock();
__restore_sigs(&set);
diff --git a/src/thread/pthread_detach.c b/src/thread/pthread_detach.c
index 77772af2..d73a500e 100644
--- a/src/thread/pthread_detach.c
+++ b/src/thread/pthread_detach.c
@@ -5,8 +5,12 @@ static int __pthread_detach(pthread_t t)
{
/* If the cas fails, detach state is either already-detached
* or exiting/exited, and pthread_join will trap or cleanup. */
- if (a_cas(&t->detach_state, DT_JOINABLE, DT_DETACHED) != DT_JOINABLE)
- return __pthread_join(t, 0);
+ if (a_cas(&t->detach_state, DT_JOINABLE, DT_DETACHED) != DT_JOINABLE) {
+ int cs;
+ __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+ __pthread_join(t, 0);
+ __pthread_setcancelstate(cs, 0);
+ }
return 0;
}
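The fix makes pthread_detach of an already-exiting thread reap it via __pthread_join without letting that join act as a cancellation point. The save/disable/restore pattern in isolation (hypothetical helper, illustration only):

    #include <pthread.h>

    /* Run op() to completion even if the calling thread has a pending
     * cancellation request. */
    static void call_uncancelable(void (*op)(void))
    {
        int cs;
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
        op();   /* may contain cancellation points; they are now disabled */
        pthread_setcancelstate(cs, 0);
    }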
diff --git a/src/thread/pthread_getname_np.c b/src/thread/pthread_getname_np.c
new file mode 100644
index 00000000..85504e45
--- /dev/null
+++ b/src/thread/pthread_getname_np.c
@@ -0,0 +1,25 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+
+#include "pthread_impl.h"
+
+int pthread_getname_np(pthread_t thread, char *name, size_t len)
+{
+ int fd, cs, status = 0;
+ char f[sizeof "/proc/self/task//comm" + 3*sizeof(int)];
+
+ if (len < 16) return ERANGE;
+
+ if (thread == pthread_self())
+ return prctl(PR_GET_NAME, (unsigned long)name, 0UL, 0UL, 0UL) ? errno : 0;
+
+ snprintf(f, sizeof f, "/proc/self/task/%d/comm", thread->tid);
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+ if ((fd = open(f, O_RDONLY|O_CLOEXEC)) < 0 || (len = read(fd, name, len)) == -1) status = errno;
+ else name[len-1] = 0; /* remove trailing newline only on success */
+ if (fd >= 0) close(fd);
+ pthread_setcancelstate(cs, 0);
+ return status;
+}
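Usage of the new function, paired with the pre-existing pthread_setname_np (Linux thread names are at most 15 characters plus NUL, hence the len < 16 check returning ERANGE):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        char name[16];   /* a smaller buffer would fail with ERANGE */
        pthread_setname_np(pthread_self(), "worker-0");
        if (!pthread_getname_np(pthread_self(), name, sizeof name))
            printf("thread name: %s\n", name);
        return 0;
    }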
diff --git a/src/thread/pthread_getschedparam.c b/src/thread/pthread_getschedparam.c
index 1cba073d..c098befb 100644
--- a/src/thread/pthread_getschedparam.c
+++ b/src/thread/pthread_getschedparam.c
@@ -4,6 +4,8 @@
int pthread_getschedparam(pthread_t t, int *restrict policy, struct sched_param *restrict param)
{
int r;
+ sigset_t set;
+ __block_app_sigs(&set);
LOCK(t->killlock);
if (!t->tid) {
r = ESRCH;
@@ -14,5 +16,6 @@ int pthread_getschedparam(pthread_t t, int *restrict policy, struct sched_param
}
}
UNLOCK(t->killlock);
+ __restore_sigs(&set);
return r;
}
diff --git a/src/thread/pthread_key_create.c b/src/thread/pthread_key_create.c
index d1120941..39770c7a 100644
--- a/src/thread/pthread_key_create.c
+++ b/src/thread/pthread_key_create.c
@@ -1,4 +1,5 @@
#include "pthread_impl.h"
+#include "fork_impl.h"
volatile size_t __pthread_tsd_size = sizeof(void *) * PTHREAD_KEYS_MAX;
void *__pthread_tsd_main[PTHREAD_KEYS_MAX] = { 0 };
@@ -20,6 +21,13 @@ static void dummy_0(void)
weak_alias(dummy_0, __tl_lock);
weak_alias(dummy_0, __tl_unlock);
+void __pthread_key_atfork(int who)
+{
+ if (who<0) __pthread_rwlock_rdlock(&key_lock);
+ else if (!who) __pthread_rwlock_unlock(&key_lock);
+ else key_lock = (pthread_rwlock_t)PTHREAD_RWLOCK_INITIALIZER;
+}
+
int __pthread_key_create(pthread_key_t *k, void (*dtor)(void *))
{
pthread_t self = __pthread_self();
diff --git a/src/thread/pthread_kill.c b/src/thread/pthread_kill.c
index 3d9395cb..79ddb209 100644
--- a/src/thread/pthread_kill.c
+++ b/src/thread/pthread_kill.c
@@ -4,9 +4,15 @@
int pthread_kill(pthread_t t, int sig)
{
int r;
+ sigset_t set;
+ /* Block not just app signals, but internal ones too, since
+ * pthread_kill is used to implement pthread_cancel, which
+ * must be async-cancel-safe. */
+ __block_all_sigs(&set);
LOCK(t->killlock);
r = t->tid ? -__syscall(SYS_tkill, t->tid, sig)
: (sig+0U >= _NSIG ? EINVAL : 0);
UNLOCK(t->killlock);
+ __restore_sigs(&set);
return r;
}
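This signal-blocking-around-killlock pattern also appears in pthread_getschedparam above and in pthread_setschedparam/pthread_setschedprio below; pthread_kill blocks all signals rather than just application ones, since it must be async-signal-safe. The shape of the pattern, using public pthread_sigmask in place of musl's internal __block_all_sigs/__restore_sigs (sketch, not the musl source):

    #include <pthread.h>
    #include <signal.h>

    /* critical() stands in for the LOCK/UNLOCK of killlock; blocking
     * signals first means a signal handler cannot re-enter and deadlock
     * on the same lock. */
    static void with_signals_blocked(void (*critical)(void))
    {
        sigset_t all, old;
        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &old);
        critical();
        pthread_sigmask(SIG_SETMASK, &old, 0);
    }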
diff --git a/src/thread/pthread_mutex_destroy.c b/src/thread/pthread_mutex_destroy.c
index 6d49e689..8d1bf77b 100644
--- a/src/thread/pthread_mutex_destroy.c
+++ b/src/thread/pthread_mutex_destroy.c
@@ -1,6 +1,10 @@
-#include <pthread.h>
+#include "pthread_impl.h"
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
+ /* If the mutex being destroyed is process-shared and has nontrivial
+ * type (tracking ownership), it might be in the pending slot of a
+ * robust_list; wait for quiescence. */
+ if (mutex->_m_type > 128) __vm_wait();
return 0;
}
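Decoding the _m_type > 128 test from the flag values visible elsewhere in this diff (128 = process-shared, 4 = robust, 8 = PI, low bits = kind): only a mutex that is both process-shared and ownership-tracking can sit in another process's robust_list pending slot. A check of that reasoning with hypothetical sample values:

    #include <assert.h>

    int main(void)
    {
        int pshared_normal = 128;      /* pshared, trivial kind: no tracking */
        int pshared_robust = 128 | 4;  /* pshared + robust: tracked */
        assert(!(pshared_normal > 128));
        assert(pshared_robust > 128);  /* only this one needs __vm_wait() */
        return 0;
    }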
diff --git a/src/thread/pthread_mutexattr_setprotocol.c b/src/thread/pthread_mutexattr_setprotocol.c
index 511cc32d..8b80c1ce 100644
--- a/src/thread/pthread_mutexattr_setprotocol.c
+++ b/src/thread/pthread_mutexattr_setprotocol.c
@@ -1,24 +1,23 @@
#include "pthread_impl.h"
#include "syscall.h"
-static pthread_once_t check_pi_once;
-static int check_pi_result;
-
-static void check_pi()
-{
- volatile int lk = 0;
- check_pi_result = -__syscall(SYS_futex, &lk, FUTEX_LOCK_PI, 0, 0);
-}
+static volatile int check_pi_result = -1;
int pthread_mutexattr_setprotocol(pthread_mutexattr_t *a, int protocol)
{
+ int r;
switch (protocol) {
case PTHREAD_PRIO_NONE:
a->__attr &= ~8;
return 0;
case PTHREAD_PRIO_INHERIT:
- pthread_once(&check_pi_once, check_pi);
- if (check_pi_result) return check_pi_result;
+ r = check_pi_result;
+ if (r < 0) {
+ volatile int lk = 0;
+ r = -__syscall(SYS_futex, &lk, FUTEX_LOCK_PI, 0, 0);
+ a_store(&check_pi_result, r);
+ }
+ if (r) return r;
a->__attr |= 8;
return 0;
case PTHREAD_PRIO_PROTECT:
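Both this file and pthread_mutexattr_setrobust.c below replace pthread_once with a benign-race cache: -1 marks "not yet probed", every racer computes the same nonnegative value, so no lock or once-control is needed. The pattern in isolation (hypothetical probe(); musl uses a_store for the write):

    static volatile int probe_result = -1;  /* -1 = not yet probed */

    static int cached_probe(int (*probe)(void))
    {
        int r = probe_result;
        if (r < 0) {
            r = probe();        /* must be deterministic and >= 0 */
            probe_result = r;   /* racers store the same value, so the
                                   race is benign */
        }
        return r;
    }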
diff --git a/src/thread/pthread_mutexattr_setrobust.c b/src/thread/pthread_mutexattr_setrobust.c
index 04db92a6..30a9ac3b 100644
--- a/src/thread/pthread_mutexattr_setrobust.c
+++ b/src/thread/pthread_mutexattr_setrobust.c
@@ -1,22 +1,20 @@
#include "pthread_impl.h"
#include "syscall.h"
-static pthread_once_t check_robust_once;
-static int check_robust_result;
-
-static void check_robust()
-{
- void *p;
- size_t l;
- check_robust_result = -__syscall(SYS_get_robust_list, 0, &p, &l);
-}
+static volatile int check_robust_result = -1;
int pthread_mutexattr_setrobust(pthread_mutexattr_t *a, int robust)
{
if (robust > 1U) return EINVAL;
if (robust) {
- pthread_once(&check_robust_once, check_robust);
- if (check_robust_result) return check_robust_result;
+ int r = check_robust_result;
+ if (r < 0) {
+ void *p;
+ size_t l;
+ r = -__syscall(SYS_get_robust_list, 0, &p, &l);
+ a_store(&check_robust_result, r);
+ }
+ if (r) return r;
a->__attr |= 4;
return 0;
}
diff --git a/src/thread/pthread_setname_np.c b/src/thread/pthread_setname_np.c
index 82d35e17..fc2d2306 100644
--- a/src/thread/pthread_setname_np.c
+++ b/src/thread/pthread_setname_np.c
@@ -19,7 +19,7 @@ int pthread_setname_np(pthread_t thread, const char *name)
snprintf(f, sizeof f, "/proc/self/task/%d/comm", thread->tid);
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
- if ((fd = open(f, O_WRONLY)) < 0 || write(fd, name, len) < 0) status = errno;
+ if ((fd = open(f, O_WRONLY|O_CLOEXEC)) < 0 || write(fd, name, len) < 0) status = errno;
if (fd >= 0) close(fd);
pthread_setcancelstate(cs, 0);
return status;
diff --git a/src/thread/pthread_setschedparam.c b/src/thread/pthread_setschedparam.c
index 038d13d8..76d4d45a 100644
--- a/src/thread/pthread_setschedparam.c
+++ b/src/thread/pthread_setschedparam.c
@@ -4,8 +4,11 @@
int pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
int r;
+ sigset_t set;
+ __block_app_sigs(&set);
LOCK(t->killlock);
r = !t->tid ? ESRCH : -__syscall(SYS_sched_setscheduler, t->tid, policy, param);
UNLOCK(t->killlock);
+ __restore_sigs(&set);
return r;
}
diff --git a/src/thread/pthread_setschedprio.c b/src/thread/pthread_setschedprio.c
index 5bf4a019..fc2e13dd 100644
--- a/src/thread/pthread_setschedprio.c
+++ b/src/thread/pthread_setschedprio.c
@@ -4,8 +4,11 @@
int pthread_setschedprio(pthread_t t, int prio)
{
int r;
+ sigset_t set;
+ __block_app_sigs(&set);
LOCK(t->killlock);
r = !t->tid ? ESRCH : -__syscall(SYS_sched_setparam, t->tid, &prio);
UNLOCK(t->killlock);
+ __restore_sigs(&set);
return r;
}
diff --git a/src/thread/riscv32/__set_thread_area.s b/src/thread/riscv32/__set_thread_area.s
new file mode 100644
index 00000000..828154d2
--- /dev/null
+++ b/src/thread/riscv32/__set_thread_area.s
@@ -0,0 +1,6 @@
+.global __set_thread_area
+.type __set_thread_area, %function
+__set_thread_area:
+ mv tp, a0
+ li a0, 0
+ ret
diff --git a/src/thread/riscv32/__unmapself.s b/src/thread/riscv32/__unmapself.s
new file mode 100644
index 00000000..2849119c
--- /dev/null
+++ b/src/thread/riscv32/__unmapself.s
@@ -0,0 +1,7 @@
+.global __unmapself
+.type __unmapself, %function
+__unmapself:
+ li a7, 215 # SYS_munmap
+ ecall
+ li a7, 93 # SYS_exit
+ ecall
diff --git a/src/thread/riscv32/clone.s b/src/thread/riscv32/clone.s
new file mode 100644
index 00000000..3102239d
--- /dev/null
+++ b/src/thread/riscv32/clone.s
@@ -0,0 +1,34 @@
+# __clone(func, stack, flags, arg, ptid, tls, ctid)
+# a0, a1, a2, a3, a4, a5, a6
+
+# syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+# a7 a0, a1, a2, a3, a4
+
+.global __clone
+.type __clone, %function
+__clone:
+ # Save func and arg to stack
+ addi a1, a1, -16
+ sw a0, 0(a1)
+ sw a3, 4(a1)
+
+ # Call SYS_clone
+ mv a0, a2
+ mv a2, a4
+ mv a3, a5
+ mv a4, a6
+ li a7, 220 # SYS_clone
+ ecall
+
+ beqz a0, 1f
+ # Parent
+ ret
+
+ # Child
+1: lw a1, 0(sp)
+ lw a0, 4(sp)
+ jalr a1
+
+ # Exit
+ li a7, 93 # SYS_exit
+ ecall
diff --git a/src/thread/riscv32/syscall_cp.s b/src/thread/riscv32/syscall_cp.s
new file mode 100644
index 00000000..079d1ba0
--- /dev/null
+++ b/src/thread/riscv32/syscall_cp.s
@@ -0,0 +1,29 @@
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.global __syscall_cp_asm
+.hidden __syscall_cp_asm
+.type __syscall_cp_asm, %function
+__syscall_cp_asm:
+__cp_begin:
+ lw t0, 0(a0)
+ bnez t0, __cp_cancel
+
+ mv t0, a1
+ mv a0, a2
+ mv a1, a3
+ mv a2, a4
+ mv a3, a5
+ mv a4, a6
+ mv a5, a7
+ lw a6, 0(sp)
+ mv a7, t0
+ ecall
+__cp_end:
+ ret
+__cp_cancel:
+ tail __cancel
diff --git a/src/thread/s390x/clone.s b/src/thread/s390x/clone.s
index 577748ea..2125f20b 100644
--- a/src/thread/s390x/clone.s
+++ b/src/thread/s390x/clone.s
@@ -17,6 +17,9 @@ __clone:
# if (!tid) syscall(SYS_exit, a(d));
# return tid;
+ # preserve call-saved register used as syscall arg
+ stg %r6, 48(%r15)
+
# create initial stack frame for new thread
nill %r3, 0xfff8
aghi %r3, -160
@@ -35,6 +38,9 @@ __clone:
lg %r6, 160(%r15)
svc 120
+ # restore call-saved register
+ lg %r6, 48(%r15)
+
# if error or if we're the parent, return
ltgr %r2, %r2
bnzr %r14
diff --git a/src/thread/s390x/syscall_cp.s b/src/thread/s390x/syscall_cp.s
index c1da40de..d094cbf5 100644
--- a/src/thread/s390x/syscall_cp.s
+++ b/src/thread/s390x/syscall_cp.s
@@ -14,6 +14,7 @@ __cp_begin:
icm %r2, 15, 0(%r2)
jne __cp_cancel
+ stg %r6, 48(%r15)
stg %r7, 56(%r15)
lgr %r1, %r3
lgr %r2, %r4
@@ -26,6 +27,7 @@ __cp_begin:
__cp_end:
lg %r7, 56(%r15)
+ lg %r6, 48(%r15)
br %r14
__cp_cancel:
diff --git a/src/thread/sem_getvalue.c b/src/thread/sem_getvalue.c
index d9d83071..c0b7762d 100644
--- a/src/thread/sem_getvalue.c
+++ b/src/thread/sem_getvalue.c
@@ -1,8 +1,9 @@
#include <semaphore.h>
+#include <limits.h>
int sem_getvalue(sem_t *restrict sem, int *restrict valp)
{
int val = sem->__val[0];
- *valp = val < 0 ? 0 : val;
+ *valp = val & SEM_VALUE_MAX;
return 0;
}
diff --git a/src/thread/sem_open.c b/src/thread/sem_open.c
index de8555c5..0ad29de9 100644
--- a/src/thread/sem_open.c
+++ b/src/thread/sem_open.c
@@ -12,6 +12,12 @@
#include <stdlib.h>
#include <pthread.h>
#include "lock.h"
+#include "fork_impl.h"
+
+#define malloc __libc_malloc
+#define calloc __libc_calloc
+#define realloc undef
+#define free undef
static struct {
ino_t ino;
@@ -19,6 +25,7 @@ static struct {
int refcnt;
} *semtab;
static volatile int lock[1];
+volatile int *const __sem_open_lockptr = lock;
#define FLAGS (O_RDWR|O_NOFOLLOW|O_CLOEXEC|O_NONBLOCK)
@@ -163,10 +170,12 @@ int sem_close(sem_t *sem)
int i;
LOCK(lock);
for (i=0; i<SEM_NSEMS_MAX && semtab[i].sem != sem; i++);
- if (!--semtab[i].refcnt) {
- semtab[i].sem = 0;
- semtab[i].ino = 0;
+ if (--semtab[i].refcnt) {
+ UNLOCK(lock);
+ return 0;
}
+ semtab[i].sem = 0;
+ semtab[i].ino = 0;
UNLOCK(lock);
munmap(sem, sizeof *sem);
return 0;
diff --git a/src/thread/sem_post.c b/src/thread/sem_post.c
index 31e3293d..5c2517f2 100644
--- a/src/thread/sem_post.c
+++ b/src/thread/sem_post.c
@@ -1,17 +1,21 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
int sem_post(sem_t *sem)
{
- int val, waiters, priv = sem->__val[2];
+ int val, new, waiters, priv = sem->__val[2];
do {
val = sem->__val[0];
waiters = sem->__val[1];
- if (val == SEM_VALUE_MAX) {
+ if ((val & SEM_VALUE_MAX) == SEM_VALUE_MAX) {
errno = EOVERFLOW;
return -1;
}
- } while (a_cas(sem->__val, val, val+1+(val<0)) != val);
- if (val<0 || waiters) __wake(sem->__val, 1, priv);
+ new = val + 1;
+ if (waiters <= 1)
+ new &= ~0x80000000;
+ } while (a_cas(sem->__val, val, new) != val);
+ if (val<0) __wake(sem->__val, waiters>1 ? 1 : -1, priv);
return 0;
}
diff --git a/src/thread/sem_timedwait.c b/src/thread/sem_timedwait.c
index 58d3ebfe..aa67376c 100644
--- a/src/thread/sem_timedwait.c
+++ b/src/thread/sem_timedwait.c
@@ -1,4 +1,5 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
static void cleanup(void *p)
@@ -13,14 +14,15 @@ int sem_timedwait(sem_t *restrict sem, const struct timespec *restrict at)
if (!sem_trywait(sem)) return 0;
int spins = 100;
- while (spins-- && sem->__val[0] <= 0 && !sem->__val[1]) a_spin();
+ while (spins-- && !(sem->__val[0] & SEM_VALUE_MAX) && !sem->__val[1])
+ a_spin();
while (sem_trywait(sem)) {
- int r;
+ int r, priv = sem->__val[2];
a_inc(sem->__val+1);
- a_cas(sem->__val, 0, -1);
+ a_cas(sem->__val, 0, 0x80000000);
pthread_cleanup_push(cleanup, (void *)(sem->__val+1));
- r = __timedwait_cp(sem->__val, -1, CLOCK_REALTIME, at, sem->__val[2]);
+ r = __timedwait_cp(sem->__val, 0x80000000, CLOCK_REALTIME, at, priv);
pthread_cleanup_pop(1);
if (r) {
errno = r;
diff --git a/src/thread/sem_trywait.c b/src/thread/sem_trywait.c
index 04edf46b..beb435da 100644
--- a/src/thread/sem_trywait.c
+++ b/src/thread/sem_trywait.c
@@ -1,12 +1,12 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
int sem_trywait(sem_t *sem)
{
int val;
- while ((val=sem->__val[0]) > 0) {
- int new = val-1-(val==1 && sem->__val[1]);
- if (a_cas(sem->__val, val, new)==val) return 0;
+ while ((val=sem->__val[0]) & SEM_VALUE_MAX) {
+ if (a_cas(sem->__val, val, val-1)==val) return 0;
}
errno = EAGAIN;
return -1;
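The sem_getvalue, sem_post, sem_timedwait and sem_trywait hunks above share one change of representation: bit 31 of __val[0] becomes a waiters-present flag and the count lives in the low 31 bits, so the value is read as val & SEM_VALUE_MAX (0x7fffffff in musl). sem_timedwait now parks on 0x80000000 rather than -1, and sem_post clears the flag when at most one waiter remains, waking all (-1) when the waiter count cannot be trusted. A tiny demonstration of the encoding (constants inlined, illustrative only):

    #include <assert.h>

    int main(void)
    {
        /* Hypothetical state: 3 tokens available, waiters present. */
        int val = (int)(0x80000000u | 3);
        assert((val & 0x7fffffff) == 3);  /* sem_getvalue now reports 3 */
        assert(val < 0);                  /* waiter flag: post must wake */
        return 0;
    }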
diff --git a/src/thread/synccall.c b/src/thread/synccall.c
index 648a6ad4..38597254 100644
--- a/src/thread/synccall.c
+++ b/src/thread/synccall.c
@@ -11,7 +11,7 @@ weak_alias(dummy_0, __tl_unlock);
static int target_tid;
static void (*callback)(void *), *context;
-static sem_t target_sem, caller_sem;
+static sem_t target_sem, caller_sem, exit_sem;
static void dummy(void *p)
{
@@ -33,7 +33,7 @@ static void handler(int sig)
/* Inform caller we've completed the callback and wait
* for the caller to release us to return. */
sem_post(&caller_sem);
- sem_wait(&target_sem);
+ sem_wait(&exit_sem);
/* Inform caller we are returning and state is destroyable. */
sem_post(&caller_sem);
@@ -45,7 +45,7 @@ void __synccall(void (*func)(void *), void *ctx)
{
sigset_t oldmask;
int cs, i, r;
- struct sigaction sa = { .sa_flags = SA_RESTART, .sa_handler = handler };
+ struct sigaction sa = { .sa_flags = SA_RESTART | SA_ONSTACK, .sa_handler = handler };
pthread_t self = __pthread_self(), td;
int count = 0;
@@ -62,8 +62,10 @@ void __synccall(void (*func)(void *), void *ctx)
sem_init(&target_sem, 0, 0);
sem_init(&caller_sem, 0, 0);
+ sem_init(&exit_sem, 0, 0);
- if (!libc.threads_minus_1) goto single_threaded;
+ if (!libc.threads_minus_1 || __syscall(SYS_gettid) != self->tid)
+ goto single_threaded;
callback = func;
context = ctx;
@@ -106,12 +108,13 @@ single_threaded:
/* Only release the caught threads once all threads, including the
* caller, have returned from the callback function. */
for (i=0; i<count; i++)
- sem_post(&target_sem);
+ sem_post(&exit_sem);
for (i=0; i<count; i++)
sem_wait(&caller_sem);
sem_destroy(&caller_sem);
sem_destroy(&target_sem);
+ sem_destroy(&exit_sem);
pthread_setcancelstate(cs, 0);
__tl_unlock();
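The third semaphore fixes a phase-confusion hazard: with target_sem doing double duty, a release post intended for a thread already past its callback could be consumed by a thread still waiting to start. With a dedicated exit_sem the two waits can never steal each other's posts. A simplified runnable model of the rendezvous (the real __synccall admits threads one at a time from a signal handler):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t target_sem, caller_sem, exit_sem;

    static void *caught(void *arg)
    {
        (void)arg;
        sem_wait(&target_sem);   /* admitted by the caller */
        /* ...callback would run here... */
        sem_post(&caller_sem);   /* phase 1 done: callback has run */
        sem_wait(&exit_sem);     /* held until every callback has run */
        sem_post(&caller_sem);   /* phase 2 done: state is destroyable */
        return 0;
    }

    int main(void)
    {
        enum { N = 3 };
        pthread_t t[N];
        sem_init(&target_sem, 0, 0);
        sem_init(&caller_sem, 0, 0);
        sem_init(&exit_sem, 0, 0);
        for (int i = 0; i < N; i++) pthread_create(&t[i], 0, caught, 0);
        for (int i = 0; i < N; i++) sem_post(&target_sem); /* admit all */
        for (int i = 0; i < N; i++) sem_wait(&caller_sem); /* all ran */
        for (int i = 0; i < N; i++) sem_post(&exit_sem);   /* release all */
        for (int i = 0; i < N; i++) sem_wait(&caller_sem); /* all done */
        for (int i = 0; i < N; i++) pthread_join(t[i], 0);
        puts("rendezvous complete");
        return 0;
    }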
diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
index 75f3cb76..fa0a8e3c 100644
--- a/src/thread/vmlock.c
+++ b/src/thread/vmlock.c
@@ -1,6 +1,8 @@
#include "pthread_impl.h"
+#include "fork_impl.h"
static volatile int vmlock[2];
+volatile int *const __vmlock_lockptr = vmlock;
void __vm_wait()
{