Diffstat (limited to 'src/thread')
-rw-r--r--  src/thread/loongarch64/__set_thread_area.s | 7
-rw-r--r--  src/thread/loongarch64/__unmapself.s | 7
-rw-r--r--  src/thread/loongarch64/clone.s | 29
-rw-r--r--  src/thread/loongarch64/syscall_cp.s | 29
-rw-r--r--  src/thread/pthread_atfork.c | 8
-rw-r--r--  src/thread/pthread_attr_get.c | 2
-rw-r--r--  src/thread/pthread_cancel.c | 9
-rw-r--r--  src/thread/pthread_create.c | 16
-rw-r--r--  src/thread/pthread_detach.c | 8
-rw-r--r--  src/thread/pthread_getname_np.c | 25
-rw-r--r--  src/thread/pthread_key_create.c | 8
-rw-r--r--  src/thread/pthread_setname_np.c | 2
-rw-r--r--  src/thread/riscv32/__set_thread_area.s | 6
-rw-r--r--  src/thread/riscv32/__unmapself.s | 7
-rw-r--r--  src/thread/riscv32/clone.s | 34
-rw-r--r--  src/thread/riscv32/syscall_cp.s | 29
-rw-r--r--  src/thread/s390x/clone.s | 6
-rw-r--r--  src/thread/s390x/syscall_cp.s | 2
-rw-r--r--  src/thread/sem_getvalue.c | 3
-rw-r--r--  src/thread/sem_post.c | 12
-rw-r--r--  src/thread/sem_timedwait.c | 10
-rw-r--r--  src/thread/sem_trywait.c | 6
-rw-r--r--  src/thread/synccall.c | 10
23 files changed, 246 insertions, 29 deletions
diff --git a/src/thread/loongarch64/__set_thread_area.s b/src/thread/loongarch64/__set_thread_area.s
new file mode 100644
index 00000000..021307fc
--- /dev/null
+++ b/src/thread/loongarch64/__set_thread_area.s
@@ -0,0 +1,7 @@
+.global __set_thread_area
+.hidden __set_thread_area
+.type __set_thread_area,@function
+__set_thread_area:
+ move $tp, $a0
+ move $a0, $zero
+ jr $ra
diff --git a/src/thread/loongarch64/__unmapself.s b/src/thread/loongarch64/__unmapself.s
new file mode 100644
index 00000000..719ad056
--- /dev/null
+++ b/src/thread/loongarch64/__unmapself.s
@@ -0,0 +1,7 @@
+.global __unmapself
+.type __unmapself, @function
+__unmapself:
+ li.d $a7, 215 # call munmap
+ syscall 0
+ li.d $a7, 93 # call exit
+ syscall 0
diff --git a/src/thread/loongarch64/clone.s b/src/thread/loongarch64/clone.s
new file mode 100644
index 00000000..a165b365
--- /dev/null
+++ b/src/thread/loongarch64/clone.s
@@ -0,0 +1,29 @@
+#__clone(func, stack, flags, arg, ptid, tls, ctid)
+# a0, a1, a2, a3, a4, a5, a6
+# sys_clone(flags, stack, ptid, ctid, tls)
+# a0, a1, a2, a3, a4
+
+.global __clone
+.hidden __clone
+.type __clone,@function
+__clone:
+ bstrins.d $a1, $zero, 3, 0 # align stack down to 16 bytes
+ # Save function pointer and argument pointer on new thread stack
+ addi.d $a1, $a1, -16
+ st.d $a0, $a1, 0 # save function pointer
+ st.d $a3, $a1, 8 # save argument pointer
+ or $a0, $a2, $zero
+ or $a2, $a4, $zero
+ or $a3, $a6, $zero
+ or $a4, $a5, $zero
+ ori $a7, $zero, 220
+ syscall 0 # call clone
+
+ beqz $a0, 1f # whether child process
+ jirl $zero, $ra, 0 # parent process return
+1:
+ ld.d $t8, $sp, 0 # function pointer
+ ld.d $a0, $sp, 8 # argument pointer
+ jirl $ra, $t8, 0 # call the user's function
+ ori $a7, $zero, 93
+ syscall 0 # child process exit
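
The header comment documents the shuffle __clone has to perform: the C arguments (func, stack, flags, arg, ptid, tls, ctid) are rearranged into the kernel's clone order before syscall 0, with func and arg stashed on the new stack so the child branch can reload and call them. As a hedged sketch of the caller side (the declaration and the flag set below are assumptions for illustration, not taken from this diff), a minimal user of __clone could look like:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>

/* assumed musl-internal declaration; the real one lives elsewhere in the tree */
int __clone(int (*func)(void *), void *stack, int flags, void *arg, ...);

static int child_main(void *arg)
{
        /* runs on the new stack; the asm above reloads func and arg
           from that stack in the child and calls them */
        return 0;
}

static int spawn(void *arg)
{
        size_t size = 64*1024;
        char *stack = malloc(size);
        if (!stack) return -1;
        /* the stack grows down, so pass its high end; __clone aligns it
           to 16 bytes before pushing func and arg */
        return __clone(child_main, stack + size,
                       CLONE_VM|CLONE_FS|CLONE_FILES|SIGCHLD, arg, 0, 0, 0);
}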
diff --git a/src/thread/loongarch64/syscall_cp.s b/src/thread/loongarch64/syscall_cp.s
new file mode 100644
index 00000000..c057a97b
--- /dev/null
+++ b/src/thread/loongarch64/syscall_cp.s
@@ -0,0 +1,29 @@
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.global __syscall_cp_asm
+.hidden __syscall_cp_asm
+.type __syscall_cp_asm,@function
+
+__syscall_cp_asm:
+__cp_begin:
+ ld.w $a0, $a0, 0
+ bnez $a0, __cp_cancel
+ move $t8, $a1 # reserve system call number
+ move $a0, $a2
+ move $a1, $a3
+ move $a2, $a4
+ move $a3, $a5
+ move $a4, $a6
+ move $a5, $a7
+ move $a7, $t8
+ syscall 0
+__cp_end:
+ jr $ra
+__cp_cancel:
+ la.local $t8, __cancel
+ jr $t8
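
Between the __cp_begin and __cp_end labels the thread's cancellation flag is tested and the system call issued, so the SIGCANCEL handler (see the pthread_cancel.c hunk below) can tell from the interrupted program counter whether it hit a cancellation point. A hedged C rendering of that contract; the helper names below are assumptions for the sketch, not the build source:

long __cancel(void);                       /* assumed: runs the cancellation path */
long __syscall(long nr, ...);              /* assumed: raw syscall wrapper */

long syscall_cp_sketch(volatile int *cancel, long nr,
                       long a, long b, long c, long d, long e, long f)
{
        /* __cp_begin: if cancellation was already requested, take the
           cancellation path instead of entering the syscall */
        if (*cancel) return __cancel();

        /* the syscall sits inside the __cp_begin..__cp_end window, so a
           SIGCANCEL arriving here can redirect the thread to __cp_cancel */
        long r = __syscall(nr, a, b, c, d, e, f);

        /* __cp_end: past this point the syscall has returned normally */
        return r;
}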
diff --git a/src/thread/pthread_atfork.c b/src/thread/pthread_atfork.c
index 76497401..26d32543 100644
--- a/src/thread/pthread_atfork.c
+++ b/src/thread/pthread_atfork.c
@@ -1,7 +1,13 @@
#include <pthread.h>
+#include <errno.h>
#include "libc.h"
#include "lock.h"
+#define malloc __libc_malloc
+#define calloc undef
+#define realloc undef
+#define free undef
+
static struct atfork_funcs {
void (*prepare)(void);
void (*parent)(void);
@@ -34,7 +40,7 @@ void __fork_handler(int who)
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
struct atfork_funcs *new = malloc(sizeof *new);
- if (!new) return -1;
+ if (!new) return ENOMEM;
LOCK(lock);
new->next = funcs;
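
With this change a failed registration reports ENOMEM rather than -1, matching the POSIX convention that pthread_atfork returns an error number. A hedged usage example (the handler bodies are placeholders):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void before(void)    { /* acquire the locks fork must not inherit held */ }
static void in_parent(void) { /* release them in the parent */ }
static void in_child(void)  { /* release or reinitialize them in the child */ }

int main(void)
{
        int r = pthread_atfork(before, in_parent, in_child);
        if (r) fprintf(stderr, "pthread_atfork: %s\n", strerror(r));
        return 0;
}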
diff --git a/src/thread/pthread_attr_get.c b/src/thread/pthread_attr_get.c
index 4aa5afdb..f12ff442 100644
--- a/src/thread/pthread_attr_get.c
+++ b/src/thread/pthread_attr_get.c
@@ -70,7 +70,7 @@ int pthread_condattr_getpshared(const pthread_condattr_t *restrict a, int *restr
int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *restrict a, int *restrict protocol)
{
- *protocol = PTHREAD_PRIO_NONE;
+ *protocol = a->__attr / 8U % 2;
return 0;
}
int pthread_mutexattr_getpshared(const pthread_mutexattr_t *restrict a, int *restrict pshared)
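
The getter now decodes the protocol from bit 3 of __attr (the __attr/8U%2 expression) instead of unconditionally reporting PTHREAD_PRIO_NONE, so a protocol set through the standard API round-trips. A hedged illustration using only the public interface:

#include <pthread.h>
#include <stdio.h>

int main(void)
{
        pthread_mutexattr_t a;
        int p = -1;
        pthread_mutexattr_init(&a);
        /* setprotocol may report ENOTSUP on builds without PI-mutex support */
        if (!pthread_mutexattr_setprotocol(&a, PTHREAD_PRIO_INHERIT)) {
                pthread_mutexattr_getprotocol(&a, &p);
                printf("protocol = %d\n", p);   /* 1 == PTHREAD_PRIO_INHERIT */
        }
        pthread_mutexattr_destroy(&a);
        return 0;
}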
diff --git a/src/thread/pthread_cancel.c b/src/thread/pthread_cancel.c
index 2f9d5e97..139a6fc8 100644
--- a/src/thread/pthread_cancel.c
+++ b/src/thread/pthread_cancel.c
@@ -56,7 +56,12 @@ static void cancel_handler(int sig, siginfo_t *si, void *ctx)
_sigaddset(&uc->uc_sigmask, SIGCANCEL);
- if (self->cancelasync || pc >= (uintptr_t)__cp_begin && pc < (uintptr_t)__cp_end) {
+ if (self->cancelasync) {
+ pthread_sigmask(SIG_SETMASK, &uc->uc_sigmask, 0);
+ __cancel();
+ }
+
+ if (pc >= (uintptr_t)__cp_begin && pc < (uintptr_t)__cp_end) {
uc->uc_mcontext.MC_PC = (uintptr_t)__cp_cancel;
#ifdef CANCEL_GOT
uc->uc_mcontext.MC_GOT = CANCEL_GOT;
@@ -77,7 +82,7 @@ void __testcancel()
static void init_cancellation()
{
struct sigaction sa = {
- .sa_flags = SA_SIGINFO | SA_RESTART,
+ .sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK,
.sa_sigaction = cancel_handler
};
memset(&sa.sa_mask, -1, _NSIG/8);
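
The handler now takes the asynchronous-cancel path immediately and only rewrites the program counter when the interrupted code sat inside the __cp_begin..__cp_end window. A hedged example of when the asynchronous path matters (a loop containing no cancellation points):

#include <pthread.h>
#include <unistd.h>

static void *spin(void *arg)
{
        int old;
        /* no cancellation points inside the loop, so only asynchronous
           cancelability lets pthread_cancel() take effect here */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
        for (;;) ;
        return 0;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, 0, spin, 0);
        sleep(1);
        pthread_cancel(t);
        pthread_join(t, 0);
        return 0;
}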
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 6f187ee8..087f6206 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -107,6 +107,16 @@ _Noreturn void __pthread_exit(void *result)
/* At this point we are committed to thread termination. */
+ /* After the kernel thread exits, its tid may be reused. Clear it
+ * to prevent inadvertent use and inform functions that would use
+ * it that it's no longer available. At this point the killlock
+ * may be released, since functions that use it will consistently
+ * see the thread as having exited. Release it now so that no
+ * remaining locks (except thread list) are held if we end up
+ * resetting need_locks below. */
+ self->tid = 0;
+ UNLOCK(self->killlock);
+
/* Process robust list in userspace to handle non-pshared mutexes
* and the detached thread case where the robust list head will
* be invalid when the kernel would process it. */
@@ -159,12 +169,6 @@ _Noreturn void __pthread_exit(void *result)
a_store(&self->detach_state, DT_EXITED);
__wake(&self->detach_state, 1, 1);
- /* After the kernel thread exits, its tid may be reused. Clear it
- * to prevent inadvertent use and inform functions that would use
- * it that it's no longer available. */
- self->tid = 0;
- UNLOCK(self->killlock);
-
for (;;) __syscall(SYS_exit, 0);
}
diff --git a/src/thread/pthread_detach.c b/src/thread/pthread_detach.c
index 77772af2..d73a500e 100644
--- a/src/thread/pthread_detach.c
+++ b/src/thread/pthread_detach.c
@@ -5,8 +5,12 @@ static int __pthread_detach(pthread_t t)
{
/* If the cas fails, detach state is either already-detached
* or exiting/exited, and pthread_join will trap or cleanup. */
- if (a_cas(&t->detach_state, DT_JOINABLE, DT_DETACHED) != DT_JOINABLE)
- return __pthread_join(t, 0);
+ if (a_cas(&t->detach_state, DT_JOINABLE, DT_DETACHED) != DT_JOINABLE) {
+ int cs;
+ __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+ __pthread_join(t, 0);
+ __pthread_setcancelstate(cs, 0);
+ }
return 0;
}
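
The fallback join only triggers when pthread_detach races with the target's exit; wrapping it in PTHREAD_CANCEL_DISABLE keeps a pending cancellation of the caller from being acted on inside that internal join. A hedged scenario:

#include <pthread.h>
#include <unistd.h>

static void *work(void *arg) { return 0; }

int main(void)
{
        pthread_t t;
        pthread_create(&t, 0, work, 0);
        sleep(1);          /* let the thread finish first */
        /* detaching an already-exited thread falls back to an internal
           join; with this change that join runs with cancellation
           disabled, so it cannot act on a cancellation aimed at us */
        pthread_detach(t);
        return 0;
}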
diff --git a/src/thread/pthread_getname_np.c b/src/thread/pthread_getname_np.c
new file mode 100644
index 00000000..85504e45
--- /dev/null
+++ b/src/thread/pthread_getname_np.c
@@ -0,0 +1,25 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+
+#include "pthread_impl.h"
+
+int pthread_getname_np(pthread_t thread, char *name, size_t len)
+{
+ int fd, cs, status = 0;
+ char f[sizeof "/proc/self/task//comm" + 3*sizeof(int)];
+
+ if (len < 16) return ERANGE;
+
+ if (thread == pthread_self())
+ return prctl(PR_GET_NAME, (unsigned long)name, 0UL, 0UL, 0UL) ? errno : 0;
+
+ snprintf(f, sizeof f, "/proc/self/task/%d/comm", thread->tid);
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+ if ((fd = open(f, O_RDONLY|O_CLOEXEC)) < 0 || (len = read(fd, name, len)) == -1) status = errno;
+ else name[len-1] = 0; /* remove trailing new line only if successful */
+ if (fd >= 0) close(fd);
+ pthread_setcancelstate(cs, 0);
+ return status;
+}
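
pthread_getname_np is the read-side counterpart of the existing pthread_setname_np: for the calling thread it uses prctl(PR_GET_NAME), otherwise it reads /proc/self/task/<tid>/comm. A hedged usage example (thread names are limited to 15 characters plus the terminator, hence the 16-byte minimum buffer):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
        char buf[16];   /* must be at least 16 bytes or ERANGE is returned */
        pthread_setname_np(pthread_self(), "worker0");
        if (!pthread_getname_np(pthread_self(), buf, sizeof buf))
                printf("thread name: %s\n", buf);
        return 0;
}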
diff --git a/src/thread/pthread_key_create.c b/src/thread/pthread_key_create.c
index d1120941..39770c7a 100644
--- a/src/thread/pthread_key_create.c
+++ b/src/thread/pthread_key_create.c
@@ -1,4 +1,5 @@
#include "pthread_impl.h"
+#include "fork_impl.h"
volatile size_t __pthread_tsd_size = sizeof(void *) * PTHREAD_KEYS_MAX;
void *__pthread_tsd_main[PTHREAD_KEYS_MAX] = { 0 };
@@ -20,6 +21,13 @@ static void dummy_0(void)
weak_alias(dummy_0, __tl_lock);
weak_alias(dummy_0, __tl_unlock);
+void __pthread_key_atfork(int who)
+{
+ if (who<0) __pthread_rwlock_rdlock(&key_lock);
+ else if (!who) __pthread_rwlock_unlock(&key_lock);
+ else key_lock = (pthread_rwlock_t)PTHREAD_RWLOCK_INITIALIZER;
+}
+
int __pthread_key_create(pthread_key_t *k, void (*dtor)(void *))
{
pthread_t self = __pthread_self();
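
__pthread_key_atfork gives fork() a way to hold key_lock across the fork and reinitialize it in the child: who<0 before fork, who==0 in the parent afterwards, who>0 in the child. A hedged sketch of the caller side; the real call sites live in musl's fork code, not in this diff:

#include <unistd.h>

void __pthread_key_atfork(int);   /* from the hunk above */

static pid_t fork_with_key_lock(void)
{
        __pthread_key_atfork(-1);          /* before fork: rdlock key_lock */
        pid_t pid = fork();
        if (pid) __pthread_key_atfork(0);  /* parent: unlock */
        else __pthread_key_atfork(1);      /* child: reinitialize the lock */
        return pid;
}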
diff --git a/src/thread/pthread_setname_np.c b/src/thread/pthread_setname_np.c
index 82d35e17..fc2d2306 100644
--- a/src/thread/pthread_setname_np.c
+++ b/src/thread/pthread_setname_np.c
@@ -19,7 +19,7 @@ int pthread_setname_np(pthread_t thread, const char *name)
snprintf(f, sizeof f, "/proc/self/task/%d/comm", thread->tid);
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
- if ((fd = open(f, O_WRONLY)) < 0 || write(fd, name, len) < 0) status = errno;
+ if ((fd = open(f, O_WRONLY|O_CLOEXEC)) < 0 || write(fd, name, len) < 0) status = errno;
if (fd >= 0) close(fd);
pthread_setcancelstate(cs, 0);
return status;
diff --git a/src/thread/riscv32/__set_thread_area.s b/src/thread/riscv32/__set_thread_area.s
new file mode 100644
index 00000000..828154d2
--- /dev/null
+++ b/src/thread/riscv32/__set_thread_area.s
@@ -0,0 +1,6 @@
+.global __set_thread_area
+.type __set_thread_area, %function
+__set_thread_area:
+ mv tp, a0
+ li a0, 0
+ ret
diff --git a/src/thread/riscv32/__unmapself.s b/src/thread/riscv32/__unmapself.s
new file mode 100644
index 00000000..2849119c
--- /dev/null
+++ b/src/thread/riscv32/__unmapself.s
@@ -0,0 +1,7 @@
+.global __unmapself
+.type __unmapself, %function
+__unmapself:
+ li a7, 215 # SYS_munmap
+ ecall
+ li a7, 93 # SYS_exit
+ ecall
diff --git a/src/thread/riscv32/clone.s b/src/thread/riscv32/clone.s
new file mode 100644
index 00000000..3102239d
--- /dev/null
+++ b/src/thread/riscv32/clone.s
@@ -0,0 +1,34 @@
+# __clone(func, stack, flags, arg, ptid, tls, ctid)
+# a0, a1, a2, a3, a4, a5, a6
+
+# syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+# a7 a0, a1, a2, a3, a4
+
+.global __clone
+.type __clone, %function
+__clone:
+ # Save func and arg to stack
+ addi a1, a1, -16
+ sw a0, 0(a1)
+ sw a3, 4(a1)
+
+ # Call SYS_clone
+ mv a0, a2
+ mv a2, a4
+ mv a3, a5
+ mv a4, a6
+ li a7, 220 # SYS_clone
+ ecall
+
+ beqz a0, 1f
+ # Parent
+ ret
+
+ # Child
+1: lw a1, 0(sp)
+ lw a0, 4(sp)
+ jalr a1
+
+ # Exit
+ li a7, 93 # SYS_exit
+ ecall
diff --git a/src/thread/riscv32/syscall_cp.s b/src/thread/riscv32/syscall_cp.s
new file mode 100644
index 00000000..079d1ba0
--- /dev/null
+++ b/src/thread/riscv32/syscall_cp.s
@@ -0,0 +1,29 @@
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.global __syscall_cp_asm
+.hidden __syscall_cp_asm
+.type __syscall_cp_asm, %function
+__syscall_cp_asm:
+__cp_begin:
+ lw t0, 0(a0)
+ bnez t0, __cp_cancel
+
+ mv t0, a1
+ mv a0, a2
+ mv a1, a3
+ mv a2, a4
+ mv a3, a5
+ mv a4, a6
+ mv a5, a7
+ lw a6, 0(sp)
+ mv a7, t0
+ ecall
+__cp_end:
+ ret
+__cp_cancel:
+ tail __cancel
diff --git a/src/thread/s390x/clone.s b/src/thread/s390x/clone.s
index 577748ea..2125f20b 100644
--- a/src/thread/s390x/clone.s
+++ b/src/thread/s390x/clone.s
@@ -17,6 +17,9 @@ __clone:
# if (!tid) syscall(SYS_exit, a(d));
# return tid;
+ # preserve call-saved register used as syscall arg
+ stg %r6, 48(%r15)
+
# create initial stack frame for new thread
nill %r3, 0xfff8
aghi %r3, -160
@@ -35,6 +38,9 @@ __clone:
lg %r6, 160(%r15)
svc 120
+ # restore call-saved register
+ lg %r6, 48(%r15)
+
# if error or if we're the parent, return
ltgr %r2, %r2
bnzr %r14
diff --git a/src/thread/s390x/syscall_cp.s b/src/thread/s390x/syscall_cp.s
index c1da40de..d094cbf5 100644
--- a/src/thread/s390x/syscall_cp.s
+++ b/src/thread/s390x/syscall_cp.s
@@ -14,6 +14,7 @@ __cp_begin:
icm %r2, 15, 0(%r2)
jne __cp_cancel
+ stg %r6, 48(%r15)
stg %r7, 56(%r15)
lgr %r1, %r3
lgr %r2, %r4
@@ -26,6 +27,7 @@ __cp_begin:
__cp_end:
lg %r7, 56(%r15)
+ lg %r6, 48(%r15)
br %r14
__cp_cancel:
diff --git a/src/thread/sem_getvalue.c b/src/thread/sem_getvalue.c
index d9d83071..c0b7762d 100644
--- a/src/thread/sem_getvalue.c
+++ b/src/thread/sem_getvalue.c
@@ -1,8 +1,9 @@
#include <semaphore.h>
+#include <limits.h>
int sem_getvalue(sem_t *restrict sem, int *restrict valp)
{
int val = sem->__val[0];
- *valp = val < 0 ? 0 : val;
+ *valp = val & SEM_VALUE_MAX;
return 0;
}
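
This semaphore series moves __val[0] to a split encoding (a hedged reading of the diffs that follow): the low 31 bits hold the count and bit 31 flags that waiters may exist, so the public value is recovered by masking with SEM_VALUE_MAX instead of clamping negatives to zero. A small worked example of the mask:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned raw = 0x80000002u;            /* count 2 with the waiters bit set */
        /* SEM_VALUE_MAX is 0x7FFFFFFF, so the mask strips bit 31 */
        printf("%u\n", raw & SEM_VALUE_MAX);   /* prints 2 */
        return 0;
}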
diff --git a/src/thread/sem_post.c b/src/thread/sem_post.c
index 31e3293d..5c2517f2 100644
--- a/src/thread/sem_post.c
+++ b/src/thread/sem_post.c
@@ -1,17 +1,21 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
int sem_post(sem_t *sem)
{
- int val, waiters, priv = sem->__val[2];
+ int val, new, waiters, priv = sem->__val[2];
do {
val = sem->__val[0];
waiters = sem->__val[1];
- if (val == SEM_VALUE_MAX) {
+ if ((val & SEM_VALUE_MAX) == SEM_VALUE_MAX) {
errno = EOVERFLOW;
return -1;
}
- } while (a_cas(sem->__val, val, val+1+(val<0)) != val);
- if (val<0 || waiters) __wake(sem->__val, 1, priv);
+ new = val + 1;
+ if (waiters <= 1)
+ new &= ~0x80000000;
+ } while (a_cas(sem->__val, val, new) != val);
+ if (val<0) __wake(sem->__val, waiters>1 ? 1 : -1, priv);
return 0;
}
diff --git a/src/thread/sem_timedwait.c b/src/thread/sem_timedwait.c
index 58d3ebfe..aa67376c 100644
--- a/src/thread/sem_timedwait.c
+++ b/src/thread/sem_timedwait.c
@@ -1,4 +1,5 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
static void cleanup(void *p)
@@ -13,14 +14,15 @@ int sem_timedwait(sem_t *restrict sem, const struct timespec *restrict at)
if (!sem_trywait(sem)) return 0;
int spins = 100;
- while (spins-- && sem->__val[0] <= 0 && !sem->__val[1]) a_spin();
+ while (spins-- && !(sem->__val[0] & SEM_VALUE_MAX) && !sem->__val[1])
+ a_spin();
while (sem_trywait(sem)) {
- int r;
+ int r, priv = sem->__val[2];
a_inc(sem->__val+1);
- a_cas(sem->__val, 0, -1);
+ a_cas(sem->__val, 0, 0x80000000);
pthread_cleanup_push(cleanup, (void *)(sem->__val+1));
- r = __timedwait_cp(sem->__val, -1, CLOCK_REALTIME, at, sem->__val[2]);
+ r = __timedwait_cp(sem->__val, 0x80000000, CLOCK_REALTIME, at, priv);
pthread_cleanup_pop(1);
if (r) {
errno = r;
diff --git a/src/thread/sem_trywait.c b/src/thread/sem_trywait.c
index 04edf46b..beb435da 100644
--- a/src/thread/sem_trywait.c
+++ b/src/thread/sem_trywait.c
@@ -1,12 +1,12 @@
#include <semaphore.h>
+#include <limits.h>
#include "pthread_impl.h"
int sem_trywait(sem_t *sem)
{
int val;
- while ((val=sem->__val[0]) > 0) {
- int new = val-1-(val==1 && sem->__val[1]);
- if (a_cas(sem->__val, val, new)==val) return 0;
+ while ((val=sem->__val[0]) & SEM_VALUE_MAX) {
+ if (a_cas(sem->__val, val, val-1)==val) return 0;
}
errno = EAGAIN;
return -1;
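
The externally visible semantics are unchanged by the new encoding; only how the count and the waiters hint are stored in __val[0] differs. A hedged example of the standard behaviour this preserves:

#include <semaphore.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
        sem_t s;
        sem_init(&s, 0, 0);                 /* process-private, count 0 */

        if (sem_trywait(&s) && errno == EAGAIN)
                puts("empty: trywait fails without blocking");

        sem_post(&s);                       /* count 0 -> 1 */
        if (!sem_trywait(&s))               /* count 1 -> 0 */
                puts("non-empty: trywait succeeds");

        sem_destroy(&s);
        return 0;
}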
diff --git a/src/thread/synccall.c b/src/thread/synccall.c
index d58c851f..38597254 100644
--- a/src/thread/synccall.c
+++ b/src/thread/synccall.c
@@ -11,7 +11,7 @@ weak_alias(dummy_0, __tl_unlock);
static int target_tid;
static void (*callback)(void *), *context;
-static sem_t target_sem, caller_sem;
+static sem_t target_sem, caller_sem, exit_sem;
static void dummy(void *p)
{
@@ -33,7 +33,7 @@ static void handler(int sig)
/* Inform caller we've completed the callback and wait
* for the caller to release us to return. */
sem_post(&caller_sem);
- sem_wait(&target_sem);
+ sem_wait(&exit_sem);
/* Inform caller we are returning and state is destroyable. */
sem_post(&caller_sem);
@@ -45,7 +45,7 @@ void __synccall(void (*func)(void *), void *ctx)
{
sigset_t oldmask;
int cs, i, r;
- struct sigaction sa = { .sa_flags = SA_RESTART, .sa_handler = handler };
+ struct sigaction sa = { .sa_flags = SA_RESTART | SA_ONSTACK, .sa_handler = handler };
pthread_t self = __pthread_self(), td;
int count = 0;
@@ -62,6 +62,7 @@ void __synccall(void (*func)(void *), void *ctx)
sem_init(&target_sem, 0, 0);
sem_init(&caller_sem, 0, 0);
+ sem_init(&exit_sem, 0, 0);
if (!libc.threads_minus_1 || __syscall(SYS_gettid) != self->tid)
goto single_threaded;
@@ -107,12 +108,13 @@ single_threaded:
/* Only release the caught threads once all threads, including the
* caller, have returned from the callback function. */
for (i=0; i<count; i++)
- sem_post(&target_sem);
+ sem_post(&exit_sem);
for (i=0; i<count; i++)
sem_wait(&caller_sem);
sem_destroy(&caller_sem);
sem_destroy(&target_sem);
+ sem_destroy(&exit_sem);
pthread_setcancelstate(cs, 0);
__tl_unlock();
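
The extra exit_sem (and SA_ONSTACK on the handler) tightens the handshake between the caller and the threads it has stopped: target_sem now only admits threads into the callback, while exit_sem releases them afterwards. A hedged sketch of how an internal caller would use __synccall, with the signature taken from the hunk above:

void __synccall(void (*)(void *), void *);   /* signature from the hunk above */

static void apply(void *p)
{
        /* executed once in every live thread, from the signal handler
           shown in this diff, while all threads are stopped */
}

static void broadcast_change(void *ctx)
{
        /* musl uses this mechanism internally, e.g. for setuid-style
           credential changes that must hit every thread */
        __synccall(apply, ctx);
}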