author     Rich Felker <dalias@aerifal.cx>    2011-08-07 11:14:32 -0400
committer  Rich Felker <dalias@aerifal.cx>    2011-08-07 11:14:32 -0400
commit     b3c08a16c0a085e8de01cdc9de106aaa332d27d5 (patch)
tree       6b3645eb14be5acf87334a158202d4866efda50b
parent     b2b95a58b4326c8a8aa87fdf6e8b0f6421dbaf99 (diff)
download   musl-b3c08a16c0a085e8de01cdc9de106aaa332d27d5.tar.gz
simplify unified timed wait code, drop support for newer method
the new kernel-side absolute-time wait was hard to get right and was basically just code duplication. it could only improve "performance" when waiting, and even then the improvement was just a slight drop in cpu usage during a wait.

actually, with vdso clock_gettime, the "old" way will be even faster than the "new" way if the time has already expired, since it will not invoke any syscalls: it can determine entirely in userspace that it needs to return ETIMEDOUT.
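As an illustration (not part of the commit) of why the retained userspace method is cheap, here is a minimal sketch of the same deadline-to-relative-timeout conversion the new do_wait() performs; the helper name abs_to_rel is invented for the example. When the deadline has already passed, ETIMEDOUT is decided before any futex syscall, and with a vdso-backed clock_gettime even the clock read stays in userspace.

#include <errno.h>
#include <time.h>

/* illustrative helper (not a musl function): convert an absolute
 * deadline 'at' on clock 'clk' into a relative timeout '*rel',
 * mirroring the logic this commit keeps in do_wait(). */
int abs_to_rel(clockid_t clk, const struct timespec *at,
               struct timespec *rel)
{
	struct timespec now;
	if (at->tv_nsec >= 1000000000UL) return EINVAL;
	if (clock_gettime(clk, &now)) return EINVAL;
	rel->tv_sec = at->tv_sec - now.tv_sec;
	if ((rel->tv_nsec = at->tv_nsec - now.tv_nsec) < 0) {
		rel->tv_sec--;
		rel->tv_nsec += 1000000000;
	}
	/* deadline already expired: caller can return ETIMEDOUT
	 * without issuing any futex syscall */
	if (rel->tv_sec < 0) return ETIMEDOUT;
	return 0;
}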
-rw-r--r--   src/thread/__timedwait.c | 59
1 file changed, 28 insertions(+), 31 deletions(-)
diff --git a/src/thread/__timedwait.c b/src/thread/__timedwait.c
index e15e2d0a..302273ae 100644
--- a/src/thread/__timedwait.c
+++ b/src/thread/__timedwait.c
@@ -4,45 +4,42 @@
#include "futex.h"
#include "syscall.h"
-static int do_wait(volatile int *addr, int val, clockid_t clk, const struct timespec *at, int cp, int priv)
+static int do_wait(volatile int *addr, int val,
+ clockid_t clk, const struct timespec *at, int priv)
{
- int r, flag = 0;
+ int r;
struct timespec to, *top=0;
- if (!at) goto notimeout;
- if (at->tv_nsec >= 1000000000UL)
- return EINVAL;
- if (clk == CLOCK_REALTIME || clk == CLOCK_MONOTONIC) {
- if (clk == CLOCK_REALTIME) flag = FUTEX_CLOCK_REALTIME;
- if (cp) r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT_BITSET|flag, val, at, 0, -1);
- else r = -__syscall(SYS_futex, addr, FUTEX_WAIT_BITSET|flag, val, at, 0, -1);
- if (r != EINVAL && r != ENOSYS) goto done;
- }
- if (clock_gettime(clk, &to)) return EINVAL;
- to.tv_sec = at->tv_sec - to.tv_sec;
- if ((to.tv_nsec = at->tv_nsec - to.tv_nsec) < 0) {
- to.tv_sec--;
- to.tv_nsec += 1000000000;
+ if (at) {
+ if (at->tv_nsec >= 1000000000UL) return EINVAL;
+ if (clock_gettime(clk, &to)) return EINVAL;
+ to.tv_sec = at->tv_sec - to.tv_sec;
+ if ((to.tv_nsec = at->tv_nsec - to.tv_nsec) < 0) {
+ to.tv_sec--;
+ to.tv_nsec += 1000000000;
+ }
+ if (to.tv_sec < 0) return ETIMEDOUT;
+ top = &to;
}
- if (to.tv_sec < 0) return ETIMEDOUT;
- top = &to;
-notimeout:
- if (cp) r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
- else r = -__syscall(SYS_futex, addr, FUTEX_WAIT, val, top);
-done:
+
+ r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
if (r == EINTR || r == EINVAL || r == ETIMEDOUT) return r;
return 0;
}
-int __timedwait(volatile int *addr, int val, clockid_t clk, const struct timespec *at, void (*cleanup)(void *), void *arg, int priv)
+int __timedwait(volatile int *addr, int val,
+ clockid_t clk, const struct timespec *at,
+ void (*cleanup)(void *), void *arg, int priv)
{
- int r;
- if (cleanup) {
- pthread_cleanup_push(cleanup, arg);
- r = do_wait(addr, val, clk, at, 1, priv);
- pthread_cleanup_pop(0);
- } else {
- r = do_wait(addr, val, clk, at, 0, priv);
- }
+ int r, cs;
+
+ if (!cleanup) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
+ pthread_cleanup_push(cleanup, arg);
+
+ r = do_wait(addr, val, clk, at, priv);
+
+ pthread_cleanup_pop(0);
+ if (!cleanup) pthread_setcancelstate(cs, 0);
+
return r;
}
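For reference (also not part of the commit), a self-contained demo that plain FUTEX_WAIT takes a relative timeout, which is the kernel interface the unified wait path above relies on; the futex word and the 100ms value are arbitrary choices for the demo.

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	int word = 0;  /* futex word; nothing will ever wake it */
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100*1000*1000 };

	/* FUTEX_WAIT blocks while *uaddr == val, for at most the given
	 * relative timeout, so this should fail with ETIMEDOUT after
	 * roughly 100ms. */
	long r = syscall(SYS_futex, &word, FUTEX_WAIT, 0, &rel, (void *)0, 0);
	if (r == -1 && errno == ETIMEDOUT)
		printf("FUTEX_WAIT timed out as expected\n");
	else
		printf("r=%ld errno=%d\n", r, errno);
	return 0;
}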