author     Rich Felker <dalias@aerifal.cx>   2019-07-30 23:51:45 -0400
committer  Rich Felker <dalias@aerifal.cx>   2019-08-02 00:08:23 -0400
commit     2b4fd6f75b4fa66d28cddcf165ad48e8fda486d1 (patch)
tree       682eb0797308379efae78da1c705390bc59696fa /src
parent     2e554617e5a6a41bf3f6c6306c753cd53abf728c (diff)
download   musl-2b4fd6f75b4fa66d28cddcf165ad48e8fda486d1.tar.gz
clock_adjtime: add time64 support, decouple 32-bit time_t, fix x32
the 64-bit/time64 version of the syscall is not API-compatible with the userspace timex structure definition; fields specified as long have type long long. so when using the time64 syscall, we have to convert the entire structure. this was always the case for x32 as well, but went unnoticed, meaning that clock_adjtime just passed junk to the kernel on x32. it should be fixed now.

for the fallback case, we avoid encoding any assumptions about the new location of the time member or naming of the legacy slots by accessing them through a union of the kernel type and the new userspace type. the only assumption is that the non-time members live at the same offsets as in the (non-time64, long-based) kernel timex struct. this property saves us from having to convert the whole thing, and avoids a lot of additional work in compat shims.

the new code is statically unreachable for now except on x32, where it fixes major brokenness. it is permanently unreachable on 64-bit.
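The union-based fallback described above can be sketched in isolation. The structs and names below (kern_rec, user_rec, fake_kernel_call) are hypothetical stand-ins, not the real timex definitions; the sketch only reproduces the property the commit relies on: every non-time member sits at the same offset in both layouts, while the time members differ in type and location.

#include <stdio.h>

/* long-based layout a hypothetical kernel expects */
struct kern_rec {
	unsigned modes;
	long freq;
	long time_sec, time_usec;   /* legacy long time slots */
	int status;
};

/* userspace layout: the wide time members live elsewhere, but all
 * non-time members keep the kernel offsets */
struct user_rec {
	unsigned modes;
	long freq;
	long __reserved[2];         /* occupies the legacy time slots */
	int status;
	long long time_sec, time_usec;
};

/* stand-in for the syscall: pretend the kernel updated the record */
static void fake_kernel_call(struct kern_rec *k)
{
	k->freq += 1;
	k->time_usec += 500;
}

int main(void)
{
	struct user_rec in = { .modes = 1, .freq = 100,
	                       .time_sec = 1234, .time_usec = 42 };

	/* view the same memory through either layout, without assuming
	 * where the userspace struct keeps its time members */
	union {
		struct user_rec utx;
		struct kern_rec ktx;
	} u = { in };

	/* only the time members need explicit placement into the legacy slots */
	u.ktx.time_sec = (long)in.time_sec;
	u.ktx.time_usec = (long)in.time_usec;

	fake_kernel_call(&u.ktx);

	/* copy everything back through the userspace view, then patch the
	 * time members from the kernel-layout view */
	struct user_rec out = u.utx;
	out.time_sec = u.ktx.time_sec;
	out.time_usec = u.ktx.time_usec;

	printf("freq=%ld time=%lld.%06lld\n", out.freq, out.time_sec, out.time_usec);
	return 0;
}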
Diffstat (limited to 'src')
-rw-r--r--  src/linux/clock_adjtime.c | 110
1 file changed, 110 insertions, 0 deletions
diff --git a/src/linux/clock_adjtime.c b/src/linux/clock_adjtime.c
index 7d6b0349..2f531397 100644
--- a/src/linux/clock_adjtime.c
+++ b/src/linux/clock_adjtime.c
@@ -1,9 +1,119 @@
#include <sys/timex.h>
#include <time.h>
+#include <errno.h>
#include "syscall.h"
+#define IS32BIT(x) !((x)+0x80000000ULL>>32)
+
+struct ktimex64 {
+ unsigned modes;
+ int :32;
+ long long offset, freq, maxerror, esterror;
+ int status;
+ int :32;
+ long long constant, precision, tolerance;
+ long long time_sec, time_usec;
+ long long tick, ppsfreq, jitter;
+ int shift;
+ int :32;
+ long long stabil, jitcnt, calcnt, errcnt, stbcnt;
+ int tai;
+ int __padding[11];
+};
+
+struct ktimex {
+ unsigned modes;
+ long offset, freq, maxerror, esterror;
+ int status;
+ long constant, precision, tolerance;
+ long time_sec, time_usec;
+ long tick, ppsfreq, jitter;
+ int shift;
+ long stabil, jitcnt, calcnt, errcnt, stbcnt;
+ int tai;
+ int __padding[11];
+};
+
int clock_adjtime (clockid_t clock_id, struct timex *utx)
{
+ int r = -ENOSYS;
+#ifdef SYS_clock_adjtime64
+ if (SYS_clock_adjtime == SYS_clock_adjtime64 ||
+ (utx->modes & ADJ_SETOFFSET) && !IS32BIT(utx->time.tv_sec)) {
+ struct ktimex64 ktx = {
+ .modes = utx->modes,
+ .offset = utx->offset,
+ .freq = utx->freq,
+ .maxerror = utx->maxerror,
+ .esterror = utx->esterror,
+ .status = utx->status,
+ .constant = utx->constant,
+ .precision = utx->precision,
+ .tolerance = utx->tolerance,
+ .time_sec = utx->time.tv_sec,
+ .time_usec = utx->time.tv_usec,
+ .tick = utx->tick,
+ .ppsfreq = utx->ppsfreq,
+ .jitter = utx->jitter,
+ .shift = utx->shift,
+ .stabil = utx->stabil,
+ .jitcnt = utx->jitcnt,
+ .calcnt = utx->calcnt,
+ .errcnt = utx->errcnt,
+ .stbcnt = utx->stbcnt,
+ .tai = utx->tai,
+ };
+ r = __syscall(SYS_clock_adjtime, clock_id, &ktx);
+ if (r>=0) {
+ utx->modes = ktx.modes;
+ utx->offset = ktx.offset;
+ utx->freq = ktx.freq;
+ utx->maxerror = ktx.maxerror;
+ utx->esterror = ktx.esterror;
+ utx->status = ktx.status;
+ utx->constant = ktx.constant;
+ utx->precision = ktx.precision;
+ utx->tolerance = ktx.tolerance;
+ utx->time.tv_sec = ktx.time_sec;
+ utx->time.tv_usec = ktx.time_usec;
+ utx->tick = ktx.tick;
+ utx->ppsfreq = ktx.ppsfreq;
+ utx->jitter = ktx.jitter;
+ utx->shift = ktx.shift;
+ utx->stabil = ktx.stabil;
+ utx->jitcnt = ktx.jitcnt;
+ utx->calcnt = ktx.calcnt;
+ utx->errcnt = ktx.errcnt;
+ utx->stbcnt = ktx.stbcnt;
+ utx->tai = ktx.tai;
+ }
+ }
+ if (SYS_clock_adjtime == SYS_clock_adjtime64 || r!=-ENOSYS)
+ return __syscall_ret(r);
+ if ((utx->modes & ADJ_SETOFFSET) && !IS32BIT(utx->time.tv_sec))
+ return __syscall_ret(-ENOTSUP);
+#endif
+ if (sizeof(time_t) > sizeof(long)) {
+ union {
+ struct timex utx;
+ struct ktimex ktx;
+ } u = { *utx };
+ u.ktx.time_sec = utx->time.tv_sec;
+ u.ktx.time_usec = utx->time.tv_usec;
+#ifdef SYS_adjtimex
+ if (clock_id==CLOCK_REALTIME) r = __syscall(SYS_adjtimex, &u);
+ else
+#endif
+ r = __syscall(SYS_clock_adjtime, clock_id, &u);
+ if (r>=0) {
+ *utx = u.utx;
+ utx->time.tv_sec = u.ktx.time_sec;
+ utx->time.tv_usec = u.ktx.time_usec;
+ }
+ return __syscall_ret(r);
+ }
+#ifdef SYS_adjtimex
if (clock_id==CLOCK_REALTIME) return syscall(SYS_adjtimex, utx);
+#endif
return syscall(SYS_clock_adjtime, clock_id, utx);
}
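
For context only (not part of the commit), a minimal caller of this interface looks roughly like the sketch below. With modes = 0 it is a pure query and needs no privileges; the ADJ_SETOFFSET path that the new time64 code handles would additionally require CAP_SYS_TIME.

#include <sys/timex.h>
#include <time.h>
#include <stdio.h>

int main(void)
{
	struct timex tx = { .modes = 0 };  /* query only, adjust nothing */
	int state = clock_adjtime(CLOCK_REALTIME, &tx);
	if (state < 0) {
		perror("clock_adjtime");
		return 1;
	}
	/* state is TIME_OK, TIME_INS, ...; freq is in units of 2^-16 ppm */
	printf("clock state %d, freq %ld (scaled ppm), maxerror %ld us\n",
	       state, tx.freq, tx.maxerror);
	return 0;
}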