-rw-r--r-- COPYRIGHT | 5
-rw-r--r-- Makefile | 3
-rw-r--r-- arch/arm/syscall_arch.h | 6
-rw-r--r-- arch/mips/bits/signal.h | 2
-rw-r--r-- arch/mips64/bits/signal.h | 2
-rw-r--r-- arch/mipsn32/bits/signal.h | 2
-rwxr-xr-x configure | 19
-rw-r--r-- ldso/dynlink.c | 3
-rw-r--r-- src/fenv/sh/fenv.S | 2
-rw-r--r-- src/internal/atomic.h | 15
-rw-r--r-- src/internal/dynlink.h | 5
-rw-r--r-- src/internal/libc.h | 9
-rw-r--r-- src/internal/shgetc.c | 2
-rw-r--r-- src/internal/stdio_impl.h | 2
-rw-r--r-- src/linux/clock_adjtime.c | 95
-rw-r--r-- src/malloc/DESIGN | 22
-rw-r--r-- src/malloc/aligned_alloc.c | 7
-rw-r--r-- src/malloc/calloc.c | 45
-rw-r--r-- src/malloc/expand_heap.c | 71
-rw-r--r-- src/malloc/lite_malloc.c | 100
-rw-r--r-- src/malloc/mallocng/aligned_alloc.c | 57
-rw-r--r-- src/malloc/mallocng/donate.c | 39
-rw-r--r-- src/malloc/mallocng/free.c | 143
-rw-r--r-- src/malloc/mallocng/glue.h | 77
-rw-r--r-- src/malloc/mallocng/malloc.c | 387
-rw-r--r-- src/malloc/mallocng/malloc_usable_size.c | 12
-rw-r--r-- src/malloc/mallocng/meta.h | 288
-rw-r--r-- src/malloc/mallocng/realloc.c | 51
-rw-r--r-- src/malloc/memalign.c | 53
-rw-r--r-- src/malloc/oldmalloc/aligned_alloc.c | 53
-rw-r--r-- src/malloc/oldmalloc/malloc.c (renamed from src/malloc/malloc.c) | 357
-rw-r--r-- src/malloc/oldmalloc/malloc_impl.h (renamed from src/internal/malloc_impl.h) | 9
-rw-r--r-- src/malloc/oldmalloc/malloc_usable_size.c (renamed from src/malloc/malloc_usable_size.c) | 0
-rw-r--r-- src/malloc/posix_memalign.c | 3
-rw-r--r-- src/malloc/replaced.c | 4
-rw-r--r-- src/math/i386/fabs.c | 7
-rw-r--r-- src/math/i386/fabs.s | 6
-rw-r--r-- src/math/i386/fabsf.c | 7
-rw-r--r-- src/math/i386/fabsf.s | 6
-rw-r--r-- src/math/i386/fabsl.c | 7
-rw-r--r-- src/math/i386/fabsl.s | 6
-rw-r--r-- src/math/i386/fmod.c | 10
-rw-r--r-- src/math/i386/fmod.s | 11
-rw-r--r-- src/math/i386/fmodf.c | 10
-rw-r--r-- src/math/i386/fmodf.s | 11
-rw-r--r-- src/math/i386/fmodl.c | 9
-rw-r--r-- src/math/i386/fmodl.s | 11
-rw-r--r-- src/math/i386/llrint.c | 8
-rw-r--r-- src/math/i386/llrint.s | 8
-rw-r--r-- src/math/i386/llrintf.c | 8
-rw-r--r-- src/math/i386/llrintf.s | 9
-rw-r--r-- src/math/i386/llrintl.c | 8
-rw-r--r-- src/math/i386/llrintl.s | 8
-rw-r--r-- src/math/i386/lrint.c | 8
-rw-r--r-- src/math/i386/lrint.s | 7
-rw-r--r-- src/math/i386/lrintf.c | 8
-rw-r--r-- src/math/i386/lrintf.s | 7
-rw-r--r-- src/math/i386/lrintl.c | 8
-rw-r--r-- src/math/i386/lrintl.s | 7
-rw-r--r-- src/math/i386/remainder.c | 12
-rw-r--r-- src/math/i386/remainder.s | 14
-rw-r--r-- src/math/i386/remainderf.c | 12
-rw-r--r-- src/math/i386/remainderf.s | 14
-rw-r--r-- src/math/i386/remainderl.c | 9
-rw-r--r-- src/math/i386/remainderl.s | 11
-rw-r--r-- src/math/i386/rint.c | 7
-rw-r--r-- src/math/i386/rint.s | 6
-rw-r--r-- src/math/i386/rintf.c | 7
-rw-r--r-- src/math/i386/rintf.s | 6
-rw-r--r-- src/math/i386/rintl.c | 7
-rw-r--r-- src/math/i386/rintl.s | 6
-rw-r--r-- src/math/i386/sqrt.c | 15
-rw-r--r-- src/math/i386/sqrt.s | 21
-rw-r--r-- src/math/i386/sqrtf.c | 12
-rw-r--r-- src/math/i386/sqrtf.s | 7
-rw-r--r-- src/math/i386/sqrtl.c | 7
-rw-r--r-- src/math/i386/sqrtl.s | 5
-rw-r--r-- src/math/x86_64/fabs.c | 10
-rw-r--r-- src/math/x86_64/fabs.s | 9
-rw-r--r-- src/math/x86_64/fabsf.c | 10
-rw-r--r-- src/math/x86_64/fabsf.s | 7
-rw-r--r-- src/math/x86_64/fabsl.c | 7
-rw-r--r-- src/math/x86_64/fabsl.s | 6
-rw-r--r-- src/math/x86_64/fmodl.c | 9
-rw-r--r-- src/math/x86_64/fmodl.s | 11
-rw-r--r-- src/math/x86_64/llrint.c | 8
-rw-r--r-- src/math/x86_64/llrint.s | 5
-rw-r--r-- src/math/x86_64/llrintf.c | 8
-rw-r--r-- src/math/x86_64/llrintf.s | 5
-rw-r--r-- src/math/x86_64/llrintl.c | 8
-rw-r--r-- src/math/x86_64/llrintl.s | 7
-rw-r--r-- src/math/x86_64/lrint.c | 8
-rw-r--r-- src/math/x86_64/lrint.s | 5
-rw-r--r-- src/math/x86_64/lrintf.c | 8
-rw-r--r-- src/math/x86_64/lrintf.s | 5
-rw-r--r-- src/math/x86_64/lrintl.c | 8
-rw-r--r-- src/math/x86_64/lrintl.s | 7
-rw-r--r-- src/math/x86_64/remainderl.c | 9
-rw-r--r-- src/math/x86_64/remainderl.s | 11
-rw-r--r-- src/math/x86_64/remquol.c | 32
-rw-r--r-- src/math/x86_64/rintl.c | 7
-rw-r--r-- src/math/x86_64/rintl.s | 6
-rw-r--r-- src/math/x86_64/sqrt.c | 7
-rw-r--r-- src/math/x86_64/sqrt.s | 4
-rw-r--r-- src/math/x86_64/sqrtf.c | 7
-rw-r--r-- src/math/x86_64/sqrtf.s | 4
-rw-r--r-- src/math/x86_64/sqrtl.c | 7
-rw-r--r-- src/math/x86_64/sqrtl.s | 5
-rw-r--r-- src/misc/nftw.c | 22
-rw-r--r-- src/network/getnameinfo.c | 1
-rw-r--r-- src/network/lookup_name.c | 12
-rw-r--r-- src/network/res_mkquery.c | 1
-rw-r--r-- src/network/res_send.c | 2
-rw-r--r-- src/process/fork.c | 1
-rw-r--r-- src/stdio/__string_read.c | 16
-rw-r--r-- src/stdio/fmemopen.c | 6
-rw-r--r-- src/stdio/vfscanf.c | 5
-rw-r--r-- src/stdio/vsscanf.c | 16
-rw-r--r-- src/stdlib/wcstod.c | 3
-rw-r--r-- src/stdlib/wcstol.c | 3
-rw-r--r-- src/string/aarch64/memcpy.S | 186
-rw-r--r-- src/string/aarch64/memset.S | 115
-rw-r--r-- src/string/arm/memcpy.S (renamed from src/string/arm/memcpy_le.S) | 101
-rw-r--r-- src/string/arm/memcpy.c | 3
-rw-r--r-- src/string/memmem.c | 8
-rw-r--r-- src/string/strsignal.c | 10
-rw-r--r-- src/string/strstr.c | 8
-rw-r--r-- src/thread/__lock.c | 4
-rw-r--r-- src/thread/pthread_create.c | 23
-rw-r--r-- src/time/__tz.c | 10
130 files changed, 2334 insertions(+), 795 deletions(-)
diff --git a/COPYRIGHT b/COPYRIGHT
index e6472371..c1628e9a 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -127,10 +127,13 @@ Copyright © 2017-2018 Arm Limited
and labelled as such in comments in the individual source files. All
have been licensed under extremely permissive terms.
-The ARM memcpy code (src/string/arm/memcpy_el.S) is Copyright © 2008
+The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
The Android Open Source Project and is licensed under a two-clause BSD
license. It was taken from Bionic libc, used on Android.
+The AArch64 memcpy and memset code (src/string/aarch64/*) are
+Copyright © 1999-2019, Arm Limited.
+
The implementation of DES for crypt (src/crypt/crypt_des.c) is
Copyright © 1994 David Burren. It is licensed under a BSD license.
diff --git a/Makefile b/Makefile
index bd8f5c38..e8cc4436 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,8 @@ includedir = $(prefix)/include
libdir = $(prefix)/lib
syslibdir = /lib
-SRC_DIRS = $(addprefix $(srcdir)/,src/* crt ldso $(COMPAT_SRC_DIRS))
+MALLOC_DIR = mallocng
+SRC_DIRS = $(addprefix $(srcdir)/,src/* src/malloc/$(MALLOC_DIR) crt ldso $(COMPAT_SRC_DIRS))
BASE_GLOBS = $(addsuffix /*.c,$(SRC_DIRS))
ARCH_GLOBS = $(addsuffix /$(ARCH)/*.[csS],$(SRC_DIRS))
BASE_SRCS = $(sort $(wildcard $(BASE_GLOBS)))
diff --git a/arch/arm/syscall_arch.h b/arch/arm/syscall_arch.h
index 4b08762d..a877b2cf 100644
--- a/arch/arm/syscall_arch.h
+++ b/arch/arm/syscall_arch.h
@@ -98,12 +98,6 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
}
-#define VDSO_USEFUL
-#define VDSO_CGT32_SYM "__vdso_clock_gettime"
-#define VDSO_CGT32_VER "LINUX_2.6"
-#define VDSO_CGT_SYM "__vdso_clock_gettime64"
-#define VDSO_CGT_VER "LINUX_2.6"
-
#define SYSCALL_FADVISE_6_ARG
#define SYSCALL_IPC_BROKEN_MODE
diff --git a/arch/mips/bits/signal.h b/arch/mips/bits/signal.h
index e1d97ac7..1b69e762 100644
--- a/arch/mips/bits/signal.h
+++ b/arch/mips/bits/signal.h
@@ -93,7 +93,7 @@ typedef struct __ucontext {
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT SIGABRT
-#define SIGSTKFLT 7
+#define SIGEMT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
diff --git a/arch/mips64/bits/signal.h b/arch/mips64/bits/signal.h
index c31ad07e..4f91c9fc 100644
--- a/arch/mips64/bits/signal.h
+++ b/arch/mips64/bits/signal.h
@@ -112,7 +112,7 @@ typedef struct __ucontext {
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT SIGABRT
-#define SIGSTKFLT 7
+#define SIGEMT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
diff --git a/arch/mipsn32/bits/signal.h b/arch/mipsn32/bits/signal.h
index c31ad07e..4f91c9fc 100644
--- a/arch/mipsn32/bits/signal.h
+++ b/arch/mipsn32/bits/signal.h
@@ -112,7 +112,7 @@ typedef struct __ucontext {
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT SIGABRT
-#define SIGSTKFLT 7
+#define SIGEMT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
diff --git a/configure b/configure
index a2728969..18fda9af 100755
--- a/configure
+++ b/configure
@@ -35,6 +35,9 @@ Optional features:
--disable-shared inhibit building shared library [enabled]
--disable-static inhibit building static library [enabled]
+Optional packages:
+ --with-malloc=... choose malloc implementation [mallocng]
+
Some influential environment variables:
CC C compiler command [detected]
CFLAGS C compiler flags [-Os -pipe ...]
@@ -139,6 +142,7 @@ static=yes
wrapper=auto
gcc_wrapper=no
clang_wrapper=no
+malloc_dir=mallocng
for arg ; do
case "$arg" in
@@ -168,6 +172,7 @@ case "$arg" in
--disable-wrapper|--enable-wrapper=no) wrapper=no ;;
--enable-gcc-wrapper|--enable-gcc-wrapper=yes) wrapper=yes ; gcc_wrapper=yes ;;
--disable-gcc-wrapper|--enable-gcc-wrapper=no) wrapper=no ;;
+--with-malloc=*) malloc_dir=${arg#*=} ;;
--enable-*|--disable-*|--with-*|--without-*|--*dir=*) ;;
--host=*|--target=*) target=${arg#*=} ;;
--build=*) build=${arg#*=} ;;
@@ -215,6 +220,12 @@ set +C
trap 'rm "$tmpc"' EXIT INT QUIT TERM HUP
#
+# Check that the requested malloc implementation exists
+#
+test -d "$srcdir/src/malloc/$malloc_dir" \
+|| fail "$0: error: chosen malloc implementation '$malloc_dir' does not exist"
+
+#
# Check whether we are cross-compiling, and set a default
# CROSS_COMPILE prefix if none was provided.
#
@@ -495,6 +506,13 @@ fnmatch '-mtune=*|*\ -mtune=*' "$CC $CFLAGS" || tryldflag CFLAGS_AUTO -mtune=gen
fi
#
+# GCC defines -w as overriding any -W options, regardless of order, but
+# clang has a bunch of annoying warnings enabled by default and needs -w
+# to start from a clean slate. So use -w if building with clang.
+#
+test "$cc_family" = clang && tryflag CFLAGS_AUTO -w
+
+#
# Even with -std=c99, gcc accepts some constructs which are constraint
# violations. We want to treat these as errors regardless of whether
# other purely stylistic warnings are enabled -- especially implicit
@@ -772,6 +790,7 @@ OPTIMIZE_GLOBS = $OPTIMIZE_GLOBS
ALL_TOOLS = $tools
TOOL_LIBS = $tool_libs
ADD_CFI = $ADD_CFI
+MALLOC_DIR = $malloc_dir
EOF
test "x$static" = xno && echo "STATIC_LIBS ="
test "x$shared" = xno && echo "SHARED_LIBS ="
diff --git a/ldso/dynlink.c b/ldso/dynlink.c
index 6468f203..d3d4ddd2 100644
--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
@@ -23,7 +23,6 @@
#include "pthread_impl.h"
#include "libc.h"
#include "dynlink.h"
-#include "malloc_impl.h"
static void error(const char *, ...);
@@ -1936,6 +1935,8 @@ void __dls3(size_t *sp, size_t *auxv)
* possibility of incomplete replacement. */
if (find_sym(head, "malloc", 1).dso != &ldso)
__malloc_replaced = 1;
+ if (find_sym(head, "aligned_alloc", 1).dso != &ldso)
+ __aligned_alloc_replaced = 1;
/* Switch to runtime mode: any further failures in the dynamic
* linker are a reportable failure rather than a fatal startup
diff --git a/src/fenv/sh/fenv.S b/src/fenv/sh/fenv.S
index 907aefc0..b3b7d66a 100644
--- a/src/fenv/sh/fenv.S
+++ b/src/fenv/sh/fenv.S
@@ -12,6 +12,8 @@ fegetround:
.type __fesetround, @function
__fesetround:
sts fpscr, r0
+ mov #-4, r1
+ and r1, r0
or r4, r0
lds r0, fpscr
rts
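
Note: the two added instructions mask off the FPSCR rounding-mode field before or-ing in the new mode, so a stale mode can no longer leak through __fesetround. The same read-modify-write, sketched in portable C (plain integers standing in for the register; this mirrors only the bit logic of the instructions above):

static unsigned sh_fesetround_bits(unsigned fpscr, unsigned mode)
{
	/* mov #-4,r1; and r1,r0  -> clear the 2-bit RM field */
	/* or r4,r0               -> install the requested mode */
	return (fpscr & ~3u) | mode;
}
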
diff --git a/src/internal/atomic.h b/src/internal/atomic.h
index f938879b..96c1552d 100644
--- a/src/internal/atomic.h
+++ b/src/internal/atomic.h
@@ -315,4 +315,19 @@ static inline int a_clz_64(uint64_t x)
}
#endif
+#ifndef a_clz_32
+#define a_clz_32 a_clz_32
+static inline int a_clz_32(uint32_t x)
+{
+ x >>= 1;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+ return 31-a_ctz_32(x);
+}
+#endif
+
#endif
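
Note: the new a_clz_32 fallback reuses a_ctz_32 by smearing the highest set bit downward and incrementing, which isolates a single bit at the original MSB position. A standalone sketch of the same trick (GCC/Clang builtin in place of a_ctz_32; valid for nonzero inputs, as in the fallback above):

#include <stdio.h>
#include <stdint.h>

static int clz32(uint32_t x)
{
	x >>= 1;            /* ensure the increment below cannot overflow */
	x |= x >> 1; x |= x >> 2; x |= x >> 4;
	x |= x >> 8; x |= x >> 16;
	x++;                /* single bit, back at the original MSB position */
	return 31 - __builtin_ctz(x);
}

int main(void)
{
	printf("%d %d %d\n", clz32(1), clz32(0x10000), clz32(0xffffffffu));
	/* prints: 31 15 0 */
	return 0;
}
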
diff --git a/src/internal/dynlink.h b/src/internal/dynlink.h
index 764e3a1a..51c0639f 100644
--- a/src/internal/dynlink.h
+++ b/src/internal/dynlink.h
@@ -105,4 +105,9 @@ hidden void __dl_vseterr(const char *, va_list);
hidden ptrdiff_t __tlsdesc_static(), __tlsdesc_dynamic();
+hidden extern int __malloc_replaced;
+hidden extern int __aligned_alloc_replaced;
+hidden void __malloc_donate(char *, char *);
+hidden int __malloc_allzerop(void *);
+
#endif
diff --git a/src/internal/libc.h b/src/internal/libc.h
index ac97dc7e..619bba86 100644
--- a/src/internal/libc.h
+++ b/src/internal/libc.h
@@ -18,10 +18,11 @@ struct tls_module {
};
struct __libc {
- int can_do_threads;
- int threaded;
- int secure;
- volatile int threads_minus_1;
+ char can_do_threads;
+ char threaded;
+ char secure;
+ volatile signed char need_locks;
+ int threads_minus_1;
size_t *auxv;
struct tls_module *tls_head;
size_t tls_size, tls_align, tls_cnt;
diff --git a/src/internal/shgetc.c b/src/internal/shgetc.c
index a4a9c633..7455d2f0 100644
--- a/src/internal/shgetc.c
+++ b/src/internal/shgetc.c
@@ -32,6 +32,6 @@ int __shgetc(FILE *f)
else
f->shend = f->rend;
f->shcnt = f->buf - f->rpos + cnt;
- if (f->rpos[-1] != c) f->rpos[-1] = c;
+ if (f->rpos <= f->buf) f->rpos[-1] = c;
return c;
}
diff --git a/src/internal/stdio_impl.h b/src/internal/stdio_impl.h
index d7398f59..0b2438d6 100644
--- a/src/internal/stdio_impl.h
+++ b/src/internal/stdio_impl.h
@@ -60,8 +60,6 @@ hidden size_t __stdout_write(FILE *, const unsigned char *, size_t);
hidden off_t __stdio_seek(FILE *, off_t, int);
hidden int __stdio_close(FILE *);
-hidden size_t __string_read(FILE *, unsigned char *, size_t);
-
hidden int __toread(FILE *);
hidden int __towrite(FILE *);
diff --git a/src/linux/clock_adjtime.c b/src/linux/clock_adjtime.c
index 23eb8729..d4d03d24 100644
--- a/src/linux/clock_adjtime.c
+++ b/src/linux/clock_adjtime.c
@@ -38,55 +38,52 @@ int clock_adjtime (clockid_t clock_id, struct timex *utx)
{
int r = -ENOSYS;
#ifdef SYS_clock_adjtime64
- if (SYS_clock_adjtime == SYS_clock_adjtime64 ||
- (utx->modes & ADJ_SETOFFSET) && !IS32BIT(utx->time.tv_sec)) {
- struct ktimex64 ktx = {
- .modes = utx->modes,
- .offset = utx->offset,
- .freq = utx->freq,
- .maxerror = utx->maxerror,
- .esterror = utx->esterror,
- .status = utx->status,
- .constant = utx->constant,
- .precision = utx->precision,
- .tolerance = utx->tolerance,
- .time_sec = utx->time.tv_sec,
- .time_usec = utx->time.tv_usec,
- .tick = utx->tick,
- .ppsfreq = utx->ppsfreq,
- .jitter = utx->jitter,
- .shift = utx->shift,
- .stabil = utx->stabil,
- .jitcnt = utx->jitcnt,
- .calcnt = utx->calcnt,
- .errcnt = utx->errcnt,
- .stbcnt = utx->stbcnt,
- .tai = utx->tai,
- };
- r = __syscall(SYS_clock_adjtime, clock_id, &ktx);
- if (r>=0) {
- utx->modes = ktx.modes;
- utx->offset = ktx.offset;
- utx->freq = ktx.freq;
- utx->maxerror = ktx.maxerror;
- utx->esterror = ktx.esterror;
- utx->status = ktx.status;
- utx->constant = ktx.constant;
- utx->precision = ktx.precision;
- utx->tolerance = ktx.tolerance;
- utx->time.tv_sec = ktx.time_sec;
- utx->time.tv_usec = ktx.time_usec;
- utx->tick = ktx.tick;
- utx->ppsfreq = ktx.ppsfreq;
- utx->jitter = ktx.jitter;
- utx->shift = ktx.shift;
- utx->stabil = ktx.stabil;
- utx->jitcnt = ktx.jitcnt;
- utx->calcnt = ktx.calcnt;
- utx->errcnt = ktx.errcnt;
- utx->stbcnt = ktx.stbcnt;
- utx->tai = ktx.tai;
- }
+ struct ktimex64 ktx = {
+ .modes = utx->modes,
+ .offset = utx->offset,
+ .freq = utx->freq,
+ .maxerror = utx->maxerror,
+ .esterror = utx->esterror,
+ .status = utx->status,
+ .constant = utx->constant,
+ .precision = utx->precision,
+ .tolerance = utx->tolerance,
+ .time_sec = utx->time.tv_sec,
+ .time_usec = utx->time.tv_usec,
+ .tick = utx->tick,
+ .ppsfreq = utx->ppsfreq,
+ .jitter = utx->jitter,
+ .shift = utx->shift,
+ .stabil = utx->stabil,
+ .jitcnt = utx->jitcnt,
+ .calcnt = utx->calcnt,
+ .errcnt = utx->errcnt,
+ .stbcnt = utx->stbcnt,
+ .tai = utx->tai,
+ };
+ r = __syscall(SYS_clock_adjtime64, clock_id, &ktx);
+ if (r>=0) {
+ utx->modes = ktx.modes;
+ utx->offset = ktx.offset;
+ utx->freq = ktx.freq;
+ utx->maxerror = ktx.maxerror;
+ utx->esterror = ktx.esterror;
+ utx->status = ktx.status;
+ utx->constant = ktx.constant;
+ utx->precision = ktx.precision;
+ utx->tolerance = ktx.tolerance;
+ utx->time.tv_sec = ktx.time_sec;
+ utx->time.tv_usec = ktx.time_usec;
+ utx->tick = ktx.tick;
+ utx->ppsfreq = ktx.ppsfreq;
+ utx->jitter = ktx.jitter;
+ utx->shift = ktx.shift;
+ utx->stabil = ktx.stabil;
+ utx->jitcnt = ktx.jitcnt;
+ utx->calcnt = ktx.calcnt;
+ utx->errcnt = ktx.errcnt;
+ utx->stbcnt = ktx.stbcnt;
+ utx->tai = ktx.tai;
}
if (SYS_clock_adjtime == SYS_clock_adjtime64 || r!=-ENOSYS)
return __syscall_ret(r);
diff --git a/src/malloc/DESIGN b/src/malloc/DESIGN
deleted file mode 100644
index 58b0523f..00000000
--- a/src/malloc/DESIGN
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-In principle, this memory allocator is roughly equivalent to Doug
-Lea's dlmalloc with fine-grained locking.
-
-
-
-malloc:
-
-Uses a freelist binned by chunk size, with a bitmap to optimize
-searching for the smallest non-empty bin which can satisfy an
-allocation. If no free chunks are available, it creates a new chunk of
-the requested size and attempts to merge it with any existing free
-chunk immediately below the newly created chunk.
-
-Whether the chunk was obtained from a bin or newly created, it's
-likely to be larger than the requested allocation. malloc always
-finishes its work by passing the new chunk to realloc, which will
-split it into two chunks and free the tail portion.
-
-
-
diff --git a/src/malloc/aligned_alloc.c b/src/malloc/aligned_alloc.c
deleted file mode 100644
index b6143f30..00000000
--- a/src/malloc/aligned_alloc.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <stdlib.h>
-#include "malloc_impl.h"
-
-void *aligned_alloc(size_t align, size_t len)
-{
- return __memalign(align, len);
-}
diff --git a/src/malloc/calloc.c b/src/malloc/calloc.c
new file mode 100644
index 00000000..bf6bddca
--- /dev/null
+++ b/src/malloc/calloc.c
@@ -0,0 +1,45 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include "dynlink.h"
+
+static size_t mal0_clear(char *p, size_t n)
+{
+ const size_t pagesz = 4096; /* arbitrary */
+ if (n < pagesz) return n;
+#ifdef __GNUC__
+ typedef uint64_t __attribute__((__may_alias__)) T;
+#else
+ typedef unsigned char T;
+#endif
+ char *pp = p + n;
+ size_t i = (uintptr_t)pp & (pagesz - 1);
+ for (;;) {
+ pp = memset(pp - i, 0, i);
+ if (pp - p < pagesz) return pp - p;
+ for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
+ if (((T *)pp)[-1] | ((T *)pp)[-2])
+ break;
+ }
+}
+
+static int allzerop(void *p)
+{
+ return 0;
+}
+weak_alias(allzerop, __malloc_allzerop);
+
+void *calloc(size_t m, size_t n)
+{
+ if (n && m > (size_t)-1/n) {
+ errno = ENOMEM;
+ return 0;
+ }
+ n *= m;
+ void *p = malloc(n);
+ if (!p || (!__malloc_replaced && __malloc_allzerop(p)))
+ return p;
+ n = mal0_clear(p, n);
+ return memset(p, 0, n);
+}
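
Note: two things happen in this new calloc: a portable overflow check on m*n, and mal0_clear, which scans backward from the end of the allocation and skips trailing all-zero pages, so freshly mapped (already-zero) memory is never dirtied by the final memset. The overflow guard in isolation -- a product of two size_t values overflows exactly when n is nonzero and m exceeds SIZE_MAX/n:

#include <stddef.h>

static int mul_overflows(size_t m, size_t n)
{
	return n && m > (size_t)-1 / n;
}
/* e.g. with a 32-bit size_t, mul_overflows(0x10000, 0x10000) == 1 */
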
diff --git a/src/malloc/expand_heap.c b/src/malloc/expand_heap.c
deleted file mode 100644
index e6a3d7a0..00000000
--- a/src/malloc/expand_heap.c
+++ /dev/null
@@ -1,71 +0,0 @@
-#include <limits.h>
-#include <stdint.h>
-#include <errno.h>
-#include <sys/mman.h>
-#include "libc.h"
-#include "syscall.h"
-#include "malloc_impl.h"
-
-/* This function returns true if the interval [old,new]
- * intersects the 'len'-sized interval below &libc.auxv
- * (interpreted as the main-thread stack) or below &b
- * (the current stack). It is used to defend against
- * buggy brk implementations that can cross the stack. */
-
-static int traverses_stack_p(uintptr_t old, uintptr_t new)
-{
- const uintptr_t len = 8<<20;
- uintptr_t a, b;
-
- b = (uintptr_t)libc.auxv;
- a = b > len ? b-len : 0;
- if (new>a && old<b) return 1;
-
- b = (uintptr_t)&b;
- a = b > len ? b-len : 0;
- if (new>a && old<b) return 1;
-
- return 0;
-}
-
-/* Expand the heap in-place if brk can be used, or otherwise via mmap,
- * using an exponential lower bound on growth by mmap to make
- * fragmentation asymptotically irrelevant. The size argument is both
- * an input and an output, since the caller needs to know the size
- * allocated, which will be larger than requested due to page alignment
- * and mmap minimum size rules. The caller is responsible for locking
- * to prevent concurrent calls. */
-
-void *__expand_heap(size_t *pn)
-{
- static uintptr_t brk;
- static unsigned mmap_step;
- size_t n = *pn;
-
- if (n > SIZE_MAX/2 - PAGE_SIZE) {
- errno = ENOMEM;
- return 0;
- }
- n += -n & PAGE_SIZE-1;
-
- if (!brk) {
- brk = __syscall(SYS_brk, 0);
- brk += -brk & PAGE_SIZE-1;
- }
-
- if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
- && __syscall(SYS_brk, brk+n)==brk+n) {
- *pn = n;
- brk += n;
- return (void *)(brk-n);
- }
-
- size_t min = (size_t)PAGE_SIZE << mmap_step/2;
- if (n < min) n = min;
- void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- if (area == MAP_FAILED) return 0;
- *pn = n;
- mmap_step++;
- return area;
-}
diff --git a/src/malloc/lite_malloc.c b/src/malloc/lite_malloc.c
index 050d84f6..f8931ba5 100644
--- a/src/malloc/lite_malloc.c
+++ b/src/malloc/lite_malloc.c
@@ -2,58 +2,102 @@
#include <stdint.h>
#include <limits.h>
#include <errno.h>
+#include <sys/mman.h>
+#include "libc.h"
#include "lock.h"
-#include "malloc_impl.h"
+#include "syscall.h"
#define ALIGN 16
+/* This function returns true if the interval [old,new]
+ * intersects the 'len'-sized interval below &libc.auxv
+ * (interpreted as the main-thread stack) or below &b
+ * (the current stack). It is used to defend against
+ * buggy brk implementations that can cross the stack. */
+
+static int traverses_stack_p(uintptr_t old, uintptr_t new)
+{
+ const uintptr_t len = 8<<20;
+ uintptr_t a, b;
+
+ b = (uintptr_t)libc.auxv;
+ a = b > len ? b-len : 0;
+ if (new>a && old<b) return 1;
+
+ b = (uintptr_t)&b;
+ a = b > len ? b-len : 0;
+ if (new>a && old<b) return 1;
+
+ return 0;
+}
+
static void *__simple_malloc(size_t n)
{
- static char *cur, *end;
+ static uintptr_t brk, cur, end;
static volatile int lock[1];
- size_t align=1, pad;
+ static unsigned mmap_step;
+ size_t align=1;
void *p;
+ if (n > SIZE_MAX/2) {
+ errno = ENOMEM;
+ return 0;
+ }
+
if (!n) n++;
while (align<n && align<ALIGN)
align += align;
LOCK(lock);
- pad = -(uintptr_t)cur & align-1;
-
- if (n <= SIZE_MAX/2 + ALIGN) n += pad;
+ cur += -cur & align-1;
if (n > end-cur) {
- size_t m = n;
- char *new = __expand_heap(&m);
- if (!new) {
- UNLOCK(lock);
- return 0;
+ size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;
+
+ if (!cur) {
+ brk = __syscall(SYS_brk, 0);
+ brk += -brk & PAGE_SIZE-1;
+ cur = end = brk;
}
- if (new != end) {
- cur = new;
- n -= pad;
- pad = 0;
+
+ if (brk == end && req < SIZE_MAX-brk
+ && !traverses_stack_p(brk, brk+req)
+ && __syscall(SYS_brk, brk+req)==brk+req) {
+ brk = end += req;
+ } else {
+ int new_area = 0;
+ req = n + PAGE_SIZE-1 & -PAGE_SIZE;
+ /* Only make a new area rather than individual mmap
+ * if wasted space would be over 1/8 of the map. */
+ if (req-n > req/8) {
+ /* Geometric area size growth up to 64 pages,
+ * bounding waste by 1/8 of the area. */
+ size_t min = PAGE_SIZE<<(mmap_step/2);
+ if (min-n > end-cur) {
+ if (req < min) {
+ req = min;
+ if (mmap_step < 12)
+ mmap_step++;
+ }
+ new_area = 1;
+ }
+ }
+ void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (mem == MAP_FAILED || !new_area) {
+ UNLOCK(lock);
+ return mem==MAP_FAILED ? 0 : mem;
+ }
+ cur = (uintptr_t)mem;
+ end = cur + req;
}
- end = new + m;
}
- p = cur + pad;
+ p = (void *)cur;
cur += n;
UNLOCK(lock);
return p;
}
weak_alias(__simple_malloc, malloc);
-
-static void *__simple_calloc(size_t m, size_t n)
-{
- if (n && m > (size_t)-1/n) {
- errno = ENOMEM;
- return 0;
- }
- return __simple_malloc(n * m);
-}
-
-weak_alias(__simple_calloc, calloc);
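
Note: the rewritten __simple_malloc is a bump allocator that first tries to grow the brk in place and falls back to mmap'd areas with geometric growth. Its core shape, reduced to a toy (mmap-only arena, no locking, none of the brk or stack-collision defenses above -- just the cursor-rounding and page-rounding idioms the patch uses):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

#define ALIGN 16

static void *bump_alloc(size_t n)
{
	static uintptr_t cur, end;
	if (!n) n = 1;
	cur += -cur & (ALIGN-1);              /* round cursor up to ALIGN */
	if (n > end - cur) {
		size_t req = n + 4095 & -4096UL;  /* round request up to pages */
		void *mem = mmap(0, req, PROT_READ|PROT_WRITE,
		                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (mem == MAP_FAILED) return 0;
		cur = (uintptr_t)mem;
		end = cur + req;
	}
	void *p = (void *)cur;
	cur += n;
	return p;
}
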
diff --git a/src/malloc/mallocng/aligned_alloc.c b/src/malloc/mallocng/aligned_alloc.c
new file mode 100644
index 00000000..34116896
--- /dev/null
+++ b/src/malloc/mallocng/aligned_alloc.c
@@ -0,0 +1,57 @@
+#include <stdlib.h>
+#include <errno.h>
+#include "meta.h"
+
+void *aligned_alloc(size_t align, size_t len)
+{
+ if ((align & -align) != align) {
+ errno = EINVAL;
+ return 0;
+ }
+
+ if (len > SIZE_MAX - align || align >= (1ULL<<31)*UNIT) {
+ errno = ENOMEM;
+ return 0;
+ }
+
+ if (DISABLE_ALIGNED_ALLOC) {
+ errno = ENOMEM;
+ return 0;
+ }
+
+ if (align <= UNIT) align = UNIT;
+
+ unsigned char *p = malloc(len + align - UNIT);
+ struct meta *g = get_meta(p);
+ int idx = get_slot_index(p);
+ size_t stride = get_stride(g);
+ unsigned char *start = g->mem->storage + stride*idx;
+ unsigned char *end = g->mem->storage + stride*(idx+1) - IB;
+ size_t adj = -(uintptr_t)p & (align-1);
+
+ if (!adj) {
+ set_size(p, end, len);
+ return p;
+ }
+ p += adj;
+ uint32_t offset = (size_t)(p-g->mem->storage)/UNIT;
+ if (offset <= 0xffff) {
+ *(uint16_t *)(p-2) = offset;
+ p[-4] = 0;
+ } else {
+ // use a 32-bit offset if 16-bit doesn't fit. for this,
+ // 16-bit field must be zero, [-4] byte nonzero.
+ *(uint16_t *)(p-2) = 0;
+ *(uint32_t *)(p-8) = offset;
+ p[-4] = 1;
+ }
+ p[-3] = idx;
+ set_size(p, end, len);
+ // store offset to aligned enframing. this facilitates cycling
+ // offset and also iteration of heap for debugging/measurement.
+ // for extreme overalignment it won't fit but these are classless
+ // allocations anyway.
+ *(uint16_t *)(start - 2) = (size_t)(p-start)/UNIT;
+ start[-3] = 7<<5;
+ return p;
+}
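
Note: the strategy here is over-allocation: malloc(len + align - UNIT) guarantees a start within the slot can be advanced to the requested boundary, and the adjusted pointer gets its own in-band header (16-bit slot offset, or a 32-bit offset flagged via p[-4] when it doesn't fit). Caller-side behavior for reference (standard C11 usage; the implementation above rejects a non-power-of-two align with EINVAL):

#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	void *p = aligned_alloc(64, 200);
	assert(p && ((uintptr_t)p & 63) == 0); /* 64-byte aligned */
	free(p);
	return 0;
}
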
diff --git a/src/malloc/mallocng/donate.c b/src/malloc/mallocng/donate.c
new file mode 100644
index 00000000..41d850f3
--- /dev/null
+++ b/src/malloc/mallocng/donate.c
@@ -0,0 +1,39 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#include "meta.h"
+
+static void donate(unsigned char *base, size_t len)
+{
+ uintptr_t a = (uintptr_t)base;
+ uintptr_t b = a + len;
+ a += -a & (UNIT-1);
+ b -= b & (UNIT-1);
+ memset(base, 0, len);
+ for (int sc=47; sc>0 && b>a; sc-=4) {
+ if (b-a < (size_classes[sc]+1)*UNIT) continue;
+ struct meta *m = alloc_meta();
+ m->avail_mask = 0;
+ m->freed_mask = 1;
+ m->mem = (void *)a;
+ m->mem->meta = m;
+ m->last_idx = 0;
+ m->freeable = 0;
+ m->sizeclass = sc;
+ m->maplen = 0;
+ *((unsigned char *)m->mem+UNIT-4) = 0;
+ *((unsigned char *)m->mem+UNIT-3) = 255;
+ m->mem->storage[size_classes[sc]*UNIT-4] = 0;
+ queue(&ctx.active[sc], m);
+ a += (size_classes[sc]+1)*UNIT;
+ }
+}
+
+void __malloc_donate(char *start, char *end)
+{
+ donate((void *)start, end-start);
+}
diff --git a/src/malloc/mallocng/free.c b/src/malloc/mallocng/free.c
new file mode 100644
index 00000000..40745f97
--- /dev/null
+++ b/src/malloc/mallocng/free.c
@@ -0,0 +1,143 @@
+#define _BSD_SOURCE
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include "meta.h"
+
+struct mapinfo {
+ void *base;
+ size_t len;
+};
+
+static struct mapinfo nontrivial_free(struct meta *, int);
+
+static struct mapinfo free_group(struct meta *g)
+{
+ struct mapinfo mi = { 0 };
+ int sc = g->sizeclass;
+ if (sc < 48) {
+ ctx.usage_by_class[sc] -= g->last_idx+1;
+ }
+ if (g->maplen) {
+ step_seq();
+ record_seq(sc);
+ mi.base = g->mem;
+ mi.len = g->maplen*4096UL;
+ } else {
+ void *p = g->mem;
+ struct meta *m = get_meta(p);
+ int idx = get_slot_index(p);
+ g->mem->meta = 0;
+ // not checking size/reserved here; it's intentionally invalid
+ mi = nontrivial_free(m, idx);
+ }
+ free_meta(g);
+ return mi;
+}
+
+static int okay_to_free(struct meta *g)
+{
+ int sc = g->sizeclass;
+
+ if (!g->freeable) return 0;
+
+ // always free individual mmaps not suitable for reuse
+ if (sc >= 48 || get_stride(g) < UNIT*size_classes[sc])
+ return 1;
+
+ // always free groups allocated inside another group's slot
+ // since recreating them should not be expensive and they
+ // might be blocking freeing of a much larger group.
+ if (!g->maplen) return 1;
+
+ // if there is another non-full group, free this one to
+ // consolidate future allocations, reduce fragmentation.
+ if (g->next != g) return 1;
+
+ // free any group in a size class that's not bouncing
+ if (!is_bouncing(sc)) return 1;
+
+ size_t cnt = g->last_idx+1;
+ size_t usage = ctx.usage_by_class[sc];
+
+ // if usage is high enough that a larger count should be
+ // used, free the low-count group so a new one will be made.
+ if (9*cnt <= usage && cnt < 20)
+ return 1;
+
+ // otherwise, keep the last group in a bouncing class.
+ return 0;
+}
+
+static struct mapinfo nontrivial_free(struct meta *g, int i)
+{
+ uint32_t self = 1u<<i;
+ int sc = g->sizeclass;
+ uint32_t mask = g->freed_mask | g->avail_mask;
+
+ if (mask+self == (2u<<g->last_idx)-1 && okay_to_free(g)) {
+ // any multi-slot group is necessarily on an active list
+ // here, but single-slot groups might or might not be.
+ if (g->next) {
+ assert(sc < 48);
+ int activate_new = (ctx.active[sc]==g);
+ dequeue(&ctx.active[sc], g);
+ if (activate_new && ctx.active[sc])
+ activate_group(ctx.active[sc]);
+ }
+ return free_group(g);
+ } else if (!mask) {
+ assert(sc < 48);
+ // might still be active if there were no allocations
+ // after last available slot was taken.
+ if (ctx.active[sc] != g) {
+ queue(&ctx.active[sc], g);
+ }
+ }
+ a_or(&g->freed_mask, self);
+ return (struct mapinfo){ 0 };
+}
+
+void free(void *p)
+{
+ if (!p) return;
+
+ struct meta *g = get_meta(p);
+ int idx = get_slot_index(p);
+ size_t stride = get_stride(g);
+ unsigned char *start = g->mem->storage + stride*idx;
+ unsigned char *end = start + stride - IB;
+ get_nominal_size(p, end);
+ uint32_t self = 1u<<idx, all = (2u<<g->last_idx)-1;
+ ((unsigned char *)p)[-3] = 255;
+ // invalidate offset to group header, and cycle offset of
+ // used region within slot if current offset is zero.
+ *(uint16_t *)((char *)p-2) = 0;
+
+ // release any whole pages contained in the slot to be freed
+ // unless it's a single-slot group that will be unmapped.
+ if (((uintptr_t)(start-1) ^ (uintptr_t)end) >= 2*PGSZ && g->last_idx) {
+ unsigned char *base = start + (-(uintptr_t)start & (PGSZ-1));
+ size_t len = (end-base) & -PGSZ;
+ if (len) madvise(base, len, MADV_FREE);
+ }
+
+ // atomic free without locking if this is neither first nor last slot
+ for (;;) {
+ uint32_t freed = g->freed_mask;
+ uint32_t avail = g->avail_mask;
+ uint32_t mask = freed | avail;
+ assert(!(mask&self));
+ if (!freed || mask+self==all) break;
+ if (!MT)
+ g->freed_mask = freed+self;
+ else if (a_cas(&g->freed_mask, freed, freed+self)!=freed)
+ continue;
+ return;
+ }
+
+ wrlock();
+ struct mapinfo mi = nontrivial_free(g, idx);
+ unlock();
+ if (mi.len) munmap(mi.base, mi.len);
+}
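
Note: the loop at the end of free() is the heart of the design: a slot's freed bit is set by compare-and-swap with no lock taken, unless this is the first freed slot in the group or the one that would make the whole group free -- those cases take the lock to manage the active list and possible unmapping. The retry idiom in isolation (GCC/Clang builtin standing in for musl's a_cas):

#include <stdint.h>

static void atomic_or_bits(volatile int *p, int bits)
{
	int old;
	do old = *p;
	while (!__sync_bool_compare_and_swap(p, old, old | bits));
}
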
diff --git a/src/malloc/mallocng/glue.h b/src/malloc/mallocng/glue.h
new file mode 100644
index 00000000..16acd1ea
--- /dev/null
+++ b/src/malloc/mallocng/glue.h
@@ -0,0 +1,77 @@
+#ifndef MALLOC_GLUE_H
+#define MALLOC_GLUE_H
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <elf.h>
+#include <string.h>
+#include "atomic.h"
+#include "syscall.h"
+#include "libc.h"
+#include "lock.h"
+#include "dynlink.h"
+
+// use macros to appropriately namespace these.
+#define size_classes __malloc_size_classes
+#define ctx __malloc_context
+#define alloc_meta __malloc_alloc_meta
+#define is_allzero __malloc_allzerop
+#define dump_heap __dump_heap
+
+#if USE_REAL_ASSERT
+#include <assert.h>
+#else
+#undef assert
+#define assert(x) do { if (!(x)) a_crash(); } while(0)
+#endif
+
+#define brk(p) ((uintptr_t)__syscall(SYS_brk, p))
+
+#define mmap __mmap
+#define madvise __madvise
+#define mremap __mremap
+
+#define DISABLE_ALIGNED_ALLOC (__malloc_replaced && !__aligned_alloc_replaced)
+
+static inline uint64_t get_random_secret()
+{
+ uint64_t secret = (uintptr_t)&secret * 1103515245;
+ for (size_t i=0; libc.auxv[i]; i+=2)
+ if (libc.auxv[i]==AT_RANDOM)
+ memcpy(&secret, (char *)libc.auxv[i+1]+8, sizeof secret);
+ return secret;
+}
+
+#ifndef PAGESIZE
+#define PAGESIZE PAGE_SIZE
+#endif
+
+#define MT (libc.need_locks)
+
+#define RDLOCK_IS_EXCLUSIVE 1
+
+__attribute__((__visibility__("hidden")))
+extern int __malloc_lock[1];
+
+#define LOCK_OBJ_DEF \
+int __malloc_lock[1];
+
+static inline void rdlock()
+{
+ if (MT) LOCK(__malloc_lock);
+}
+static inline void wrlock()
+{
+ if (MT) LOCK(__malloc_lock);
+}
+static inline void unlock()
+{
+ UNLOCK(__malloc_lock);
+}
+static inline void upgradelock()
+{
+}
+
+#endif
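
Note: get_random_secret seeds the heap's integrity check (the meta_area check field validated in get_meta) from AT_RANDOM, the 16 kernel-supplied random bytes in the aux vector, with an address-based fallback if the entry is absent. Outside libc the same bytes are reachable through the public API -- a sketch using getauxval rather than walking libc.auxv:

#include <sys/auxv.h>
#include <elf.h>
#include <string.h>
#include <stdint.h>

static uint64_t secret_from_auxv(void)
{
	uint64_t secret = 0;
	unsigned long r = getauxval(AT_RANDOM);
	/* the code above consumes the second 8 of the 16 bytes */
	if (r) memcpy(&secret, (char *)r + 8, sizeof secret);
	return secret;
}
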
diff --git a/src/malloc/mallocng/malloc.c b/src/malloc/mallocng/malloc.c
new file mode 100644
index 00000000..d695ab8e
--- /dev/null
+++ b/src/malloc/mallocng/malloc.c
@@ -0,0 +1,387 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#include "meta.h"
+
+LOCK_OBJ_DEF;
+
+const uint16_t size_classes[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 12, 15,
+ 18, 20, 25, 31,
+ 36, 42, 50, 63,
+ 72, 84, 102, 127,
+ 146, 170, 204, 255,
+ 292, 340, 409, 511,
+ 584, 682, 818, 1023,
+ 1169, 1364, 1637, 2047,
+ 2340, 2730, 3276, 4095,
+ 4680, 5460, 6552, 8191,
+};
+
+static const uint8_t small_cnt_tab[][3] = {
+ { 30, 30, 30 },
+ { 31, 15, 15 },
+ { 20, 10, 10 },
+ { 31, 15, 7 },
+ { 25, 12, 6 },
+ { 21, 10, 5 },
+ { 18, 8, 4 },
+ { 31, 15, 7 },
+ { 28, 14, 6 },
+};
+
+static const uint8_t med_cnt_tab[4] = { 28, 24, 20, 32 };
+
+struct malloc_context ctx = { 0 };
+
+struct meta *alloc_meta(void)
+{
+ struct meta *m;
+ unsigned char *p;
+ if (!ctx.init_done) {
+#ifndef PAGESIZE
+ ctx.pagesize = get_page_size();
+#endif
+ ctx.secret = get_random_secret();
+ ctx.init_done = 1;
+ }
+ size_t pagesize = PGSZ;
+ if (pagesize < 4096) pagesize = 4096;
+ if ((m = dequeue_head(&ctx.free_meta_head))) return m;
+ if (!ctx.avail_meta_count) {
+ int need_unprotect = 1;
+ if (!ctx.avail_meta_area_count && ctx.brk!=-1) {
+ uintptr_t new = ctx.brk + pagesize;
+ int need_guard = 0;
+ if (!ctx.brk) {
+ need_guard = 1;
+ ctx.brk = brk(0);
+ // some ancient kernels returned _ebss
+ // instead of next page as initial brk.
+ ctx.brk += -ctx.brk & (pagesize-1);
+ new = ctx.brk + 2*pagesize;
+ }
+ if (brk(new) != new) {
+ ctx.brk = -1;
+ } else {
+ if (need_guard) mmap((void *)ctx.brk, pagesize,
+ PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
+ ctx.brk = new;
+ ctx.avail_meta_areas = (void *)(new - pagesize);
+ ctx.avail_meta_area_count = pagesize>>12;
+ need_unprotect = 0;
+ }
+ }
+ if (!ctx.avail_meta_area_count) {
+ size_t n = 2UL << ctx.meta_alloc_shift;
+ p = mmap(0, n*pagesize, PROT_NONE,
+ MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (p==MAP_FAILED) return 0;
+ ctx.avail_meta_areas = p + pagesize;
+ ctx.avail_meta_area_count = (n-1)*(pagesize>>12);
+ ctx.meta_alloc_shift++;
+ }
+ p = ctx.avail_meta_areas;
+ if ((uintptr_t)p & (pagesize-1)) need_unprotect = 0;
+ if (need_unprotect)
+ if (mprotect(p, pagesize, PROT_READ|PROT_WRITE)
+ && errno != ENOSYS)
+ return 0;
+ ctx.avail_meta_area_count--;
+ ctx.avail_meta_areas = p + 4096;
+ if (ctx.meta_area_tail) {
+ ctx.meta_area_tail->next = (void *)p;
+ } else {
+ ctx.meta_area_head = (void *)p;
+ }
+ ctx.meta_area_tail = (void *)p;
+ ctx.meta_area_tail->check = ctx.secret;
+ ctx.avail_meta_count = ctx.meta_area_tail->nslots
+ = (4096-sizeof(struct meta_area))/sizeof *m;
+ ctx.avail_meta = ctx.meta_area_tail->slots;
+ }
+ ctx.avail_meta_count--;
+ m = ctx.avail_meta++;
+ m->prev = m->next = 0;
+ return m;
+}
+
+static uint32_t try_avail(struct meta **pm)
+{
+ struct meta *m = *pm;
+ uint32_t first;
+ if (!m) return 0;
+ uint32_t mask = m->avail_mask;
+ if (!mask) {
+ if (!m) return 0;
+ if (!m->freed_mask) {
+ dequeue(pm, m);
+ m = *pm;
+ if (!m) return 0;
+ } else {
+ m = m->next;
+ *pm = m;
+ }
+
+ mask = m->freed_mask;
+
+ // skip fully-free group unless it's the only one
+ // or it's a permanently non-freeable group
+ if (mask == (2u<<m->last_idx)-1 && m->freeable) {
+ m = m->next;
+ *pm = m;
+ mask = m->freed_mask;
+ }
+
+ // activate more slots in a not-fully-active group
+ // if needed, but only as a last resort. prefer using
+ // any other group with free slots. this avoids
+ // touching & dirtying as-yet-unused pages.
+ if (!(mask & ((2u<<m->mem->active_idx)-1))) {
+ if (m->next != m) {
+ m = m->next;
+ *pm = m;
+ } else {
+ int cnt = m->mem->active_idx + 2;
+ int size = size_classes[m->sizeclass]*UNIT;
+ int span = UNIT + size*cnt;
+ // activate up to next 4k boundary
+ while ((span^(span+size-1)) < 4096) {
+ cnt++;
+ span += size;
+ }
+ if (cnt > m->last_idx+1)
+ cnt = m->last_idx+1;
+ m->mem->active_idx = cnt-1;
+ }
+ }
+ mask = activate_group(m);
+ assert(mask);
+ decay_bounces(m->sizeclass);
+ }
+ first = mask&-mask;
+ m->avail_mask = mask-first;
+ return first;
+}
+
+static int alloc_slot(int, size_t);
+
+static struct meta *alloc_group(int sc, size_t req)
+{
+ size_t size = UNIT*size_classes[sc];
+ int i = 0, cnt;
+ unsigned char *p;
+ struct meta *m = alloc_meta();
+ if (!m) return 0;
+ size_t usage = ctx.usage_by_class[sc];
+ size_t pagesize = PGSZ;
+ int active_idx;
+ if (sc < 9) {
+ while (i<2 && 4*small_cnt_tab[sc][i] > usage)
+ i++;
+ cnt = small_cnt_tab[sc][i];
+ } else {
+ // lookup max number of slots fitting in power-of-two size
+ // from a table, along with number of factors of two we
+ // can divide out without a remainder or reaching 1.
+ cnt = med_cnt_tab[sc&3];
+
+ // reduce cnt to avoid excessive eager allocation.
+ while (!(cnt&1) && 4*cnt > usage)
+ cnt >>= 1;
+
+ // data structures don't support groups whose slot offsets
+ // in units don't fit in 16 bits.
+ while (size*cnt >= 65536*UNIT)
+ cnt >>= 1;
+ }
+
+ // If we selected a count of 1 above but it's not sufficient to use
+ // mmap, increase to 2. Then it might be; if not it will nest.
+ if (cnt==1 && size*cnt+UNIT <= pagesize/2) cnt = 2;
+
+ // All choices of size*cnt are "just below" a power of two, so anything
+ // larger than half the page size should be allocated as whole pages.
+ if (size*cnt+UNIT > pagesize/2) {
+ // check/update bounce counter to start/increase retention
+ // of freed maps, and inhibit use of low-count, odd-size
+ // small mappings and single-slot groups if activated.
+ int nosmall = is_bouncing(sc);
+ account_bounce(sc);
+ step_seq();
+
+ // since the following count reduction opportunities have
+ // an absolute memory usage cost, don't overdo them. count
+ // coarse usage as part of usage.
+ if (!(sc&1) && sc<32) usage += ctx.usage_by_class[sc+1];
+
+ // try to drop to a lower count if the one found above
+ // increases usage by more than 25%. these reduced counts
+ // roughly fill an integral number of pages, just not a
+ // power of two, limiting amount of unusable space.
+ if (4*cnt > usage && !nosmall) {
+ if (0);
+ else if ((sc&3)==1 && size*cnt>8*pagesize) cnt = 2;
+ else if ((sc&3)==2 && size*cnt>4*pagesize) cnt = 3;
+ else if ((sc&3)==0 && size*cnt>8*pagesize) cnt = 3;
+ else if ((sc&3)==0 && size*cnt>2*pagesize) cnt = 5;
+ }
+ size_t needed = size*cnt + UNIT;
+ needed += -needed & (pagesize-1);
+
+ // produce an individually-mmapped allocation if usage is low,
+ // bounce counter hasn't triggered, and either it saves memory
+ // or it avoids eager slot allocation without wasting too much.
+ if (!nosmall && cnt<=7) {
+ req += IB + UNIT;
+ req += -req & (pagesize-1);
+ if (req<size+UNIT || (req>=4*pagesize && 2*cnt>usage)) {
+ cnt = 1;
+ needed = req;
+ }
+ }
+
+ p = mmap(0, needed, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (p==MAP_FAILED) {
+ free_meta(m);
+ return 0;
+ }
+ m->maplen = needed>>12;
+ ctx.mmap_counter++;
+ active_idx = (4096-UNIT)/size-1;
+ if (active_idx > cnt-1) active_idx = cnt-1;
+ if (active_idx < 0) active_idx = 0;
+ } else {
+ int j = size_to_class(UNIT+cnt*size-IB);
+ int idx = alloc_slot(j, UNIT+cnt*size-IB);
+ if (idx < 0) {
+ free_meta(m);
+ return 0;
+ }
+ struct meta *g = ctx.active[j];
+ p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter);
+ m->maplen = 0;
+ p[-3] = (p[-3]&31) | (6<<5);
+ for (int i=0; i<=cnt; i++)
+ p[UNIT+i*size-4] = 0;
+ active_idx = cnt-1;
+ }
+ ctx.usage_by_class[sc] += cnt;
+ m->avail_mask = (2u<<active_idx)-1;
+ m->freed_mask = (2u<<(cnt-1))-1 - m->avail_mask;
+ m->mem = (void *)p;
+ m->mem->meta = m;
+ m->mem->active_idx = active_idx;
+ m->last_idx = cnt-1;
+ m->freeable = 1;
+ m->sizeclass = sc;
+ return m;
+}
+
+static int alloc_slot(int sc, size_t req)
+{
+ uint32_t first = try_avail(&ctx.active[sc]);
+ if (first) return a_ctz_32(first);
+
+ struct meta *g = alloc_group(sc, req);
+ if (!g) return -1;
+
+ g->avail_mask--;
+ queue(&ctx.active[sc], g);
+ return 0;
+}
+
+void *malloc(size_t n)
+{
+ if (size_overflows(n)) return 0;
+ struct meta *g;
+ uint32_t mask, first;
+ int sc;
+ int idx;
+ int ctr;
+
+ if (n >= MMAP_THRESHOLD) {
+ size_t needed = n + IB + UNIT;
+ void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (p==MAP_FAILED) return 0;
+ wrlock();
+ step_seq();
+ g = alloc_meta();
+ if (!g) {
+ unlock();
+ munmap(p, needed);
+ return 0;
+ }
+ g->mem = p;
+ g->mem->meta = g;
+ g->last_idx = 0;
+ g->freeable = 1;
+ g->sizeclass = 63;
+ g->maplen = (needed+4095)/4096;
+ g->avail_mask = g->freed_mask = 0;
+ // use a global counter to cycle offset in
+ // individually-mmapped allocations.
+ ctx.mmap_counter++;
+ idx = 0;
+ goto success;
+ }
+
+ sc = size_to_class(n);
+
+ rdlock();
+ g = ctx.active[sc];
+
+ // use coarse size classes initially when there are not yet
+ // any groups of desired size. this allows counts of 2 or 3
+ // to be allocated at first rather than having to start with
+ // 7 or 5, the min counts for even size classes.
+ if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
+ size_t usage = ctx.usage_by_class[sc|1];
+ // if a new group may be allocated, count it toward
+ // usage in deciding if we can use coarse class.
+ if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask
+ && !ctx.active[sc|1]->freed_mask))
+ usage += 3;
+ if (usage <= 12)
+ sc |= 1;
+ g = ctx.active[sc];
+ }
+
+ for (;;) {
+ mask = g ? g->avail_mask : 0;
+ first = mask&-mask;
+ if (!first) break;
+ if (RDLOCK_IS_EXCLUSIVE || !MT)
+ g->avail_mask = mask-first;
+ else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
+ continue;
+ idx = a_ctz_32(first);
+ goto success;
+ }
+ upgradelock();
+
+ idx = alloc_slot(sc, n);
+ if (idx < 0) {
+ unlock();
+ return 0;
+ }
+ g = ctx.active[sc];
+
+success:
+ ctr = ctx.mmap_counter;
+ unlock();
+ return enframe(g, idx, n, ctr);
+}
+
+int is_allzero(void *p)
+{
+ struct meta *g = get_meta(p);
+ return g->sizeclass >= 48 ||
+ get_stride(g) < UNIT*size_classes[g->sizeclass];
+}
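
Note: slot claiming in malloc's fast path is two bit operations: mask & -mask isolates the lowest set bit of avail_mask, and a_ctz_32 converts it to a slot index; under contention the claim retries with a_cas rather than holding the lock. The trick in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t avail = 0x28;            /* slots 3 and 5 available */
	uint32_t first = avail & -avail;  /* 0x08: lowest set bit */
	printf("claim slot %d\n", __builtin_ctz(first)); /* slot 3 */
	return 0;
}
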
diff --git a/src/malloc/mallocng/malloc_usable_size.c b/src/malloc/mallocng/malloc_usable_size.c
new file mode 100644
index 00000000..a440a4ea
--- /dev/null
+++ b/src/malloc/mallocng/malloc_usable_size.c
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+#include "meta.h"
+
+size_t malloc_usable_size(void *p)
+{
+ struct meta *g = get_meta(p);
+ int idx = get_slot_index(p);
+ size_t stride = get_stride(g);
+ unsigned char *start = g->mem->storage + stride*idx;
+ unsigned char *end = start + stride - IB;
+ return get_nominal_size(p, end);
+}
diff --git a/src/malloc/mallocng/meta.h b/src/malloc/mallocng/meta.h
new file mode 100644
index 00000000..61ec53f9
--- /dev/null
+++ b/src/malloc/mallocng/meta.h
@@ -0,0 +1,288 @@
+#ifndef MALLOC_META_H
+#define MALLOC_META_H
+
+#include <stdint.h>
+#include <errno.h>
+#include <limits.h>
+#include "glue.h"
+
+__attribute__((__visibility__("hidden")))
+extern const uint16_t size_classes[];
+
+#define MMAP_THRESHOLD 131052
+
+#define UNIT 16
+#define IB 4
+
+struct group {
+ struct meta *meta;
+ unsigned char active_idx:5;
+ char pad[UNIT - sizeof(struct meta *) - 1];
+ unsigned char storage[];
+};
+
+struct meta {
+ struct meta *prev, *next;
+ struct group *mem;
+ volatile int avail_mask, freed_mask;
+ uintptr_t last_idx:5;
+ uintptr_t freeable:1;
+ uintptr_t sizeclass:6;
+ uintptr_t maplen:8*sizeof(uintptr_t)-12;
+};
+
+struct meta_area {
+ uint64_t check;
+ struct meta_area *next;
+ int nslots;
+ struct meta slots[];
+};
+
+struct malloc_context {
+ uint64_t secret;
+#ifndef PAGESIZE
+ size_t pagesize;
+#endif
+ int init_done;
+ unsigned mmap_counter;
+ struct meta *free_meta_head;
+ struct meta *avail_meta;
+ size_t avail_meta_count, avail_meta_area_count, meta_alloc_shift;
+ struct meta_area *meta_area_head, *meta_area_tail;
+ unsigned char *avail_meta_areas;
+ struct meta *active[48];
+ size_t usage_by_class[48];
+ uint8_t unmap_seq[32], bounces[32];
+ uint8_t seq;
+ uintptr_t brk;
+};
+
+__attribute__((__visibility__("hidden")))
+extern struct malloc_context ctx;
+
+#ifdef PAGESIZE
+#define PGSZ PAGESIZE
+#else
+#define PGSZ ctx.pagesize
+#endif
+
+__attribute__((__visibility__("hidden")))
+struct meta *alloc_meta(void);
+
+__attribute__((__visibility__("hidden")))
+int is_allzero(void *);
+
+static inline void queue(struct meta **phead, struct meta *m)
+{
+ assert(!m->next);
+ assert(!m->prev);
+ if (*phead) {
+ struct meta *head = *phead;
+ m->next = head;
+ m->prev = head->prev;
+ m->next->prev = m->prev->next = m;
+ } else {
+ m->prev = m->next = m;
+ *phead = m;
+ }
+}
+
+static inline void dequeue(struct meta **phead, struct meta *m)
+{
+ if (m->next != m) {
+ m->prev->next = m->next;
+ m->next->prev = m->prev;
+ if (*phead == m) *phead = m->next;
+ } else {
+ *phead = 0;
+ }
+ m->prev = m->next = 0;
+}
+
+static inline struct meta *dequeue_head(struct meta **phead)
+{
+ struct meta *m = *phead;
+ if (m) dequeue(phead, m);
+ return m;
+}
+
+static inline void free_meta(struct meta *m)
+{
+ *m = (struct meta){0};
+ queue(&ctx.free_meta_head, m);
+}
+
+static inline uint32_t activate_group(struct meta *m)
+{
+ assert(!m->avail_mask);
+ uint32_t mask, act = (2u<<m->mem->active_idx)-1;
+ do mask = m->freed_mask;
+ while (a_cas(&m->freed_mask, mask, mask&~act)!=mask);
+ return m->avail_mask = mask & act;
+}
+
+static inline int get_slot_index(const unsigned char *p)
+{
+ return p[-3] & 31;
+}
+
+static inline struct meta *get_meta(const unsigned char *p)
+{
+ assert(!((uintptr_t)p & 15));
+ int offset = *(const uint16_t *)(p - 2);
+ int index = get_slot_index(p);
+ if (p[-4]) {
+ assert(!offset);
+ offset = *(uint32_t *)(p - 8);
+ assert(offset > 0xffff);
+ }
+ const struct group *base = (const void *)(p - UNIT*offset - UNIT);
+ const struct meta *meta = base->meta;
+ assert(meta->mem == base);
+ assert(index <= meta->last_idx);
+ assert(!(meta->avail_mask & (1u<<index)));
+ assert(!(meta->freed_mask & (1u<<index)));
+ const struct meta_area *area = (void *)((uintptr_t)meta & -4096);
+ assert(area->check == ctx.secret);
+ if (meta->sizeclass < 48) {
+ assert(offset >= size_classes[meta->sizeclass]*index);
+ assert(offset < size_classes[meta->sizeclass]*(index+1));
+ } else {
+ assert(meta->sizeclass == 63);
+ }
+ if (meta->maplen) {
+ assert(offset <= meta->maplen*4096UL/UNIT - 1);
+ }
+ return (struct meta *)meta;
+}
+
+static inline size_t get_nominal_size(const unsigned char *p, const unsigned char *end)
+{
+ size_t reserved = p[-3] >> 5;
+ if (reserved >= 5) {
+ assert(reserved == 5);
+ reserved = *(const uint32_t *)(end-4);
+ assert(reserved >= 5);
+ assert(!end[-5]);
+ }
+ assert(reserved <= end-p);
+ assert(!*(end-reserved));
+ // also check the slot's overflow byte
+ assert(!*end);
+ return end-reserved-p;
+}
+
+static inline size_t get_stride(const struct meta *g)
+{
+ if (!g->last_idx && g->maplen) {
+ return g->maplen*4096UL - UNIT;
+ } else {
+ return UNIT*size_classes[g->sizeclass];
+ }
+}
+
+static inline void set_size(unsigned char *p, unsigned char *end, size_t n)
+{
+ int reserved = end-p-n;
+ if (reserved) end[-reserved] = 0;
+ if (reserved >= 5) {
+ *(uint32_t *)(end-4) = reserved;
+ end[-5] = 0;
+ reserved = 5;
+ }
+ p[-3] = (p[-3]&31) + (reserved<<5);
+}
+
+static inline void *enframe(struct meta *g, int idx, size_t n, int ctr)
+{
+ size_t stride = get_stride(g);
+ size_t slack = (stride-IB-n)/UNIT;
+ unsigned char *p = g->mem->storage + stride*idx;
+ unsigned char *end = p+stride-IB;
+ // cycle offset within slot to increase interval to address
+ // reuse, facilitate trapping double-free.
+ int off = (p[-3] ? *(uint16_t *)(p-2) + 1 : ctr) & 255;
+ assert(!p[-4]);
+ if (off > slack) {
+ size_t m = slack;
+ m |= m>>1; m |= m>>2; m |= m>>4;
+ off &= m;
+ if (off > slack) off -= slack+1;
+ assert(off <= slack);
+ }
+ if (off) {
+ // store offset in unused header at offset zero
+ // if enframing at non-zero offset.
+ *(uint16_t *)(p-2) = off;
+ p[-3] = 7<<5;
+ p += UNIT*off;
+ // for nonzero offset there is no permanent check
+ // byte, so make one.
+ p[-4] = 0;
+ }
+ *(uint16_t *)(p-2) = (size_t)(p-g->mem->storage)/UNIT;
+ p[-3] = idx;
+ set_size(p, end, n);
+ return p;
+}
+
+static inline int size_to_class(size_t n)
+{
+ n = (n+IB-1)>>4;
+ if (n<10) return n;
+ n++;
+ int i = (28-a_clz_32(n))*4 + 8;
+ if (n>size_classes[i+1]) i+=2;
+ if (n>size_classes[i]) i++;
+ return i;
+}
+
+static inline int size_overflows(size_t n)
+{
+ if (n >= SIZE_MAX/2 - 4096) {
+ errno = ENOMEM;
+ return 1;
+ }
+ return 0;
+}
+
+static inline void step_seq(void)
+{
+ if (ctx.seq==255) {
+ for (int i=0; i<32; i++) ctx.unmap_seq[i] = 0;
+ ctx.seq = 1;
+ } else {
+ ctx.seq++;
+ }
+}
+
+static inline void record_seq(int sc)
+{
+ if (sc-7U < 32) ctx.unmap_seq[sc-7] = ctx.seq;
+}
+
+static inline void account_bounce(int sc)
+{
+ if (sc-7U < 32) {
+ int seq = ctx.unmap_seq[sc-7];
+ if (seq && ctx.seq-seq < 10) {
+ if (ctx.bounces[sc-7]+1 < 100)
+ ctx.bounces[sc-7]++;
+ else
+ ctx.bounces[sc-7] = 150;
+ }
+ }
+}
+
+static inline void decay_bounces(int sc)
+{
+ if (sc-7U < 32 && ctx.bounces[sc-7])
+ ctx.bounces[sc-7]--;
+}
+
+static inline int is_bouncing(int sc)
+{
+ return (sc-7U < 32 && ctx.bounces[sc-7] >= 100);
+}
+
+#endif
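
Note: size_to_class maps a request onto the table in malloc.c: classes 0-9 are slots of 1-10 units, and above that each doubling of size is split into four geometric steps, located with one clz and at most two table comparisons. A standalone re-check of the mapping (builtin clz in place of a_clz_32; requests capped below MMAP_THRESHOLD as in malloc, so the index never runs off the table):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define UNIT 16
#define IB 4
#define MMAP_THRESHOLD 131052

static const uint16_t size_classes[] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15,
	18, 20, 25, 31, 36, 42, 50, 63, 72, 84, 102, 127,
	146, 170, 204, 255, 292, 340, 409, 511, 584, 682, 818, 1023,
	1169, 1364, 1637, 2047, 2340, 2730, 3276, 4095,
	4680, 5460, 6552, 8191,
};

static int size_to_class(size_t n)
{
	n = (n+IB-1)>>4;
	if (n<10) return n;
	n++;
	int i = (28-__builtin_clz((uint32_t)n))*4 + 8;
	if (n>size_classes[i+1]) i+=2;
	if (n>size_classes[i]) i++;
	return i;
}

int main(void)
{
	for (size_t n = 1; n < MMAP_THRESHOLD; n++) {
		int sc = size_to_class(n);
		if ((size_t)UNIT*size_classes[sc] - IB < n)
			printf("class %d too small for %zu\n", sc, n);
	}
	puts("checked");
	return 0;
}
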
diff --git a/src/malloc/mallocng/realloc.c b/src/malloc/mallocng/realloc.c
new file mode 100644
index 00000000..18769f42
--- /dev/null
+++ b/src/malloc/mallocng/realloc.c
@@ -0,0 +1,51 @@
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <string.h>
+#include "meta.h"
+
+void *realloc(void *p, size_t n)
+{
+ if (!p) return malloc(n);
+ if (size_overflows(n)) return 0;
+
+ struct meta *g = get_meta(p);
+ int idx = get_slot_index(p);
+ size_t stride = get_stride(g);
+ unsigned char *start = g->mem->storage + stride*idx;
+ unsigned char *end = start + stride - IB;
+ size_t old_size = get_nominal_size(p, end);
+ size_t avail_size = end-(unsigned char *)p;
+ void *new;
+
+ // only resize in-place if size class matches
+ if (n <= avail_size && n<MMAP_THRESHOLD
+ && size_to_class(n)+1 >= g->sizeclass) {
+ set_size(p, end, n);
+ return p;
+ }
+
+ // use mremap if old and new size are both mmap-worthy
+ if (g->sizeclass>=48 && n>=MMAP_THRESHOLD) {
+ assert(g->sizeclass==63);
+ size_t base = (unsigned char *)p-start;
+ size_t needed = (n + base + UNIT + IB + 4095) & -4096;
+ new = g->maplen*4096UL == needed ? g->mem :
+ mremap(g->mem, g->maplen*4096UL, needed, MREMAP_MAYMOVE);
+ if (new!=MAP_FAILED) {
+ g->mem = new;
+ g->maplen = needed/4096;
+ p = g->mem->storage + base;
+ end = g->mem->storage + (needed - UNIT) - IB;
+ *end = 0;
+ set_size(p, end, n);
+ return p;
+ }
+ }
+
+ new = malloc(n);
+ if (!new) return 0;
+ memcpy(new, p, n < old_size ? n : old_size);
+ free(p);
+ return new;
+}
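
Note: two distinct resize strategies above: in-place when the new size fits the slot and still warrants (within one of) the same size class, and Linux mremap when both old and new sizes are individually-mmapped, letting the kernel relocate pages instead of copying. The mremap path in isolation (GNU extension, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>

static void *grow_mapping(void *old, size_t old_len, size_t new_len)
{
	void *p = mremap(old, old_len, new_len, MREMAP_MAYMOVE);
	return p == MAP_FAILED ? 0 : p;
}
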
diff --git a/src/malloc/memalign.c b/src/malloc/memalign.c
index cf9dfbda..32cd87d8 100644
--- a/src/malloc/memalign.c
+++ b/src/malloc/memalign.c
@@ -1,54 +1,7 @@
+#define _BSD_SOURCE
#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
-#include "malloc_impl.h"
-void *__memalign(size_t align, size_t len)
+void *memalign(size_t align, size_t len)
{
- unsigned char *mem, *new;
-
- if ((align & -align) != align) {
- errno = EINVAL;
- return 0;
- }
-
- if (len > SIZE_MAX - align || __malloc_replaced) {
- errno = ENOMEM;
- return 0;
- }
-
- if (align <= SIZE_ALIGN)
- return malloc(len);
-
- if (!(mem = malloc(len + align-1)))
- return 0;
-
- new = (void *)((uintptr_t)mem + align-1 & -align);
- if (new == mem) return mem;
-
- struct chunk *c = MEM_TO_CHUNK(mem);
- struct chunk *n = MEM_TO_CHUNK(new);
-
- if (IS_MMAPPED(c)) {
- /* Apply difference between aligned and original
- * address to the "extra" field of mmapped chunk. */
- n->psize = c->psize + (new-mem);
- n->csize = c->csize - (new-mem);
- return new;
- }
-
- struct chunk *t = NEXT_CHUNK(c);
-
- /* Split the allocated chunk into two chunks. The aligned part
- * that will be used has the size in its footer reduced by the
- * difference between the aligned and original addresses, and
- * the resulting size copied to its header. A new header and
- * footer are written for the split-off part to be freed. */
- n->psize = c->csize = C_INUSE | (new-mem);
- n->csize = t->psize -= new-mem;
-
- __bin_chunk(c);
- return new;
+ return aligned_alloc(align, len);
}
-
-weak_alias(__memalign, memalign);
diff --git a/src/malloc/oldmalloc/aligned_alloc.c b/src/malloc/oldmalloc/aligned_alloc.c
new file mode 100644
index 00000000..4adca3b4
--- /dev/null
+++ b/src/malloc/oldmalloc/aligned_alloc.c
@@ -0,0 +1,53 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include "malloc_impl.h"
+
+void *aligned_alloc(size_t align, size_t len)
+{
+ unsigned char *mem, *new;
+
+ if ((align & -align) != align) {
+ errno = EINVAL;
+ return 0;
+ }
+
+ if (len > SIZE_MAX - align ||
+ (__malloc_replaced && !__aligned_alloc_replaced)) {
+ errno = ENOMEM;
+ return 0;
+ }
+
+ if (align <= SIZE_ALIGN)
+ return malloc(len);
+
+ if (!(mem = malloc(len + align-1)))
+ return 0;
+
+ new = (void *)((uintptr_t)mem + align-1 & -align);
+ if (new == mem) return mem;
+
+ struct chunk *c = MEM_TO_CHUNK(mem);
+ struct chunk *n = MEM_TO_CHUNK(new);
+
+ if (IS_MMAPPED(c)) {
+ /* Apply difference between aligned and original
+ * address to the "extra" field of mmapped chunk. */
+ n->psize = c->psize + (new-mem);
+ n->csize = c->csize - (new-mem);
+ return new;
+ }
+
+ struct chunk *t = NEXT_CHUNK(c);
+
+ /* Split the allocated chunk into two chunks. The aligned part
+ * that will be used has the size in its footer reduced by the
+ * difference between the aligned and original addresses, and
+ * the resulting size copied to its header. A new header and
+ * footer are written for the split-off part to be freed. */
+ n->psize = c->csize = C_INUSE | (new-mem);
+ n->csize = t->psize -= new-mem;
+
+ __bin_chunk(c);
+ return new;
+}
diff --git a/src/malloc/malloc.c b/src/malloc/oldmalloc/malloc.c
index 96982596..c0997ad8 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/oldmalloc/malloc.c
@@ -17,17 +17,18 @@
static struct {
volatile uint64_t binmap;
struct bin bins[64];
- volatile int free_lock[2];
+ volatile int split_merge_lock[2];
} mal;
-int __malloc_replaced;
-
/* Synchronization tools */
static inline void lock(volatile int *lk)
{
- if (libc.threads_minus_1)
+ int need_locks = libc.need_locks;
+ if (need_locks) {
while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+ if (need_locks < 0) libc.need_locks = 0;
+ }
}
static inline void unlock(volatile int *lk)
@@ -123,9 +124,72 @@ void __dump_heap(int x)
}
#endif
+/* This function returns true if the interval [old,new]
+ * intersects the 'len'-sized interval below &libc.auxv
+ * (interpreted as the main-thread stack) or below &b
+ * (the current stack). It is used to defend against
+ * buggy brk implementations that can cross the stack. */
+
+static int traverses_stack_p(uintptr_t old, uintptr_t new)
+{
+ const uintptr_t len = 8<<20;
+ uintptr_t a, b;
+
+ b = (uintptr_t)libc.auxv;
+ a = b > len ? b-len : 0;
+ if (new>a && old<b) return 1;
+
+ b = (uintptr_t)&b;
+ a = b > len ? b-len : 0;
+ if (new>a && old<b) return 1;
+
+ return 0;
+}
+
+/* Expand the heap in-place if brk can be used, or otherwise via mmap,
+ * using an exponential lower bound on growth by mmap to make
+ * fragmentation asymptotically irrelevant. The size argument is both
+ * an input and an output, since the caller needs to know the size
+ * allocated, which will be larger than requested due to page alignment
+ * and mmap minimum size rules. The caller is responsible for locking
+ * to prevent concurrent calls. */
+
+static void *__expand_heap(size_t *pn)
+{
+ static uintptr_t brk;
+ static unsigned mmap_step;
+ size_t n = *pn;
+
+ if (n > SIZE_MAX/2 - PAGE_SIZE) {
+ errno = ENOMEM;
+ return 0;
+ }
+ n += -n & PAGE_SIZE-1;
+
+ if (!brk) {
+ brk = __syscall(SYS_brk, 0);
+ brk += -brk & PAGE_SIZE-1;
+ }
+
+ if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
+ && __syscall(SYS_brk, brk+n)==brk+n) {
+ *pn = n;
+ brk += n;
+ return (void *)(brk-n);
+ }
+
+ size_t min = (size_t)PAGE_SIZE << mmap_step/2;
+ if (n < min) n = min;
+ void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (area == MAP_FAILED) return 0;
+ *pn = n;
+ mmap_step++;
+ return area;
+}
+
static struct chunk *expand_heap(size_t n)
{
- static int heap_lock[2];
static void *end;
void *p;
struct chunk *w;
@@ -135,13 +199,8 @@ static struct chunk *expand_heap(size_t n)
* we need room for an extra zero-sized sentinel chunk. */
n += SIZE_ALIGN;
- lock(heap_lock);
-
p = __expand_heap(&n);
- if (!p) {
- unlock(heap_lock);
- return 0;
- }
+ if (!p) return 0;
/* If not just expanding existing space, we need to make a
* new sentinel chunk below the allocated space. */
@@ -164,8 +223,6 @@ static struct chunk *expand_heap(size_t n)
w = MEM_TO_CHUNK(p);
w->csize = n | C_INUSE;
- unlock(heap_lock);
-
return w;
}
@@ -195,96 +252,44 @@ static void unbin(struct chunk *c, int i)
NEXT_CHUNK(c)->psize |= C_INUSE;
}
-static int alloc_fwd(struct chunk *c)
+static void bin_chunk(struct chunk *self, int i)
{
- int i;
- size_t k;
- while (!((k=c->csize) & C_INUSE)) {
- i = bin_index(k);
- lock_bin(i);
- if (c->csize == k) {
- unbin(c, i);
- unlock_bin(i);
- return 1;
- }
- unlock_bin(i);
- }
- return 0;
-}
-
-static int alloc_rev(struct chunk *c)
-{
- int i;
- size_t k;
- while (!((k=c->psize) & C_INUSE)) {
- i = bin_index(k);
- lock_bin(i);
- if (c->psize == k) {
- unbin(PREV_CHUNK(c), i);
- unlock_bin(i);
- return 1;
- }
- unlock_bin(i);
- }
- return 0;
+ self->next = BIN_TO_CHUNK(i);
+ self->prev = mal.bins[i].tail;
+ self->next->prev = self;
+ self->prev->next = self;
+ if (self->prev == BIN_TO_CHUNK(i))
+ a_or_64(&mal.binmap, 1ULL<<i);
}
-
-/* pretrim - trims a chunk _prior_ to removing it from its bin.
- * Must be called with i as the ideal bin for size n, j the bin
- * for the _free_ chunk self, and bin j locked. */
-static int pretrim(struct chunk *self, size_t n, int i, int j)
+static void trim(struct chunk *self, size_t n)
{
- size_t n1;
+ size_t n1 = CHUNK_SIZE(self);
struct chunk *next, *split;
- /* We cannot pretrim if it would require re-binning. */
- if (j < 40) return 0;
- if (j < i+3) {
- if (j != 63) return 0;
- n1 = CHUNK_SIZE(self);
- if (n1-n <= MMAP_THRESHOLD) return 0;
- } else {
- n1 = CHUNK_SIZE(self);
- }
- if (bin_index(n1-n) != j) return 0;
+ if (n >= n1 - DONTCARE) return;
next = NEXT_CHUNK(self);
split = (void *)((char *)self + n);
- split->prev = self->prev;
- split->next = self->next;
- split->prev->next = split;
- split->next->prev = split;
split->psize = n | C_INUSE;
split->csize = n1-n;
next->psize = n1-n;
self->csize = n | C_INUSE;
- return 1;
-}
-
-static void trim(struct chunk *self, size_t n)
-{
- size_t n1 = CHUNK_SIZE(self);
- struct chunk *next, *split;
-
- if (n >= n1 - DONTCARE) return;
- next = NEXT_CHUNK(self);
- split = (void *)((char *)self + n);
+ int i = bin_index(n1-n);
+ lock_bin(i);
- split->psize = n | C_INUSE;
- split->csize = n1-n | C_INUSE;
- next->psize = n1-n | C_INUSE;
- self->csize = n | C_INUSE;
+ bin_chunk(split, i);
- __bin_chunk(split);
+ unlock_bin(i);
}
void *malloc(size_t n)
{
struct chunk *c;
int i, j;
+ uint64_t mask;
if (adjust_size(&n) < 0) return 0;
@@ -300,70 +305,43 @@ void *malloc(size_t n)
}
i = bin_index_up(n);
- for (;;) {
- uint64_t mask = mal.binmap & -(1ULL<<i);
- if (!mask) {
- c = expand_heap(n);
- if (!c) return 0;
- if (alloc_rev(c)) {
- struct chunk *x = c;
- c = PREV_CHUNK(c);
- NEXT_CHUNK(x)->psize = c->csize =
- x->csize + CHUNK_SIZE(c);
- }
- break;
+ if (i<63 && (mal.binmap & (1ULL<<i))) {
+ lock_bin(i);
+ c = mal.bins[i].head;
+ if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
+ unbin(c, i);
+ unlock_bin(i);
+ return CHUNK_TO_MEM(c);
}
+ unlock_bin(i);
+ }
+ lock(mal.split_merge_lock);
+ for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
j = first_set(mask);
lock_bin(j);
c = mal.bins[j].head;
if (c != BIN_TO_CHUNK(j)) {
- if (!pretrim(c, n, i, j)) unbin(c, j);
+ unbin(c, j);
unlock_bin(j);
break;
}
unlock_bin(j);
}
-
- /* Now patch up in case we over-allocated */
+ if (!mask) {
+ c = expand_heap(n);
+ if (!c) {
+ unlock(mal.split_merge_lock);
+ return 0;
+ }
+ }
trim(c, n);
-
+ unlock(mal.split_merge_lock);
return CHUNK_TO_MEM(c);
}
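
[Note on the bin scan above: -(1ULL<<i) clears binmap bits below the ideal bin i, mask & -mask isolates the lowest remaining set bit, and subtracting it advances the scan when a bin turns out to be empty after locking. A minimal demonstration of the bit walk, using the GCC/Clang __builtin_ctzll as a stand-in for first_set:]

#include <stdio.h>

int main(void)
{
	unsigned long long binmap = 1ULL<<2 | 1ULL<<6 | 1ULL<<15;
	int i = 3; /* request maps to bin 3, so bin 2 is never considered */
	for (unsigned long long mask = binmap & -(1ULL<<i); mask;
	     mask -= (mask & -mask))
		printf("try bin %d\n", __builtin_ctzll(mask)); /* 6, then 15 */
	return 0;
}
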
-static size_t mal0_clear(char *p, size_t pagesz, size_t n)
+int __malloc_allzerop(void *p)
{
-#ifdef __GNUC__
- typedef uint64_t __attribute__((__may_alias__)) T;
-#else
- typedef unsigned char T;
-#endif
- char *pp = p + n;
- size_t i = (uintptr_t)pp & (pagesz - 1);
- for (;;) {
- pp = memset(pp - i, 0, i);
- if (pp - p < pagesz) return pp - p;
- for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
- if (((T *)pp)[-1] | ((T *)pp)[-2])
- break;
- }
-}
-
-void *calloc(size_t m, size_t n)
-{
- if (n && m > (size_t)-1/n) {
- errno = ENOMEM;
- return 0;
- }
- n *= m;
- void *p = malloc(n);
- if (!p) return p;
- if (!__malloc_replaced) {
- if (IS_MMAPPED(MEM_TO_CHUNK(p)))
- return p;
- if (n >= PAGE_SIZE)
- n = mal0_clear(p, PAGE_SIZE, n);
- }
- return memset(p, 0, n);
+ return IS_MMAPPED(MEM_TO_CHUNK(p));
}
void *realloc(void *p, size_t n)
@@ -379,6 +357,8 @@ void *realloc(void *p, size_t n)
self = MEM_TO_CHUNK(p);
n1 = n0 = CHUNK_SIZE(self);
+ if (n<=n0 && n0-n<=DONTCARE) return p;
+
if (IS_MMAPPED(self)) {
size_t extra = self->psize;
char *base = (char *)self - extra;
@@ -405,34 +385,43 @@ void *realloc(void *p, size_t n)
/* Crash on corrupted footer (likely from buffer overflow) */
if (next->psize != self->csize) a_crash();
- /* Merge adjacent chunks if we need more space. This is not
- * a waste of time even if we fail to get enough space, because our
- * subsequent call to free would otherwise have to do the merge. */
- if (n > n1 && alloc_fwd(next)) {
- n1 += CHUNK_SIZE(next);
- next = NEXT_CHUNK(next);
- }
- /* FIXME: find what's wrong here and reenable it..? */
- if (0 && n > n1 && alloc_rev(self)) {
- self = PREV_CHUNK(self);
- n1 += CHUNK_SIZE(self);
+ if (n < n0) {
+ int i = bin_index_up(n);
+ int j = bin_index(n0);
+ if (i<j && (mal.binmap & (1ULL << i)))
+ goto copy_realloc;
+ struct chunk *split = (void *)((char *)self + n);
+ self->csize = split->psize = n | C_INUSE;
+ split->csize = next->psize = n0-n | C_INUSE;
+ __bin_chunk(split);
+ return CHUNK_TO_MEM(self);
}
- self->csize = n1 | C_INUSE;
- next->psize = n1 | C_INUSE;
- /* If we got enough space, split off the excess and return */
- if (n <= n1) {
- //memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
- trim(self, n);
- return CHUNK_TO_MEM(self);
+ lock(mal.split_merge_lock);
+
+ size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+ if (n0+nsize >= n) {
+ int i = bin_index(nsize);
+ lock_bin(i);
+ if (!(next->csize & C_INUSE)) {
+ unbin(next, i);
+ unlock_bin(i);
+ next = NEXT_CHUNK(next);
+ self->csize = next->psize = n0+nsize | C_INUSE;
+ trim(self, n);
+ unlock(mal.split_merge_lock);
+ return CHUNK_TO_MEM(self);
+ }
+ unlock_bin(i);
}
+ unlock(mal.split_merge_lock);
copy_realloc:
/* As a last resort, allocate a new chunk and copy to it. */
new = malloc(n-OVERHEAD);
if (!new) return 0;
copy_free_ret:
- memcpy(new, p, n0-OVERHEAD);
+ memcpy(new, p, (n<n0 ? n : n0) - OVERHEAD);
free(CHUNK_TO_MEM(self));
return new;
}
@@ -440,59 +429,51 @@ copy_free_ret:
void __bin_chunk(struct chunk *self)
{
struct chunk *next = NEXT_CHUNK(self);
- size_t final_size, new_size, size;
- int reclaim=0;
- int i;
-
- final_size = new_size = CHUNK_SIZE(self);
/* Crash on corrupted footer (likely from buffer overflow) */
if (next->psize != self->csize) a_crash();
- for (;;) {
- if (self->psize & next->csize & C_INUSE) {
- self->csize = final_size | C_INUSE;
- next->psize = final_size | C_INUSE;
- i = bin_index(final_size);
- lock_bin(i);
- lock(mal.free_lock);
- if (self->psize & next->csize & C_INUSE)
- break;
- unlock(mal.free_lock);
- unlock_bin(i);
- }
+ lock(mal.split_merge_lock);
- if (alloc_rev(self)) {
- self = PREV_CHUNK(self);
- size = CHUNK_SIZE(self);
- final_size += size;
- if (new_size+size > RECLAIM && (new_size+size^size) > size)
- reclaim = 1;
- }
+ size_t osize = CHUNK_SIZE(self), size = osize;
+
+ /* Since we hold split_merge_lock, only transition from free to
+ * in-use can race; in-use to free is impossible */
+ size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
+ size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
- if (alloc_fwd(next)) {
- size = CHUNK_SIZE(next);
- final_size += size;
- if (new_size+size > RECLAIM && (new_size+size^size) > size)
- reclaim = 1;
+ if (psize) {
+ int i = bin_index(psize);
+ lock_bin(i);
+ if (!(self->psize & C_INUSE)) {
+ struct chunk *prev = PREV_CHUNK(self);
+ unbin(prev, i);
+ self = prev;
+ size += psize;
+ }
+ unlock_bin(i);
+ }
+ if (nsize) {
+ int i = bin_index(nsize);
+ lock_bin(i);
+ if (!(next->csize & C_INUSE)) {
+ unbin(next, i);
next = NEXT_CHUNK(next);
+ size += nsize;
}
+ unlock_bin(i);
}
- if (!(mal.binmap & 1ULL<<i))
- a_or_64(&mal.binmap, 1ULL<<i);
-
- self->csize = final_size;
- next->psize = final_size;
- unlock(mal.free_lock);
+ int i = bin_index(size);
+ lock_bin(i);
- self->next = BIN_TO_CHUNK(i);
- self->prev = mal.bins[i].tail;
- self->next->prev = self;
- self->prev->next = self;
+ self->csize = size;
+ next->psize = size;
+ bin_chunk(self, i);
+ unlock(mal.split_merge_lock);
/* Replace middle of large chunks with fresh zero pages */
- if (reclaim) {
+ if (size > RECLAIM && (size^(size-osize)) > size-osize) {
uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
diff --git a/src/internal/malloc_impl.h b/src/malloc/oldmalloc/malloc_impl.h
index 59785a7f..e1cf4774 100644
--- a/src/internal/malloc_impl.h
+++ b/src/malloc/oldmalloc/malloc_impl.h
@@ -2,12 +2,7 @@
#define MALLOC_IMPL_H
#include <sys/mman.h>
-
-hidden void *__expand_heap(size_t *);
-
-hidden void __malloc_donate(char *, char *);
-
-hidden void *__memalign(size_t, size_t);
+#include "dynlink.h"
struct chunk {
size_t psize, csize;
@@ -41,6 +36,4 @@ struct bin {
hidden void __bin_chunk(struct chunk *);
-hidden extern int __malloc_replaced;
-
#endif
diff --git a/src/malloc/malloc_usable_size.c b/src/malloc/oldmalloc/malloc_usable_size.c
index 672b518a..672b518a 100644
--- a/src/malloc/malloc_usable_size.c
+++ b/src/malloc/oldmalloc/malloc_usable_size.c
diff --git a/src/malloc/posix_memalign.c b/src/malloc/posix_memalign.c
index 2ea8bd8a..ad4d8f47 100644
--- a/src/malloc/posix_memalign.c
+++ b/src/malloc/posix_memalign.c
@@ -1,11 +1,10 @@
#include <stdlib.h>
#include <errno.h>
-#include "malloc_impl.h"
int posix_memalign(void **res, size_t align, size_t len)
{
if (align < sizeof(void *)) return EINVAL;
- void *mem = __memalign(align, len);
+ void *mem = aligned_alloc(align, len);
if (!mem) return errno;
*res = mem;
return 0;
diff --git a/src/malloc/replaced.c b/src/malloc/replaced.c
new file mode 100644
index 00000000..07fce61e
--- /dev/null
+++ b/src/malloc/replaced.c
@@ -0,0 +1,4 @@
+#include "dynlink.h"
+
+int __malloc_replaced;
+int __aligned_alloc_replaced;
diff --git a/src/math/i386/fabs.c b/src/math/i386/fabs.c
new file mode 100644
index 00000000..39672786
--- /dev/null
+++ b/src/math/i386/fabs.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+double fabs(double x)
+{
+ __asm__ ("fabs" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/fabs.s b/src/math/i386/fabs.s
deleted file mode 100644
index d66ea9a1..00000000
--- a/src/math/i386/fabs.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global fabs
-.type fabs,@function
-fabs:
- fldl 4(%esp)
- fabs
- ret
diff --git a/src/math/i386/fabsf.c b/src/math/i386/fabsf.c
new file mode 100644
index 00000000..d882eee3
--- /dev/null
+++ b/src/math/i386/fabsf.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+float fabsf(float x)
+{
+ __asm__ ("fabs" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/fabsf.s b/src/math/i386/fabsf.s
deleted file mode 100644
index a981c422..00000000
--- a/src/math/i386/fabsf.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global fabsf
-.type fabsf,@function
-fabsf:
- flds 4(%esp)
- fabs
- ret
diff --git a/src/math/i386/fabsl.c b/src/math/i386/fabsl.c
new file mode 100644
index 00000000..cc1c9ed9
--- /dev/null
+++ b/src/math/i386/fabsl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double fabsl(long double x)
+{
+ __asm__ ("fabs" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/fabsl.s b/src/math/i386/fabsl.s
deleted file mode 100644
index ceef9e4c..00000000
--- a/src/math/i386/fabsl.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global fabsl
-.type fabsl,@function
-fabsl:
- fldt 4(%esp)
- fabs
- ret
diff --git a/src/math/i386/fmod.c b/src/math/i386/fmod.c
new file mode 100644
index 00000000..ea0c58d9
--- /dev/null
+++ b/src/math/i386/fmod.c
@@ -0,0 +1,10 @@
+#include <math.h>
+
+double fmod(double x, double y)
+{
+ unsigned short fpsr;
+ // fprem does not introduce excess precision into x
+ do __asm__ ("fprem; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
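
[Note on the loop above: fprem is a partial-remainder instruction; one round reduces the exponent difference by at most 63, and bit 0x400 of the stored status word (the C2 flag) remains set while the reduction is incomplete, hence the retry loop. The "u"(y) constraint pins the divisor in st(1), where fprem expects it. The result follows C fmod semantics; a trivial sanity check (hypothetical test program, not part of the tree):]

#include <math.h>
#include <stdio.h>

int main(void)
{
	printf("%g\n", fmod(5.5, 2.0));  /* 1.5: sign of x, |r| < |y| */
	printf("%g\n", fmod(-5.5, 2.0)); /* -1.5 */
	return 0;
}
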
diff --git a/src/math/i386/fmod.s b/src/math/i386/fmod.s
deleted file mode 100644
index 2113b3c5..00000000
--- a/src/math/i386/fmod.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global fmod
-.type fmod,@function
-fmod:
- fldl 12(%esp)
- fldl 4(%esp)
-1: fprem
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/fmodf.c b/src/math/i386/fmodf.c
new file mode 100644
index 00000000..90b56ab0
--- /dev/null
+++ b/src/math/i386/fmodf.c
@@ -0,0 +1,10 @@
+#include <math.h>
+
+float fmodf(float x, float y)
+{
+ unsigned short fpsr;
+ // fprem does not introduce excess precision into x
+ do __asm__ ("fprem; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
diff --git a/src/math/i386/fmodf.s b/src/math/i386/fmodf.s
deleted file mode 100644
index e04e2a56..00000000
--- a/src/math/i386/fmodf.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global fmodf
-.type fmodf,@function
-fmodf:
- flds 8(%esp)
- flds 4(%esp)
-1: fprem
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/fmodl.c b/src/math/i386/fmodl.c
new file mode 100644
index 00000000..3daeab06
--- /dev/null
+++ b/src/math/i386/fmodl.c
@@ -0,0 +1,9 @@
+#include <math.h>
+
+long double fmodl(long double x, long double y)
+{
+ unsigned short fpsr;
+ do __asm__ ("fprem; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
diff --git a/src/math/i386/fmodl.s b/src/math/i386/fmodl.s
deleted file mode 100644
index 0cb3fe9b..00000000
--- a/src/math/i386/fmodl.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global fmodl
-.type fmodl,@function
-fmodl:
- fldt 16(%esp)
- fldt 4(%esp)
-1: fprem
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/llrint.c b/src/math/i386/llrint.c
new file mode 100644
index 00000000..aa400817
--- /dev/null
+++ b/src/math/i386/llrint.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrint(double x)
+{
+ long long r;
+ __asm__ ("fistpll %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/llrint.s b/src/math/i386/llrint.s
deleted file mode 100644
index 8e89cd91..00000000
--- a/src/math/i386/llrint.s
+++ /dev/null
@@ -1,8 +0,0 @@
-.global llrint
-.type llrint,@function
-llrint:
- fldl 4(%esp)
- fistpll 4(%esp)
- mov 4(%esp),%eax
- mov 8(%esp),%edx
- ret
diff --git a/src/math/i386/llrintf.c b/src/math/i386/llrintf.c
new file mode 100644
index 00000000..c41a317b
--- /dev/null
+++ b/src/math/i386/llrintf.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrintf(float x)
+{
+ long long r;
+ __asm__ ("fistpll %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/llrintf.s b/src/math/i386/llrintf.s
deleted file mode 100644
index aa850c6c..00000000
--- a/src/math/i386/llrintf.s
+++ /dev/null
@@ -1,9 +0,0 @@
-.global llrintf
-.type llrintf,@function
-llrintf:
- sub $8,%esp
- flds 12(%esp)
- fistpll (%esp)
- pop %eax
- pop %edx
- ret
diff --git a/src/math/i386/llrintl.c b/src/math/i386/llrintl.c
new file mode 100644
index 00000000..c439ef28
--- /dev/null
+++ b/src/math/i386/llrintl.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrintl(long double x)
+{
+ long long r;
+ __asm__ ("fistpll %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/llrintl.s b/src/math/i386/llrintl.s
deleted file mode 100644
index 1cfb56f1..00000000
--- a/src/math/i386/llrintl.s
+++ /dev/null
@@ -1,8 +0,0 @@
-.global llrintl
-.type llrintl,@function
-llrintl:
- fldt 4(%esp)
- fistpll 4(%esp)
- mov 4(%esp),%eax
- mov 8(%esp),%edx
- ret
diff --git a/src/math/i386/lrint.c b/src/math/i386/lrint.c
new file mode 100644
index 00000000..89563ab2
--- /dev/null
+++ b/src/math/i386/lrint.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrint(double x)
+{
+ long r;
+ __asm__ ("fistpl %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/lrint.s b/src/math/i386/lrint.s
deleted file mode 100644
index 02b83d9f..00000000
--- a/src/math/i386/lrint.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global lrint
-.type lrint,@function
-lrint:
- fldl 4(%esp)
- fistpl 4(%esp)
- mov 4(%esp),%eax
- ret
diff --git a/src/math/i386/lrintf.c b/src/math/i386/lrintf.c
new file mode 100644
index 00000000..0bbf29de
--- /dev/null
+++ b/src/math/i386/lrintf.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrintf(float x)
+{
+ long r;
+ __asm__ ("fistpl %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/lrintf.s b/src/math/i386/lrintf.s
deleted file mode 100644
index 907aac29..00000000
--- a/src/math/i386/lrintf.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global lrintf
-.type lrintf,@function
-lrintf:
- flds 4(%esp)
- fistpl 4(%esp)
- mov 4(%esp),%eax
- ret
diff --git a/src/math/i386/lrintl.c b/src/math/i386/lrintl.c
new file mode 100644
index 00000000..eb8c0902
--- /dev/null
+++ b/src/math/i386/lrintl.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrintl(long double x)
+{
+ long r;
+ __asm__ ("fistpl %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/i386/lrintl.s b/src/math/i386/lrintl.s
deleted file mode 100644
index 3ae05aac..00000000
--- a/src/math/i386/lrintl.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global lrintl
-.type lrintl,@function
-lrintl:
- fldt 4(%esp)
- fistpl 4(%esp)
- mov 4(%esp),%eax
- ret
diff --git a/src/math/i386/remainder.c b/src/math/i386/remainder.c
new file mode 100644
index 00000000..c083df90
--- /dev/null
+++ b/src/math/i386/remainder.c
@@ -0,0 +1,12 @@
+#include <math.h>
+
+double remainder(double x, double y)
+{
+ unsigned short fpsr;
+ // fprem1 does not introduce excess precision into x
+ do __asm__ ("fprem1; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
+
+weak_alias(remainder, drem);
diff --git a/src/math/i386/remainder.s b/src/math/i386/remainder.s
deleted file mode 100644
index ab1da95d..00000000
--- a/src/math/i386/remainder.s
+++ /dev/null
@@ -1,14 +0,0 @@
-.global remainder
-.type remainder,@function
-remainder:
-.weak drem
-.type drem,@function
-drem:
- fldl 12(%esp)
- fldl 4(%esp)
-1: fprem1
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/remainderf.c b/src/math/i386/remainderf.c
new file mode 100644
index 00000000..280207d2
--- /dev/null
+++ b/src/math/i386/remainderf.c
@@ -0,0 +1,12 @@
+#include <math.h>
+
+float remainderf(float x, float y)
+{
+ unsigned short fpsr;
+ // fprem1 does not introduce excess precision into x
+ do __asm__ ("fprem1; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
+
+weak_alias(remainderf, dremf);
diff --git a/src/math/i386/remainderf.s b/src/math/i386/remainderf.s
deleted file mode 100644
index 6a7378a3..00000000
--- a/src/math/i386/remainderf.s
+++ /dev/null
@@ -1,14 +0,0 @@
-.global remainderf
-.type remainderf,@function
-remainderf:
-.weak dremf
-.type dremf,@function
-dremf:
- flds 8(%esp)
- flds 4(%esp)
-1: fprem1
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/remainderl.c b/src/math/i386/remainderl.c
new file mode 100644
index 00000000..8cf75071
--- /dev/null
+++ b/src/math/i386/remainderl.c
@@ -0,0 +1,9 @@
+#include <math.h>
+
+long double remainderl(long double x, long double y)
+{
+ unsigned short fpsr;
+ do __asm__ ("fprem1; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
diff --git a/src/math/i386/remainderl.s b/src/math/i386/remainderl.s
deleted file mode 100644
index b41518ed..00000000
--- a/src/math/i386/remainderl.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global remainderl
-.type remainderl,@function
-remainderl:
- fldt 16(%esp)
- fldt 4(%esp)
-1: fprem1
- fnstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- ret
diff --git a/src/math/i386/rint.c b/src/math/i386/rint.c
new file mode 100644
index 00000000..a5276a60
--- /dev/null
+++ b/src/math/i386/rint.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+double rint(double x)
+{
+ __asm__ ("frndint" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/rint.s b/src/math/i386/rint.s
deleted file mode 100644
index bb99a11c..00000000
--- a/src/math/i386/rint.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global rint
-.type rint,@function
-rint:
- fldl 4(%esp)
- frndint
- ret
diff --git a/src/math/i386/rintf.c b/src/math/i386/rintf.c
new file mode 100644
index 00000000..bb4121a4
--- /dev/null
+++ b/src/math/i386/rintf.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+float rintf(float x)
+{
+ __asm__ ("frndint" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/rintf.s b/src/math/i386/rintf.s
deleted file mode 100644
index bce4c5a6..00000000
--- a/src/math/i386/rintf.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global rintf
-.type rintf,@function
-rintf:
- flds 4(%esp)
- frndint
- ret
diff --git a/src/math/i386/rintl.c b/src/math/i386/rintl.c
new file mode 100644
index 00000000..e1a92077
--- /dev/null
+++ b/src/math/i386/rintl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double rintl(long double x)
+{
+ __asm__ ("frndint" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/rintl.s b/src/math/i386/rintl.s
deleted file mode 100644
index cd2bf9a9..00000000
--- a/src/math/i386/rintl.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global rintl
-.type rintl,@function
-rintl:
- fldt 4(%esp)
- frndint
- ret
diff --git a/src/math/i386/sqrt.c b/src/math/i386/sqrt.c
new file mode 100644
index 00000000..934fbcca
--- /dev/null
+++ b/src/math/i386/sqrt.c
@@ -0,0 +1,15 @@
+#include "libm.h"
+
+double sqrt(double x)
+{
+ union ldshape ux;
+ unsigned fpsr;
+ __asm__ ("fsqrt; fnstsw %%ax": "=t"(ux.f), "=a"(fpsr) : "0"(x));
+ if ((ux.i.m & 0x7ff) != 0x400)
+ return (double)ux.f;
+ /* Rounding to double would have encountered an exact halfway case.
+ Adjust mantissa downwards if fsqrt rounded up, else upwards.
+ (result of fsqrt could not have been exact) */
+ ux.i.m ^= (fpsr & 0x200) + 0x300;
+ return (double)ux.f;
+}
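
[Note on the mantissa fixup above: the 80-bit format has 11 more mantissa bits than double, so an extended result whose low 11 bits are exactly 0x400 sits precisely halfway between two adjacent doubles; C1 (0x200 in the status word) records whether fsqrt rounded up, and an exact square root cannot end in that pattern. Nudging the mantissa past the halfway point in the opposite direction makes the final rounding to double come out correct. The xor arithmetic traced on its own (demo values only):]

#include <stdio.h>

int main(void)
{
	unsigned m = 0x400;                   /* exact-halfway bit pattern */
	printf("%#x\n", m ^ (0x200 + 0x300)); /* C1 set:   0x100, now below halfway */
	printf("%#x\n", m ^ (0x000 + 0x300)); /* C1 clear: 0x700, now above halfway */
	return 0;
}
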
diff --git a/src/math/i386/sqrt.s b/src/math/i386/sqrt.s
deleted file mode 100644
index 57837e25..00000000
--- a/src/math/i386/sqrt.s
+++ /dev/null
@@ -1,21 +0,0 @@
-.global sqrt
-.type sqrt,@function
-sqrt: fldl 4(%esp)
- fsqrt
- fnstsw %ax
- sub $12,%esp
- fld %st(0)
- fstpt (%esp)
- mov (%esp),%ecx
- and $0x7ff,%ecx
- cmp $0x400,%ecx
- jnz 1f
- and $0x200,%eax
- sub $0x100,%eax
- sub %eax,(%esp)
- fstp %st(0)
- fldt (%esp)
-1: add $12,%esp
- fstpl 4(%esp)
- fldl 4(%esp)
- ret
diff --git a/src/math/i386/sqrtf.c b/src/math/i386/sqrtf.c
new file mode 100644
index 00000000..41c65c2b
--- /dev/null
+++ b/src/math/i386/sqrtf.c
@@ -0,0 +1,12 @@
+#include <math.h>
+
+float sqrtf(float x)
+{
+ long double t;
+ /* The long double result has sufficient precision so that
+ * second rounding to float still keeps the returned value
+ * correctly rounded, see Pierre Roux, "Innocuous Double
+ * Rounding of Basic Arithmetic Operations". */
+ __asm__ ("fsqrt" : "=t"(t) : "0"(x));
+ return (float)t;
+}
diff --git a/src/math/i386/sqrtf.s b/src/math/i386/sqrtf.s
deleted file mode 100644
index 9e944f45..00000000
--- a/src/math/i386/sqrtf.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global sqrtf
-.type sqrtf,@function
-sqrtf: flds 4(%esp)
- fsqrt
- fstps 4(%esp)
- flds 4(%esp)
- ret
diff --git a/src/math/i386/sqrtl.c b/src/math/i386/sqrtl.c
new file mode 100644
index 00000000..864cfcc4
--- /dev/null
+++ b/src/math/i386/sqrtl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double sqrtl(long double x)
+{
+ __asm__ ("fsqrt" : "+t"(x));
+ return x;
+}
diff --git a/src/math/i386/sqrtl.s b/src/math/i386/sqrtl.s
deleted file mode 100644
index e0d42616..00000000
--- a/src/math/i386/sqrtl.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global sqrtl
-.type sqrtl,@function
-sqrtl: fldt 4(%esp)
- fsqrt
- ret
diff --git a/src/math/x86_64/fabs.c b/src/math/x86_64/fabs.c
new file mode 100644
index 00000000..16562477
--- /dev/null
+++ b/src/math/x86_64/fabs.c
@@ -0,0 +1,10 @@
+#include <math.h>
+
+double fabs(double x)
+{
+ double t;
+ __asm__ ("pcmpeqd %0, %0" : "=x"(t)); // t = ~0
+ __asm__ ("psrlq $1, %0" : "+x"(t)); // t >>= 1
+ __asm__ ("andps %1, %0" : "+x"(x) : "x"(t)); // x &= t
+ return x;
+}
diff --git a/src/math/x86_64/fabs.s b/src/math/x86_64/fabs.s
deleted file mode 100644
index 5715005e..00000000
--- a/src/math/x86_64/fabs.s
+++ /dev/null
@@ -1,9 +0,0 @@
-.global fabs
-.type fabs,@function
-fabs:
- xor %eax,%eax
- dec %rax
- shr %rax
- movq %rax,%xmm1
- andpd %xmm1,%xmm0
- ret
diff --git a/src/math/x86_64/fabsf.c b/src/math/x86_64/fabsf.c
new file mode 100644
index 00000000..36ea7481
--- /dev/null
+++ b/src/math/x86_64/fabsf.c
@@ -0,0 +1,10 @@
+#include <math.h>
+
+float fabsf(float x)
+{
+ float t;
+ __asm__ ("pcmpeqd %0, %0" : "=x"(t)); // t = ~0
+ __asm__ ("psrld $1, %0" : "+x"(t)); // t >>= 1
+ __asm__ ("andps %1, %0" : "+x"(x) : "x"(t)); // x &= t
+ return x;
+}
diff --git a/src/math/x86_64/fabsf.s b/src/math/x86_64/fabsf.s
deleted file mode 100644
index 501a1f17..00000000
--- a/src/math/x86_64/fabsf.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global fabsf
-.type fabsf,@function
-fabsf:
- mov $0x7fffffff,%eax
- movq %rax,%xmm1
- andps %xmm1,%xmm0
- ret
diff --git a/src/math/x86_64/fabsl.c b/src/math/x86_64/fabsl.c
new file mode 100644
index 00000000..cc1c9ed9
--- /dev/null
+++ b/src/math/x86_64/fabsl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double fabsl(long double x)
+{
+ __asm__ ("fabs" : "+t"(x));
+ return x;
+}
diff --git a/src/math/x86_64/fabsl.s b/src/math/x86_64/fabsl.s
deleted file mode 100644
index 4e7ab525..00000000
--- a/src/math/x86_64/fabsl.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global fabsl
-.type fabsl,@function
-fabsl:
- fldt 8(%rsp)
- fabs
- ret
diff --git a/src/math/x86_64/fmodl.c b/src/math/x86_64/fmodl.c
new file mode 100644
index 00000000..3daeab06
--- /dev/null
+++ b/src/math/x86_64/fmodl.c
@@ -0,0 +1,9 @@
+#include <math.h>
+
+long double fmodl(long double x, long double y)
+{
+ unsigned short fpsr;
+ do __asm__ ("fprem; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
diff --git a/src/math/x86_64/fmodl.s b/src/math/x86_64/fmodl.s
deleted file mode 100644
index ea07b402..00000000
--- a/src/math/x86_64/fmodl.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global fmodl
-.type fmodl,@function
-fmodl:
- fldt 24(%rsp)
- fldt 8(%rsp)
-1: fprem
- fnstsw %ax
- testb $4,%ah
- jnz 1b
- fstp %st(1)
- ret
diff --git a/src/math/x86_64/llrint.c b/src/math/x86_64/llrint.c
new file mode 100644
index 00000000..dd38a722
--- /dev/null
+++ b/src/math/x86_64/llrint.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrint(double x)
+{
+ long long r;
+ __asm__ ("cvtsd2si %1, %0" : "=r"(r) : "x"(x));
+ return r;
+}
diff --git a/src/math/x86_64/llrint.s b/src/math/x86_64/llrint.s
deleted file mode 100644
index bf476498..00000000
--- a/src/math/x86_64/llrint.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global llrint
-.type llrint,@function
-llrint:
- cvtsd2si %xmm0,%rax
- ret
diff --git a/src/math/x86_64/llrintf.c b/src/math/x86_64/llrintf.c
new file mode 100644
index 00000000..fc8625e8
--- /dev/null
+++ b/src/math/x86_64/llrintf.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrintf(float x)
+{
+ long long r;
+ __asm__ ("cvtss2si %1, %0" : "=r"(r) : "x"(x));
+ return r;
+}
diff --git a/src/math/x86_64/llrintf.s b/src/math/x86_64/llrintf.s
deleted file mode 100644
index d7204ac0..00000000
--- a/src/math/x86_64/llrintf.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global llrintf
-.type llrintf,@function
-llrintf:
- cvtss2si %xmm0,%rax
- ret
diff --git a/src/math/x86_64/llrintl.c b/src/math/x86_64/llrintl.c
new file mode 100644
index 00000000..c439ef28
--- /dev/null
+++ b/src/math/x86_64/llrintl.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long long llrintl(long double x)
+{
+ long long r;
+ __asm__ ("fistpll %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/x86_64/llrintl.s b/src/math/x86_64/llrintl.s
deleted file mode 100644
index 1ec0817d..00000000
--- a/src/math/x86_64/llrintl.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global llrintl
-.type llrintl,@function
-llrintl:
- fldt 8(%rsp)
- fistpll 8(%rsp)
- mov 8(%rsp),%rax
- ret
diff --git a/src/math/x86_64/lrint.c b/src/math/x86_64/lrint.c
new file mode 100644
index 00000000..a742fec6
--- /dev/null
+++ b/src/math/x86_64/lrint.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrint(double x)
+{
+ long r;
+ __asm__ ("cvtsd2si %1, %0" : "=r"(r) : "x"(x));
+ return r;
+}
diff --git a/src/math/x86_64/lrint.s b/src/math/x86_64/lrint.s
deleted file mode 100644
index 15fc2454..00000000
--- a/src/math/x86_64/lrint.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global lrint
-.type lrint,@function
-lrint:
- cvtsd2si %xmm0,%rax
- ret
diff --git a/src/math/x86_64/lrintf.c b/src/math/x86_64/lrintf.c
new file mode 100644
index 00000000..2ba5639d
--- /dev/null
+++ b/src/math/x86_64/lrintf.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrintf(float x)
+{
+ long r;
+ __asm__ ("cvtss2si %1, %0" : "=r"(r) : "x"(x));
+ return r;
+}
diff --git a/src/math/x86_64/lrintf.s b/src/math/x86_64/lrintf.s
deleted file mode 100644
index 488423d2..00000000
--- a/src/math/x86_64/lrintf.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global lrintf
-.type lrintf,@function
-lrintf:
- cvtss2si %xmm0,%rax
- ret
diff --git a/src/math/x86_64/lrintl.c b/src/math/x86_64/lrintl.c
new file mode 100644
index 00000000..068e2e4d
--- /dev/null
+++ b/src/math/x86_64/lrintl.c
@@ -0,0 +1,8 @@
+#include <math.h>
+
+long lrintl(long double x)
+{
+ long r;
+ __asm__ ("fistpll %0" : "=m"(r) : "t"(x) : "st");
+ return r;
+}
diff --git a/src/math/x86_64/lrintl.s b/src/math/x86_64/lrintl.s
deleted file mode 100644
index d587b12b..00000000
--- a/src/math/x86_64/lrintl.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.global lrintl
-.type lrintl,@function
-lrintl:
- fldt 8(%rsp)
- fistpll 8(%rsp)
- mov 8(%rsp),%rax
- ret
diff --git a/src/math/x86_64/remainderl.c b/src/math/x86_64/remainderl.c
new file mode 100644
index 00000000..8cf75071
--- /dev/null
+++ b/src/math/x86_64/remainderl.c
@@ -0,0 +1,9 @@
+#include <math.h>
+
+long double remainderl(long double x, long double y)
+{
+ unsigned short fpsr;
+ do __asm__ ("fprem1; fnstsw %%ax" : "+t"(x), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ return x;
+}
diff --git a/src/math/x86_64/remainderl.s b/src/math/x86_64/remainderl.s
deleted file mode 100644
index cb3857b4..00000000
--- a/src/math/x86_64/remainderl.s
+++ /dev/null
@@ -1,11 +0,0 @@
-.global remainderl
-.type remainderl,@function
-remainderl:
- fldt 24(%rsp)
- fldt 8(%rsp)
-1: fprem1
- fnstsw %ax
- testb $4,%ah
- jnz 1b
- fstp %st(1)
- ret
diff --git a/src/math/x86_64/remquol.c b/src/math/x86_64/remquol.c
new file mode 100644
index 00000000..60eef089
--- /dev/null
+++ b/src/math/x86_64/remquol.c
@@ -0,0 +1,32 @@
+#include <math.h>
+
+long double remquol(long double x, long double y, int *quo)
+{
+ signed char *cx = (void *)&x, *cy = (void *)&y;
+ /* By ensuring that addresses of x and y cannot be discarded,
+ * this empty asm guides GCC into representing extraction of
+ * their sign bits as memory loads rather than making x and y
+ * not-address-taken internally and using bitfield operations,
+ * which in the end wouldn't work out, as extraction from FPU
+ * registers needs to go through memory anyway. This way GCC
+ * should manage to use incoming stack slots without spills. */
+ __asm__ ("" :: "X"(cx), "X"(cy));
+
+ long double t = x;
+ unsigned fpsr;
+ do __asm__ ("fprem1; fnstsw %%ax" : "+t"(t), "=a"(fpsr) : "u"(y));
+ while (fpsr & 0x400);
+ /* C0, C1, C3 flags in x87 status word carry low bits of quotient:
+ * 15 14 13 12 11 10 9 8
+ * . C3 . . . C2 C1 C0
+ * . b1 . . . 0 b0 b2 */
+ unsigned char i = fpsr >> 8;
+ i = i>>4 | i<<4;
+ /* i[5:2] is now {b0 b2 ? b1}. Retrieve {0 b2 b1 b0} via
+ * in-register table lookup. */
+ unsigned qbits = 0x7575313164642020 >> (i & 60);
+ qbits &= 7;
+
+ *quo = (cx[9]^cy[9]) < 0 ? -qbits : qbits;
+ return t;
+}
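
[Note on the constant above: it is a packed 16-entry nibble table. Bits 5:2 of the rotated status byte select a nibble, and the nibble is the reassembled quotient {b2 b1 b0}; the bit between b0 and b1 (x87 status bit 15) is a don't-care, so every entry appears twice. A brute-force verification of the table (standalone check, not musl code):]

#include <stdio.h>

int main(void)
{
	const unsigned long long tab = 0x7575313164642020ULL;
	for (int b2 = 0; b2 < 2; b2++)
	for (int b1 = 0; b1 < 2; b1++)
	for (int b0 = 0; b0 < 2; b0++)
	for (int x = 0; x < 2; x++) {
		unsigned idx = b0<<3 | b2<<2 | x<<1 | b1; /* layout of i[5:2] */
		unsigned got = tab >> 4*idx & 7;          /* (i & 60) selects this nibble */
		unsigned want = b2<<2 | b1<<1 | b0;
		if (got != want) { puts("MISMATCH"); return 1; }
	}
	puts("table OK for all 16 indices");
	return 0;
}
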
diff --git a/src/math/x86_64/rintl.c b/src/math/x86_64/rintl.c
new file mode 100644
index 00000000..e1a92077
--- /dev/null
+++ b/src/math/x86_64/rintl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double rintl(long double x)
+{
+ __asm__ ("frndint" : "+t"(x));
+ return x;
+}
diff --git a/src/math/x86_64/rintl.s b/src/math/x86_64/rintl.s
deleted file mode 100644
index 64e663cd..00000000
--- a/src/math/x86_64/rintl.s
+++ /dev/null
@@ -1,6 +0,0 @@
-.global rintl
-.type rintl,@function
-rintl:
- fldt 8(%rsp)
- frndint
- ret
diff --git a/src/math/x86_64/sqrt.c b/src/math/x86_64/sqrt.c
new file mode 100644
index 00000000..657e09e3
--- /dev/null
+++ b/src/math/x86_64/sqrt.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+double sqrt(double x)
+{
+ __asm__ ("sqrtsd %1, %0" : "=x"(x) : "x"(x));
+ return x;
+}
diff --git a/src/math/x86_64/sqrt.s b/src/math/x86_64/sqrt.s
deleted file mode 100644
index d3c609f9..00000000
--- a/src/math/x86_64/sqrt.s
+++ /dev/null
@@ -1,4 +0,0 @@
-.global sqrt
-.type sqrt,@function
-sqrt: sqrtsd %xmm0, %xmm0
- ret
diff --git a/src/math/x86_64/sqrtf.c b/src/math/x86_64/sqrtf.c
new file mode 100644
index 00000000..720baec6
--- /dev/null
+++ b/src/math/x86_64/sqrtf.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+float sqrtf(float x)
+{
+ __asm__ ("sqrtss %1, %0" : "=x"(x) : "x"(x));
+ return x;
+}
diff --git a/src/math/x86_64/sqrtf.s b/src/math/x86_64/sqrtf.s
deleted file mode 100644
index eec48c60..00000000
--- a/src/math/x86_64/sqrtf.s
+++ /dev/null
@@ -1,4 +0,0 @@
-.global sqrtf
-.type sqrtf,@function
-sqrtf: sqrtss %xmm0, %xmm0
- ret
diff --git a/src/math/x86_64/sqrtl.c b/src/math/x86_64/sqrtl.c
new file mode 100644
index 00000000..864cfcc4
--- /dev/null
+++ b/src/math/x86_64/sqrtl.c
@@ -0,0 +1,7 @@
+#include <math.h>
+
+long double sqrtl(long double x)
+{
+ __asm__ ("fsqrt" : "+t"(x));
+ return x;
+}
diff --git a/src/math/x86_64/sqrtl.s b/src/math/x86_64/sqrtl.s
deleted file mode 100644
index 23cd687d..00000000
--- a/src/math/x86_64/sqrtl.s
+++ /dev/null
@@ -1,5 +0,0 @@
-.global sqrtl
-.type sqrtl,@function
-sqrtl: fldt 8(%rsp)
- fsqrt
- ret
diff --git a/src/misc/nftw.c b/src/misc/nftw.c
index 0a464100..8dcff7fe 100644
--- a/src/misc/nftw.c
+++ b/src/misc/nftw.c
@@ -1,5 +1,6 @@
#include <ftw.h>
#include <dirent.h>
+#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
@@ -26,6 +27,8 @@ static int do_nftw(char *path, int (*fn)(const char *, const struct stat *, int,
struct history new;
int type;
int r;
+ int dfd;
+ int err;
struct FTW lev;
if ((flags & FTW_PHYS) ? lstat(path, &st) : stat(path, &st) < 0) {
@@ -34,8 +37,7 @@ static int do_nftw(char *path, int (*fn)(const char *, const struct stat *, int,
else if (errno != EACCES) return -1;
else type = FTW_NS;
} else if (S_ISDIR(st.st_mode)) {
- if (access(path, R_OK) < 0) type = FTW_DNR;
- else if (flags & FTW_DEPTH) type = FTW_DP;
+ if (flags & FTW_DEPTH) type = FTW_DP;
else type = FTW_D;
} else if (S_ISLNK(st.st_mode)) {
if (flags & FTW_PHYS) type = FTW_SL;
@@ -63,6 +65,13 @@ static int do_nftw(char *path, int (*fn)(const char *, const struct stat *, int,
lev.base = k;
}
+ if (type == FTW_D || type == FTW_DP) {
+ dfd = open(path, O_RDONLY);
+ err = errno;
+ if (dfd < 0 && err == EACCES) type = FTW_DNR;
+ if (!fd_limit) close(dfd);
+ }
+
if (!(flags & FTW_DEPTH) && (r=fn(path, &st, type, &lev)))
return r;
@@ -71,7 +80,11 @@ static int do_nftw(char *path, int (*fn)(const char *, const struct stat *, int,
return 0;
if ((type == FTW_D || type == FTW_DP) && fd_limit) {
- DIR *d = opendir(path);
+ if (dfd < 0) {
+ errno = err;
+ return -1;
+ }
+ DIR *d = fdopendir(dfd);
if (d) {
struct dirent *de;
while ((de = readdir(d))) {
@@ -92,7 +105,8 @@ static int do_nftw(char *path, int (*fn)(const char *, const struct stat *, int,
}
}
closedir(d);
- } else if (errno != EACCES) {
+ } else {
+ close(dfd);
return -1;
}
}
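
[Note on the nftw change above: the access(2) probe is replaced by actually opening the directory, which gives the right answer for permission combinations access() misjudges, closes the TOCTOU gap between the readability check and opendir, and, when fd_limit permits, hands the same descriptor to fdopendir so the path is not re-resolved. The open-then-fdopendir pattern in isolation (hedged sketch):]

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int dfd = open(".", O_RDONLY);
	if (dfd < 0) return 1;          /* EACCES here is what maps to FTW_DNR */
	DIR *d = fdopendir(dfd);        /* on success, d owns dfd */
	if (!d) { close(dfd); return 1; }
	struct dirent *de;
	while ((de = readdir(d))) puts(de->d_name);
	closedir(d);                    /* also closes dfd */
	return 0;
}
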
diff --git a/src/network/getnameinfo.c b/src/network/getnameinfo.c
index f77e73ad..949e1811 100644
--- a/src/network/getnameinfo.c
+++ b/src/network/getnameinfo.c
@@ -158,6 +158,7 @@ int getnameinfo(const struct sockaddr *restrict sa, socklen_t sl,
unsigned char query[18+PTR_MAX], reply[512];
int qlen = __res_mkquery(0, ptr, 1, RR_PTR,
0, 0, 0, query, sizeof query);
+ query[3] = 0; /* don't need AD flag */
int rlen = __res_send(query, qlen, reply, sizeof reply);
buf[0] = 0;
if (rlen > 0)
diff --git a/src/network/lookup_name.c b/src/network/lookup_name.c
index c93263a9..aae0d95a 100644
--- a/src/network/lookup_name.c
+++ b/src/network/lookup_name.c
@@ -149,6 +149,7 @@ static int name_from_dns(struct address buf[static MAXADDRS], char canon[static
0, 0, 0, qbuf[nq], sizeof *qbuf);
if (qlens[nq] == -1)
return EAI_NONAME;
+ qbuf[nq][3] = 0; /* don't need AD flag */
nq++;
}
}
@@ -156,14 +157,17 @@ static int name_from_dns(struct address buf[static MAXADDRS], char canon[static
if (__res_msend_rc(nq, qp, qlens, ap, alens, sizeof *abuf, conf) < 0)
return EAI_SYSTEM;
+ for (i=0; i<nq; i++) {
+ if (alens[i] < 4 || (abuf[i][3] & 15) == 2) return EAI_AGAIN;
+ if ((abuf[i][3] & 15) == 3) return 0;
+ if ((abuf[i][3] & 15) != 0) return EAI_FAIL;
+ }
+
for (i=0; i<nq; i++)
__dns_parse(abuf[i], alens[i], dns_parse_callback, &ctx);
if (ctx.cnt) return ctx.cnt;
- if (alens[0] < 4 || (abuf[0][3] & 15) == 2) return EAI_AGAIN;
- if ((abuf[0][3] & 15) == 0) return EAI_NONAME;
- if ((abuf[0][3] & 15) == 3) return 0;
- return EAI_FAIL;
+ return EAI_NONAME;
}
static int name_from_dns_search(struct address buf[static MAXADDRS], char canon[static 256], const char *name, int family)
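
[Note on the DNS changes above and below: byte 3 of a DNS header holds RA, Z, AD, CD and the 4-bit RCODE. That is why res_mkquery's q[3] = 32 requests AD, getnameinfo's query[3] = 0 strips it again, and abuf[i][3] & 15 reads the result code of every parallel query (0 NoError, 2 ServFail, 3 NXDomain) before any answer is parsed. A hypothetical helper, not musl API, just to spell out the field:]

/* DNS header, low flags byte: RA(0x80) Z(0x40) AD(0x20) CD(0x10) RCODE(0x0f) */
int dns_rcode(const unsigned char *reply, int len)
{
	return len < 4 ? -1 : reply[3] & 15;
}
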
diff --git a/src/network/res_mkquery.c b/src/network/res_mkquery.c
index 6fa04a5c..33f50cb9 100644
--- a/src/network/res_mkquery.c
+++ b/src/network/res_mkquery.c
@@ -20,6 +20,7 @@ int __res_mkquery(int op, const char *dname, int class, int type,
/* Construct query template - ID will be filled later */
memset(q, 0, n);
q[2] = op*8 + 1;
+ q[3] = 32; /* AD */
q[5] = 1;
memcpy((char *)q+13, dname, l);
for (i=13; q[i]; i=j+1) {
diff --git a/src/network/res_send.c b/src/network/res_send.c
index b9cea0bf..ee4abf1f 100644
--- a/src/network/res_send.c
+++ b/src/network/res_send.c
@@ -3,7 +3,7 @@
int __res_send(const unsigned char *msg, int msglen, unsigned char *answer, int anslen)
{
int r = __res_msend(1, &msg, &msglen, &answer, &anslen, anslen);
- return r<0 ? r : anslen;
+ return r<0 || !anslen ? -1 : anslen;
}
weak_alias(__res_send, res_send);
diff --git a/src/process/fork.c b/src/process/fork.c
index fb42478a..7e984ff8 100644
--- a/src/process/fork.c
+++ b/src/process/fork.c
@@ -30,6 +30,7 @@ pid_t fork(void)
self->next = self->prev = self;
__thread_list_lock = 0;
libc.threads_minus_1 = 0;
+ if (libc.need_locks) libc.need_locks = -1;
}
__restore_sigs(&set);
__fork_handler(!ret);
diff --git a/src/stdio/__string_read.c b/src/stdio/__string_read.c
deleted file mode 100644
index 7b50a7e1..00000000
--- a/src/stdio/__string_read.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "stdio_impl.h"
-#include <string.h>
-
-size_t __string_read(FILE *f, unsigned char *buf, size_t len)
-{
- char *src = f->cookie;
- size_t k = len+256;
- char *end = memchr(src, 0, k);
- if (end) k = end-src;
- if (k < len) len = k;
- memcpy(buf, src, len);
- f->rpos = (void *)(src+len);
- f->rend = (void *)(src+k);
- f->cookie = src+k;
- return len;
-}
diff --git a/src/stdio/fmemopen.c b/src/stdio/fmemopen.c
index 5685092e..343e3e3f 100644
--- a/src/stdio/fmemopen.c
+++ b/src/stdio/fmemopen.c
@@ -2,6 +2,7 @@
#include <errno.h>
#include <string.h>
#include <stdlib.h>
+#include <stddef.h>
#include <inttypes.h>
#include "libc.h"
@@ -95,18 +96,17 @@ FILE *fmemopen(void *restrict buf, size_t size, const char *restrict mode)
f = malloc(sizeof *f + (buf?0:size));
if (!f) return 0;
- memset(&f->f, 0, sizeof f->f);
+ memset(f, 0, offsetof(struct mem_FILE, buf));
f->f.cookie = &f->c;
f->f.fd = -1;
f->f.lbf = EOF;
f->f.buf = f->buf + UNGET;
f->f.buf_size = sizeof f->buf - UNGET;
if (!buf) {
- buf = f->buf2;;
+ buf = f->buf2;
memset(buf, 0, size);
}
- memset(&f->c, 0, sizeof f->c);
f->c.buf = buf;
f->c.size = size;
f->c.mode = *mode;
diff --git a/src/stdio/vfscanf.c b/src/stdio/vfscanf.c
index 9e030fc4..b78a374d 100644
--- a/src/stdio/vfscanf.c
+++ b/src/stdio/vfscanf.c
@@ -57,7 +57,7 @@ int vfscanf(FILE *restrict f, const char *restrict fmt, va_list ap)
{
int width;
int size;
- int alloc;
+ int alloc = 0;
int base;
const unsigned char *p;
int c, t;
@@ -76,6 +76,9 @@ int vfscanf(FILE *restrict f, const char *restrict fmt, va_list ap)
FLOCK(f);
+ if (!f->rpos) __toread(f);
+ if (!f->rpos) goto input_fail;
+
for (p=(const unsigned char *)fmt; *p; p++) {
alloc = 0;
diff --git a/src/stdio/vsscanf.c b/src/stdio/vsscanf.c
index 98500225..4d6d259b 100644
--- a/src/stdio/vsscanf.c
+++ b/src/stdio/vsscanf.c
@@ -1,15 +1,25 @@
#include "stdio_impl.h"
+#include <string.h>
-static size_t do_read(FILE *f, unsigned char *buf, size_t len)
+static size_t string_read(FILE *f, unsigned char *buf, size_t len)
{
- return __string_read(f, buf, len);
+ char *src = f->cookie;
+ size_t k = len+256;
+ char *end = memchr(src, 0, k);
+ if (end) k = end-src;
+ if (k < len) len = k;
+ memcpy(buf, src, len);
+ f->rpos = (void *)(src+len);
+ f->rend = (void *)(src+k);
+ f->cookie = src+k;
+ return len;
}
int vsscanf(const char *restrict s, const char *restrict fmt, va_list ap)
{
FILE f = {
.buf = (void *)s, .cookie = (void *)s,
- .read = do_read, .lock = -1
+ .read = string_read, .lock = -1
};
return vfscanf(&f, fmt, ap);
}
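
[Note on string_read above: it reads directly out of the caller's string, probing at most len+256 bytes for the terminator and exposing the scanned window through rpos/rend so later reads are plain buffer hits; inlining it here is what allowed the generic __string_read helper to be deleted above. The net effect is that sscanf is just vfscanf over this shim:]

#include <stdio.h>

int main(void)
{
	int a, b;
	if (sscanf("12 34", "%d %d", &a, &b) == 2)
		printf("sum %d\n", a + b); /* sum 46 */
	return 0;
}
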
diff --git a/src/stdlib/wcstod.c b/src/stdlib/wcstod.c
index 26fe9af8..0deb7010 100644
--- a/src/stdlib/wcstod.c
+++ b/src/stdlib/wcstod.c
@@ -33,8 +33,7 @@ static long double wcstox(const wchar_t *s, wchar_t **p, int prec)
unsigned char buf[64];
FILE f = {0};
f.flags = 0;
- f.rpos = f.rend = 0;
- f.buf = buf + 4;
+ f.rpos = f.rend = f.buf = buf + 4;
f.buf_size = sizeof buf - 4;
f.lock = -1;
f.read = do_read;
diff --git a/src/stdlib/wcstol.c b/src/stdlib/wcstol.c
index 4443f577..1eeb495f 100644
--- a/src/stdlib/wcstol.c
+++ b/src/stdlib/wcstol.c
@@ -35,8 +35,7 @@ static unsigned long long wcstox(const wchar_t *s, wchar_t **p, int base, unsign
unsigned char buf[64];
FILE f = {0};
f.flags = 0;
- f.rpos = f.rend = 0;
- f.buf = buf + 4;
+ f.rpos = f.rend = f.buf = buf + 4;
f.buf_size = sizeof buf - 4;
f.lock = -1;
f.read = do_read;
diff --git a/src/string/aarch64/memcpy.S b/src/string/aarch64/memcpy.S
new file mode 100644
index 00000000..48bb8a8d
--- /dev/null
+++ b/src/string/aarch64/memcpy.S
@@ -0,0 +1,186 @@
+/*
+ * memcpy - copy memory area
+ *
+ * Copyright (c) 2012-2020, Arm Limited.
+ * SPDX-License-Identifier: MIT
+ */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ *
+ */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_lw w10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l x14
+#define E_h x15
+#define F_l x16
+#define F_h x17
+#define G_l count
+#define G_h dst
+#define H_l src
+#define H_h srcend
+#define tmp1 x14
+
+/* This implementation of memcpy uses unaligned accesses and branchless
+ sequences to keep the code small, simple and improve performance.
+
+ Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+ copies of up to 128 bytes, and large copies. The overhead of the overlap
+ check is negligible since it is only required for large copies.
+
+ Large copies use a software pipelined loop processing 64 bytes per iteration.
+ The destination pointer is 16-byte aligned to minimize unaligned accesses.
+ The loop tail is handled by always copying 64 bytes from the end.
+*/
+
+.global memcpy
+.type memcpy,%function
+memcpy:
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 128
+ b.hi .Lcopy_long
+ cmp count, 32
+ b.hi .Lcopy32_128
+
+ /* Small copies: 0..32 bytes. */
+ cmp count, 16
+ b.lo .Lcopy16
+ ldp A_l, A_h, [src]
+ ldp D_l, D_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ /* Copy 8-15 bytes. */
+.Lcopy16:
+ tbz count, 3, .Lcopy8
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+
+ .p2align 3
+ /* Copy 4-7 bytes. */
+.Lcopy8:
+ tbz count, 2, .Lcopy4
+ ldr A_lw, [src]
+ ldr B_lw, [srcend, -4]
+ str A_lw, [dstin]
+ str B_lw, [dstend, -4]
+ ret
+
+ /* Copy 0..3 bytes using a branchless sequence. */
+.Lcopy4:
+ cbz count, .Lcopy0
+ lsr tmp1, count, 1
+ ldrb A_lw, [src]
+ ldrb C_lw, [srcend, -1]
+ ldrb B_lw, [src, tmp1]
+ strb A_lw, [dstin]
+ strb B_lw, [dstin, tmp1]
+ strb C_lw, [dstend, -1]
+.Lcopy0:
+ ret
+
+ .p2align 4
+ /* Medium copies: 33..128 bytes. */
+.Lcopy32_128:
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ ldp D_l, D_h, [srcend, -16]
+ cmp count, 64
+ b.hi .Lcopy128
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Copy 65..128 bytes. */
+.Lcopy128:
+ ldp E_l, E_h, [src, 32]
+ ldp F_l, F_h, [src, 48]
+ cmp count, 96
+ b.ls .Lcopy96
+ ldp G_l, G_h, [srcend, -64]
+ ldp H_l, H_h, [srcend, -48]
+ stp G_l, G_h, [dstend, -64]
+ stp H_l, H_h, [dstend, -48]
+.Lcopy96:
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp E_l, E_h, [dstin, 32]
+ stp F_l, F_h, [dstin, 48]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Copy more than 128 bytes. */
+.Lcopy_long:
+
+ /* Copy 16 bytes and then align dst to 16-byte alignment. */
+
+ ldp D_l, D_h, [src]
+ and tmp1, dstin, 15
+ bic dst, dstin, 15
+ sub src, src, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_l, A_h, [src, 16]
+ stp D_l, D_h, [dstin]
+ ldp B_l, B_h, [src, 32]
+ ldp C_l, C_h, [src, 48]
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls .Lcopy64_from_end
+
+.Lloop64:
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [src, 32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [src, 48]
+ stp D_l, D_h, [dst, 64]!
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 64
+ b.hi .Lloop64
+
+ /* Write the last iteration and copy 64 bytes from the end. */
+.Lcopy64_from_end:
+ ldp E_l, E_h, [srcend, -64]
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [srcend, -48]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [srcend, -16]
+ stp D_l, D_h, [dst, 64]
+ stp E_l, E_h, [dstend, -64]
+ stp A_l, A_h, [dstend, -48]
+ stp B_l, B_h, [dstend, -32]
+ stp C_l, C_h, [dstend, -16]
+ ret
+
+.size memcpy,.-memcpy
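
[Note on the memcpy strategy above: most of the branchlessness comes from one idiom, loading from both ends of the region before storing both, so a single code path covers a whole size range and the tail never needs a byte loop. A hedged C rendering of the 16..32-byte case; the two stores may overlap in the middle, which is harmless because all loads happen first:]

#include <stdint.h>
#include <string.h>

/* valid for 16 <= n <= 32, non-overlapping dst/src */
static void copy16_32(unsigned char *dst, const unsigned char *src, size_t n)
{
	uint64_t head[2], tail[2];
	memcpy(head, src, 16);          /* first 16 bytes */
	memcpy(tail, src + n - 16, 16); /* last 16 bytes */
	memcpy(dst, head, 16);
	memcpy(dst + n - 16, tail, 16);
}
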
diff --git a/src/string/aarch64/memset.S b/src/string/aarch64/memset.S
new file mode 100644
index 00000000..f0d29b7f
--- /dev/null
+++ b/src/string/aarch64/memset.S
@@ -0,0 +1,115 @@
+/*
+ * memset - fill memory with a constant byte
+ *
+ * Copyright (c) 2012-2020, Arm Limited.
+ * SPDX-License-Identifier: MIT
+ */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, Advanced SIMD, unaligned accesses.
+ *
+ */
+
+#define dstin x0
+#define val x1
+#define valw w1
+#define count x2
+#define dst x3
+#define dstend x4
+#define zva_val x5
+
+.global memset
+.type memset,%function
+memset:
+
+ dup v0.16B, valw
+ add dstend, dstin, count
+
+ cmp count, 96
+ b.hi .Lset_long
+ cmp count, 16
+ b.hs .Lset_medium
+ mov val, v0.D[0]
+
+ /* Set 0..15 bytes. */
+ tbz count, 3, 1f
+ str val, [dstin]
+ str val, [dstend, -8]
+ ret
+ nop
+1: tbz count, 2, 2f
+ str valw, [dstin]
+ str valw, [dstend, -4]
+ ret
+2: cbz count, 3f
+ strb valw, [dstin]
+ tbz count, 1, 3f
+ strh valw, [dstend, -2]
+3: ret
+
+ /* Set 17..96 bytes. */
+.Lset_medium:
+ str q0, [dstin]
+ tbnz count, 6, .Lset96
+ str q0, [dstend, -16]
+ tbz count, 5, 1f
+ str q0, [dstin, 16]
+ str q0, [dstend, -32]
+1: ret
+
+ .p2align 4
+ /* Set 64..96 bytes. Write 64 bytes from the start and
+ 32 bytes from the end. */
+.Lset96:
+ str q0, [dstin, 16]
+ stp q0, q0, [dstin, 32]
+ stp q0, q0, [dstend, -32]
+ ret
+
+ .p2align 4
+.Lset_long:
+ and valw, valw, 255
+ bic dst, dstin, 15
+ str q0, [dstin]
+ cmp count, 160
+ ccmp valw, 0, 0, hs
+ b.ne .Lno_zva
+
+#ifndef SKIP_ZVA_CHECK
+ mrs zva_val, dczid_el0
+ and zva_val, zva_val, 31
+ cmp zva_val, 4 /* ZVA size is 64 bytes. */
+ b.ne .Lno_zva
+#endif
+ str q0, [dst, 16]
+ stp q0, q0, [dst, 32]
+ bic dst, dst, 63
+ sub count, dstend, dst /* Count is now 64 too large. */
+ sub count, count, 128 /* Adjust count and bias for loop. */
+
+ .p2align 4
+.Lzva_loop:
+ add dst, dst, 64
+ dc zva, dst
+ subs count, count, 64
+ b.hi .Lzva_loop
+ stp q0, q0, [dstend, -64]
+ stp q0, q0, [dstend, -32]
+ ret
+
+.Lno_zva:
+ sub count, dstend, dst /* Count is 16 too large. */
+ sub dst, dst, 16 /* Dst is biased by -32. */
+ sub count, count, 64 + 16 /* Adjust count and bias for loop. */
+.Lno_zva_loop:
+ stp q0, q0, [dst, 32]
+ stp q0, q0, [dst, 64]!
+ subs count, count, 64
+ b.hi .Lno_zva_loop
+ stp q0, q0, [dstend, -64]
+ stp q0, q0, [dstend, -32]
+ ret
+
+.size memset,.-memset
+
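
[Note on the ZVA gate above: DCZID_EL0 bits 3:0 give log2 of the zeroing-block size in 4-byte words, and bit 4 (DZP) prohibits dc zva entirely; masking with 31 and comparing against 4 therefore accepts exactly "dc zva permitted, 64-byte blocks". The decoding in C terms (demo value; reading the register itself requires the mrs instruction shown above):]

#include <stdio.h>

int main(void)
{
	unsigned long dczid = 4; /* pretend mrs result: BS=4, DZP=0 */
	unsigned long block = 4ul << (dczid & 15); /* 64 bytes */
	int permitted = !(dczid & 16);
	printf("dc zva %s, block %lu bytes\n",
	       permitted ? "usable" : "prohibited", block);
	return 0;
}
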
diff --git a/src/string/arm/memcpy_le.S b/src/string/arm/memcpy.S
index 7b35d305..869e3448 100644
--- a/src/string/arm/memcpy_le.S
+++ b/src/string/arm/memcpy.S
@@ -1,5 +1,3 @@
-#if !__ARMEB__
-
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
@@ -42,7 +40,7 @@
* code safely callable from thumb mode, adjusting the return
* instructions to be compatible with pre-thumb ARM cpus, removal of
* prefetch code that is not compatible with older cpus and support for
- * building as thumb 2.
+ * building as thumb 2 and big-endian.
*/
.syntax unified
@@ -227,24 +225,45 @@ non_congruent:
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/
movs r5, r5, lsl #31
+
+#if __ARMEB__
+ movmi r3, r3, ror #24
+ strbmi r3, [r0], #1
+ movcs r3, r3, ror #24
+ strbcs r3, [r0], #1
+ movcs r3, r3, ror #24
+ strbcs r3, [r0], #1
+#else
strbmi r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
+#endif
cmp r2, #4
blo partial_word_tail
+#if __ARMEB__
+ mov r3, r3, lsr r12
+ mov r3, r3, lsl r12
+#endif
+
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
+#if __ARMEB__
+ mov r4, r5, lsr lr
+ orr r4, r4, r3
+ mov r3, r5, lsl r12
+#else
mov r4, r5, lsl lr
orr r4, r4, r3
mov r3, r5, lsr r12
+#endif
str r4, [r0], #4
cmp r2, #4
bhs 1b
@@ -270,6 +289,25 @@ loop16:
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
+#if __ARMEB__
+ orr r3, r3, r4, lsr #16
+ mov r4, r4, lsl #16
+ orr r4, r4, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r7, lsr #16
+ mov r7, r7, lsl #16
+ orr r7, r7, r8, lsr #16
+ mov r8, r8, lsl #16
+ orr r8, r8, r9, lsr #16
+ mov r9, r9, lsl #16
+ orr r9, r9, r10, lsr #16
+ mov r10, r10, lsl #16
+ orr r10, r10, r11, lsr #16
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsl #16
+#else
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
@@ -287,6 +325,7 @@ loop16:
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
+#endif
bhs 1b
b less_than_thirtytwo
@@ -296,6 +335,25 @@ loop8:
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
+#if __ARMEB__
+ orr r3, r3, r4, lsr #24
+ mov r4, r4, lsl #8
+ orr r4, r4, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r7, lsr #24
+ mov r7, r7, lsl #8
+ orr r7, r7, r8, lsr #24
+ mov r8, r8, lsl #8
+ orr r8, r8, r9, lsr #24
+ mov r9, r9, lsl #8
+ orr r9, r9, r10, lsr #24
+ mov r10, r10, lsl #8
+ orr r10, r10, r11, lsr #24
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsl #8
+#else
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
@@ -313,6 +371,7 @@ loop8:
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
+#endif
bhs 1b
b less_than_thirtytwo
@@ -322,6 +381,25 @@ loop24:
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
+#if __ARMEB__
+ orr r3, r3, r4, lsr #8
+ mov r4, r4, lsl #24
+ orr r4, r4, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r7, lsr #8
+ mov r7, r7, lsl #24
+ orr r7, r7, r8, lsr #8
+ mov r8, r8, lsl #24
+ orr r8, r8, r9, lsr #8
+ mov r9, r9, lsl #24
+ orr r9, r9, r10, lsr #8
+ mov r10, r10, lsl #24
+ orr r10, r10, r11, lsr #8
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsl #24
+#else
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
@@ -339,6 +417,7 @@ loop24:
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
+#endif
bhs 1b
less_than_thirtytwo:
@@ -350,9 +429,15 @@ less_than_thirtytwo:
1: ldr r5, [r1], #4
sub r2, r2, #4
+#if __ARMEB__
+ mov r4, r5, lsr lr
+ orr r4, r4, r3
+ mov r3, r5, lsl r12
+#else
mov r4, r5, lsl lr
orr r4, r4, r3
mov r3, r5, lsr r12
+#endif
str r4, [r0], #4
cmp r2, #4
bhs 1b
@@ -360,11 +445,20 @@ less_than_thirtytwo:
partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
+#if __ARMEB__
+ movmi r3, r3, ror #24
+ strbmi r3, [r0], #1
+ movcs r3, r3, ror #24
+ strbcs r3, [r0], #1
+ movcs r3, r3, ror #24
+ strbcs r3, [r0], #1
+#else
strbmi r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
+#endif
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
@@ -383,4 +477,3 @@ copy_last_3_and_return:
ldmfd sp!, {r0, r4, lr}
bx lr
-#endif
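
[Note on the big-endian branches above: they all follow one rule. Where the little-endian code peels the next byte to store from the low end (lsr #8 after each strb), big-endian must peel from the high end, which the asm gets by rotating left 8 bits (ror #24 is a left-rotate by 8) before each byte store; the word-merging shifts swap lsl/lsr correspondingly. The byte-peeling difference in C, as illustration only:]

#include <stdint.h>

static void store_bytes_le(unsigned char *d, uint32_t x, int n)
{
	while (n--) { *d++ = x & 0xff; x >>= 8; }          /* low byte first */
}

static void store_bytes_be(unsigned char *d, uint32_t x, int n)
{
	while (n--) { x = x<<8 | x>>24; *d++ = x & 0xff; } /* high byte first */
}
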
diff --git a/src/string/arm/memcpy.c b/src/string/arm/memcpy.c
deleted file mode 100644
index 041614f4..00000000
--- a/src/string/arm/memcpy.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#if __ARMEB__
-#include "../memcpy.c"
-#endif
diff --git a/src/string/memmem.c b/src/string/memmem.c
index 58a21fcd..11eff86e 100644
--- a/src/string/memmem.c
+++ b/src/string/memmem.c
@@ -12,8 +12,8 @@ static char *twobyte_memmem(const unsigned char *h, size_t k, const unsigned cha
static char *threebyte_memmem(const unsigned char *h, size_t k, const unsigned char *n)
{
- uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8;
- uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8;
+ uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8;
+ uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8;
for (h+=3, k-=3; k; k--, hw = (hw|*h++)<<8)
if (hw == nw) return (char *)h-3;
return hw == nw ? (char *)h-3 : 0;
@@ -21,8 +21,8 @@ static char *threebyte_memmem(const unsigned char *h, size_t k, const unsigned c
static char *fourbyte_memmem(const unsigned char *h, size_t k, const unsigned char *n)
{
- uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3];
- uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3];
+ uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3];
+ uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3];
for (h+=4, k-=4; k; k--, hw = hw<<8 | *h++)
if (hw == nw) return (char *)h-4;
return hw == nw ? (char *)h-4 : 0;
diff --git a/src/string/strsignal.c b/src/string/strsignal.c
index 96bfe841..5156366e 100644
--- a/src/string/strsignal.c
+++ b/src/string/strsignal.c
@@ -31,7 +31,11 @@ static const char map[] = {
[SIGPIPE] = 13,
[SIGALRM] = 14,
[SIGTERM] = 15,
+#if defined(SIGSTKFLT)
[SIGSTKFLT] = 16,
+#elif defined(SIGEMT)
+ [SIGEMT] = 16,
+#endif
[SIGCHLD] = 17,
[SIGCONT] = 18,
[SIGSTOP] = 19,
@@ -70,7 +74,13 @@ static const char strings[] =
"Broken pipe\0"
"Alarm clock\0"
"Terminated\0"
+#if defined(SIGSTKFLT)
"Stack fault\0"
+#elif defined(SIGEMT)
+ "Emulator trap\0"
+#else
+ "Unknown signal\0"
+#endif
"Child process status\0"
"Continued\0"
"Stopped (signal)\0"
diff --git a/src/string/strstr.c b/src/string/strstr.c
index 55ba1c7b..43a0207a 100644
--- a/src/string/strstr.c
+++ b/src/string/strstr.c
@@ -10,16 +10,16 @@ static char *twobyte_strstr(const unsigned char *h, const unsigned char *n)
static char *threebyte_strstr(const unsigned char *h, const unsigned char *n)
{
- uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8;
- uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8;
+ uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8;
+ uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8;
for (h+=2; *h && hw != nw; hw = (hw|*++h)<<8);
return *h ? (char *)h-2 : 0;
}
static char *fourbyte_strstr(const unsigned char *h, const unsigned char *n)
{
- uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3];
- uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3];
+ uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3];
+ uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3];
for (h+=3; *h && hw != nw; hw = hw<<8 | *++h);
return *h ? (char *)h-3 : 0;
}
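
[Note on the memmem and strstr casts above: the fix is the same in both files. Without the cast, n[0]<<24 promotes the byte to int, and any byte >= 0x80 shifts a one into the sign bit, which is undefined behavior for signed left shift; casting the first operand makes the whole expression unsigned. Side by side, as a minimal illustration:]

#include <stdint.h>

uint32_t hash_bad(const unsigned char *n)
{
	return n[0]<<24 | n[1]<<16 | n[2]<<8;           /* UB when n[0] >= 0x80 */
}

uint32_t hash_good(const unsigned char *n)
{
	return (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8; /* always well-defined */
}
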
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 45557c88..60eece49 100644
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -18,9 +18,11 @@
void __lock(volatile int *l)
{
- if (!libc.threads_minus_1) return;
+ int need_locks = libc.need_locks;
+ if (!need_locks) return;
/* fast path: INT_MIN for the lock, +1 for the congestion */
int current = a_cas(l, 0, INT_MIN + 1);
+ if (need_locks < 0) libc.need_locks = 0;
if (!current) return;
/* A first spin loop, for medium congestion. */
for (unsigned i = 0; i < 10; ++i) {
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 5f491092..6bdfb44f 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -90,14 +90,7 @@ _Noreturn void __pthread_exit(void *result)
exit(0);
}
- /* At this point we are committed to thread termination. Unlink
- * the thread from the list. This change will not be visible
- * until the lock is released, which only happens after SYS_exit
- * has been called, via the exit futex address pointing at the lock. */
- libc.threads_minus_1--;
- self->next->prev = self->prev;
- self->prev->next = self->next;
- self->prev = self->next = self;
+ /* At this point we are committed to thread termination. */
/* Process robust list in userspace to handle non-pshared mutexes
* and the detached thread case where the robust list head will
@@ -121,6 +114,16 @@ _Noreturn void __pthread_exit(void *result)
__do_orphaned_stdio_locks();
__dl_thread_cleanup();
+ /* Last, unlink thread from the list. This change will not be visible
+ * until the lock is released, which only happens after SYS_exit
+ * has been called, via the exit futex address pointing at the lock.
+ * This needs to happen after any possible calls to LOCK() that might
+ * skip locking if process appears single-threaded. */
+ if (!--libc.threads_minus_1) libc.need_locks = -1;
+ self->next->prev = self->prev;
+ self->prev->next = self->next;
+ self->prev = self->next = self;
+
/* This atomic potentially competes with a concurrent pthread_detach
* call; the loser is responsible for freeing thread resources. */
int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
@@ -336,7 +339,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
__tl_lock();
- libc.threads_minus_1++;
+ if (!libc.threads_minus_1++) libc.need_locks = 1;
ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -360,7 +363,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
new->next->prev = new;
new->prev->next = new;
} else {
- libc.threads_minus_1--;
+ if (!--libc.threads_minus_1) libc.need_locks = 0;
}
__tl_unlock();
__restore_sigs(&set);
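
[Note tying together the need_locks changes in oldmalloc, __lock.c, fork and pthread_create/exit above: need_locks is a three-state flag. 0 means single-threaded (skip locks entirely), 1 means multithreaded, and -1, set by fork and by the last exiting thread, means single-threaded again but with one more synchronizing lock round required before locking may be skipped, so the surviving thread observes everything the dead thread wrote. A sketch with the names reused from the diff, not a real libc interface:]

static volatile int need_locks;  /* 0, 1, or -1 as described above */
static int threads_minus_1;

static void thread_created(void)
{
	if (!threads_minus_1++) need_locks = 1;
}

static void thread_exited(void)
{
	if (!--threads_minus_1) need_locks = -1; /* one last locked pass */
}

static void lock_sketch(volatile int *lk)
{
	int nl = need_locks;
	if (!nl) return;                          /* single-threaded: no atomics */
	while (__sync_lock_test_and_set(lk, 1));  /* GCC builtin acquire spin */
	if (nl < 0) need_locks = 0;               /* resynced; back to lock-free */
}
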
diff --git a/src/time/__tz.c b/src/time/__tz.c
index a962960e..49a7371e 100644
--- a/src/time/__tz.c
+++ b/src/time/__tz.c
@@ -86,15 +86,15 @@ static void getname(char *d, const char **p)
int i;
if (**p == '<') {
++*p;
- for (i=0; (*p)[i] && (*p)[i]!='>' && i<TZNAME_MAX; i++)
- d[i] = (*p)[i];
+ for (i=0; (*p)[i] && (*p)[i]!='>'; i++)
+ if (i<TZNAME_MAX) d[i] = (*p)[i];
if ((*p)[i]) ++*p;
} else {
- for (i=0; ((*p)[i]|32)-'a'<26U && i<TZNAME_MAX; i++)
- d[i] = (*p)[i];
+ for (i=0; ((*p)[i]|32)-'a'<26U; i++)
+ if (i<TZNAME_MAX) d[i] = (*p)[i];
}
*p += i;
- d[i] = 0;
+ d[i<TZNAME_MAX?i:TZNAME_MAX] = 0;
}
#define VEC(...) ((const unsigned char[]){__VA_ARGS__})
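
[Note on the getname fix above: it decouples scanning from storing. The loop now always consumes the full name, so the parser stays in sync on names longer than TZNAME_MAX, while storing at most TZNAME_MAX bytes and terminating whatever was stored. The fixed loop shape, runnable in isolation with TZNAME_MAX shrunk for the demo:]

#include <stdio.h>
#define TZNAME_MAX 6 /* assumption for the demo only */

int main(void)
{
	const char *p = "AVeryLongZoneName-5";
	char d[TZNAME_MAX+1];
	int i;
	for (i = 0; (p[i]|32)-'a' < 26U; i++)  /* consume every letter */
		if (i < TZNAME_MAX) d[i] = p[i]; /* but store only a prefix */
	d[i < TZNAME_MAX ? i : TZNAME_MAX] = 0;
	printf("stored \"%s\", parser resumes at \"%s\"\n", d, p+i);
	return 0;
}
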