From a543369e3b06a51eacd392c738fc10c5267a195f Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Tue, 27 Aug 2013 18:08:29 -0400
Subject: optimized C memset
this version of memset is optimized both for small and large values of
n, and makes no misaligned writes, so it is usable (and near-optimal)
on all archs. it is capable of filling up to 52 or 56 bytes without
entering a loop and with at most 7 branches, all of which can be fully
predicted if memset is called multiple times with the same size.
it also uses the attribute extension to inform the compiler that it is
violating the aliasing rules, unlike the previous code which simply
assumed it was safe to violate the aliasing rules since translation
unit boundaries hide the violations from the compiler. for non-GNUC
compilers, 100% portable fallback code in the form of a naive loop is
provided. I intend to eventually apply this approach to all of the
string/memory functions which are doing word-at-a-time accesses.

 src/string/memset.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 77 insertions(+), 12 deletions(-)
(limited to 'src')
diff --git a/src/string/memset.c b/src/string/memset.c
index 20e47c45..f438b073 100644
--- a/src/string/memset.c
+++ b/src/string/memset.c
@@ -1,21 +1,86 @@
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>

#define SS (sizeof(size_t))
#define ALIGN (sizeof(size_t)-1)
#define ONES ((size_t)-1/UCHAR_MAX)
/* memset - fill the n bytes at dest with the byte value (unsigned char)c.
 *
 * Optimized for both small and large n: sizes up to 52 or 56 bytes are
 * handled by the branchy head/tail fills below without entering a loop,
 * and no store is ever misaligned, so the code is near-optimal and safe
 * on archs that fault on misaligned access.
 *
 * Returns dest, as the C standard requires.
 */
void *memset(void *dest, int c, size_t n)
{
	unsigned char *s = dest;
	size_t k;

	/* Fill head and tail with minimal branching. Each
	 * conditional ensures that all the subsequently used
	 * offsets are well-defined and in the dest region. */

	if (!n) return dest;
	s[0] = s[n-1] = c;
	if (n <= 2) return dest;
	s[1] = s[n-2] = c;
	s[2] = s[n-3] = c;
	if (n <= 6) return dest;
	s[3] = s[n-4] = c;
	if (n <= 8) return dest;

	/* Advance pointer to align it at a 4-byte boundary,
	 * and truncate n to a multiple of 4. The previous code
	 * already took care of any head/tail that get cut off
	 * by the alignment. */

	k = -(uintptr_t)s & 3;
	s += k;
	n -= k;
	n &= -4;

#ifdef __GNUC__
	/* may_alias tells GCC-compatible compilers that these word
	 * accesses deliberately violate the aliasing rules, so the
	 * optimizer must not assume they cannot touch the char data. */
	typedef uint32_t __attribute__((__may_alias__)) u32;
	typedef uint64_t __attribute__((__may_alias__)) u64;

	/* Replicate the fill byte into all four bytes of a word. */
	u32 c32 = ((u32)-1)/255 * (unsigned char)c;

	/* In preparation to copy 32 bytes at a time, aligned on
	 * an 8-byte boundary, fill head/tail up to 28 bytes each.
	 * As in the initial byte-based head/tail fill, each
	 * conditional below ensures that the subsequent offsets
	 * are valid (e.g. !(n<=24) implies n>=28). */

	*(u32 *)(s+0) = c32;
	*(u32 *)(s+n-4) = c32;
	if (n <= 8) return dest;
	*(u32 *)(s+4) = c32;
	*(u32 *)(s+8) = c32;
	*(u32 *)(s+n-12) = c32;
	*(u32 *)(s+n-8) = c32;
	if (n <= 24) return dest;
	*(u32 *)(s+12) = c32;
	*(u32 *)(s+16) = c32;
	*(u32 *)(s+20) = c32;
	*(u32 *)(s+24) = c32;
	*(u32 *)(s+n-28) = c32;
	*(u32 *)(s+n-24) = c32;
	*(u32 *)(s+n-20) = c32;
	*(u32 *)(s+n-16) = c32;

	/* Align to a multiple of 8 so we can fill 64 bits at a time,
	 * and avoid writing the same bytes twice as much as is
	 * practical without introducing additional branching. */

	k = 24 + ((uintptr_t)s & 4);
	s += k;
	n -= k;

	/* If this loop is reached, 28 tail bytes have already been
	 * filled, so any remainder when n drops below 32 can be
	 * safely ignored. */

	u64 c64 = c32 | ((u64)c32 << 32);
	for (; n >= 32; n-=32, s+=32) {
		*(u64 *)(s+0) = c64;
		*(u64 *)(s+8) = c64;
		*(u64 *)(s+16) = c64;
		*(u64 *)(s+24) = c64;
	}
#else
	/* Pure C fallback with no aliasing violations. */
	for (; n; n--, s++) *s = c;
#endif

	return dest;
}

cgit v1.2.1