#include <string.h>
#include <stdint.h>

void *memset(void *dest, int c, size_t n)
{
	unsigned char *s = dest;
	size_t k;
	/* Fill head and tail with minimal branching. Each
	 * conditional ensures that all the subsequently used
	 * offsets are well-defined and in the dest region. */
	if (!n) return dest;
	s[0] = c;
	s[n-1] = c;
	if (n <= 2) return dest;
	s[1] = c;
	s[2] = c;
	s[n-2] = c;
	s[n-3] = c;
	if (n <= 6) return dest;
	s[3] = c;
	s[n-4] = c;
	if (n <= 8) return dest;
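	/* Example: for n == 7 the stores above hit s[0], s[1], s[2],
	 * s[3] from the head and s[6], s[5], s[4], s[3] from the
	 * tail, so all seven bytes are written before the early
	 * return at n <= 8. */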
	/* Advance pointer to align it at a 4-byte boundary,
	 * and truncate n to a multiple of 4. The previous code
	 * already took care of any head/tail that get cut off
	 * by the alignment. */
	k = -(uintptr_t)s & 3;
	s += k;
	n -= k;
	n &= -4;
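	/* Example: if (uintptr_t)s == 0x1003, then -(uintptr_t)s & 3
	 * == 1 and s advances one byte to a 4-byte boundary; that byte
	 * was already set via s[0..3] above, and the up to 3 tail
	 * bytes trimmed by n &= -4 were set via s[n-1]..s[n-3]. */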
#ifdef __GNUC__
	typedef uint32_t __attribute__((__may_alias__)) u32;
	typedef uint64_t __attribute__((__may_alias__)) u64;
	u32 c32 = ((u32)-1)/255 * (unsigned char)c;
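	/* ((u32)-1)/255 == 0x01010101, so the multiply replicates the
	 * fill byte into every lane: c == 0xAB gives c32 == 0xABABABAB. */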
	/* In preparation to copy 32 bytes at a time, aligned on
	 * an 8-byte boundary, fill head/tail up to 28 bytes each.
	 * As in the initial byte-based head/tail fill, each
	 * conditional below ensures that the subsequent offsets
	 * are valid (e.g. !(n<=24) implies n>=28, since n is a
	 * multiple of 4 here). */
	*(u32 *)(s+0) = c32;
	*(u32 *)(s+n-4) = c32;
	if (n <= 8) return dest;
	*(u32 *)(s+4) = c32;
	*(u32 *)(s+8) = c32;
	*(u32 *)(s+n-12) = c32;
	*(u32 *)(s+n-8) = c32;
	if (n <= 24) return dest;
	*(u32 *)(s+12) = c32;
	*(u32 *)(s+16) = c32;
	*(u32 *)(s+20) = c32;
	*(u32 *)(s+24) = c32;
	*(u32 *)(s+n-28) = c32;
	*(u32 *)(s+n-24) = c32;
	*(u32 *)(s+n-20) = c32;
	*(u32 *)(s+n-16) = c32;
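	/* Example: for n == 28 the head stores cover s+0..s+27 and
	 * the tail stores cover the same range again, so the region
	 * is complete here and the 64-bit loop below is skipped
	 * (after the adjustment, n is at most 4). */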
	/* Align to a multiple of 8 so we can fill 64 bits at a time,
	 * and avoid writing the same bytes twice as much as is
	 * practical without introducing additional branching. */
	k = 24 + ((uintptr_t)s & 4);
	s += k;
	n -= k;
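	/* s is 4-byte aligned here, so k is 24 when s is already
	 * 8-byte aligned and 28 otherwise; either way s+k lands on an
	 * 8-byte boundary and every skipped byte was covered by the
	 * 28-byte head fill above. */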
	/* If this loop is reached, 28 tail bytes have already been
	 * filled, so any remainder when n drops below 32 can be
	 * safely ignored. */
	u64 c64 = c32 | ((u64)c32 << 32);
	for (; n >= 32; n-=32, s+=32) {
		*(u64 *)(s+0) = c64;
		*(u64 *)(s+8) = c64;
		*(u64 *)(s+16) = c64;
		*(u64 *)(s+24) = c64;
	}
#else
	/* Pure C fallback with no aliasing violations. */
	for (; n; n--, s++) *s = c;
#endif

	return dest;
}
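
/* Sanity-check sketch, not a definitive test: the TEST_MEMSET
 * guard, buffer sizes, and fill values are assumptions for
 * illustration. The harness compares every (offset, length) pair
 * against a plain byte loop, exercising the byte head/tail,
 * 32-bit head/tail, and 64-bit loop paths above. */
#ifdef TEST_MEMSET
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char buf[128], ref[128];
	for (size_t off = 0; off < 16; off++)
	for (size_t len = 0; off + len <= sizeof buf; len++) {
		/* Poison both buffers, run memset on one, fill the
		 * reference by hand, then compare byte for byte. */
		for (size_t i = 0; i < sizeof buf; i++) buf[i] = ref[i] = 0x5A;
		memset(buf+off, 0xC3, len);
		for (size_t i = 0; i < len; i++) ref[off+i] = 0xC3;
		for (size_t i = 0; i < sizeof buf; i++) assert(buf[i] == ref[i]);
	}
	puts("memset: all offset/length combinations match");
	return 0;
}
#endif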