#include "pthread_impl.h"
#include "malloc_impl.h"
#include "fork_impl.h"

#define malloc __libc_malloc_impl
#define realloc __libc_realloc
#define free __libc_free

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

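/* Global allocator state: binmap has one bit per bin, set when that bin
 * may be non-empty; bins[] are 64 size-class free lists, each with its
 * own lock; split_merge_lock serializes splitting and merging of chunks. */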
static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int split_merge_lock[2];
} mal;

/* Synchronization tools */

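/* A lock is a pair of ints: lk[0] is the futex/lock word and lk[1] counts
 * waiters. Locking is skipped entirely while the process is single-threaded
 * (libc.need_locks is 0); a negative need_locks means the last competing
 * thread has exited, so one final locked operation resynchronizes and then
 * turns locking back off. */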
static inline void lock(volatile int *lk)
{
	int need_locks = libc.need_locks;
	if (need_locks) {
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
		if (need_locks < 0) libc.need_locks = 0;
	}
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

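/* first_set returns the index of the lowest set bit in x; it is used to
 * find the smallest usable bin when scanning binmap. Normally this is just
 * a_ctz_64; the de Bruijn tables below are a portable fallback. */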
static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

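/* bin_index maps a chunk size to one of 64 bins. Small bins hold a single
 * exact size (bin i holds chunks of (i+1)*SIZE_ALIGN for i < 32); larger
 * bins cover widening ranges via bin_tab, and bin 63 takes everything
 * above 0x1c00 SIZE_ALIGN units. For example, with SIZE_ALIGN of 32 on a
 * typical 64-bit target, a 128-byte chunk gives 128/32 - 1 = 3, bin 3.
 * bin_index_up instead returns the smallest bin all of whose chunks are
 * large enough to satisfy the given size. */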
static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

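/* Debugging helper: walks the heap and the bins and reports chunks whose
 * headers or binmap bits are inconsistent. It is kept compiled out. */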
#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

/* Expand the heap in-place if brk can be used, or otherwise via mmap,
 * using an exponential lower bound on growth by mmap to make
 * fragmentation asymptotically irrelevant. The size argument is both
 * an input and an output, since the caller needs to know the size
 * allocated, which will be larger than requested due to page alignment
 * and mmap minimum size rules. The caller is responsible for locking
 * to prevent concurrent calls. */

static void *__expand_heap(size_t *pn)
{
	static uintptr_t brk;
	static unsigned mmap_step;
	size_t n = *pn;

	if (n > SIZE_MAX/2 - PAGE_SIZE) {
		errno = ENOMEM;
		return 0;
	}
	n += -n & PAGE_SIZE-1;

	if (!brk) {
		brk = __syscall(SYS_brk, 0);
		brk += -brk & PAGE_SIZE-1;
	}

	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
	    && __syscall(SYS_brk, brk+n)==brk+n) {
		*pn = n;
		brk += n;
		return (void *)(brk-n);
	}

	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
	if (n < min) n = min;
	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) return 0;
	*pn = n;
	mmap_step++;
	return area;
}

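/* expand_heap wraps __expand_heap and frames the new memory as a chunk:
 * fresh (non-contiguous) space gets an in-use sentinel header below it,
 * and a zero-sized in-use chunk is written at the new end of heap, so
 * coalescing in __bin_chunk can never run past either boundary. */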
static struct chunk *expand_heap(size_t n)
{
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	p = __expand_heap(&n);
	if (!p) return 0;

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	return w;
}

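/* adjust_size converts a requested size into a chunk size: it adds the
 * header OVERHEAD, rounds up to a multiple of SIZE_ALIGN, and rejects
 * requests so large that pointer differences could overflow PTRDIFF_MAX.
 * For example, on a typical 64-bit target (OVERHEAD 16, SIZE_ALIGN 32) a
 * 100-byte request becomes a 128-byte chunk. */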
static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

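/* Each bin is a circular doubly-linked list whose sentinel head/tail is
 * BIN_TO_CHUNK(i). unbin removes a chunk, clears the binmap bit when the
 * bin becomes empty, and marks the chunk (and the next chunk's psize)
 * in-use; bin_chunk appends at the tail and sets the binmap bit when the
 * bin was previously empty. Callers must hold the bin lock. */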
static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

static void bin_chunk(struct chunk *self, int i)
{
	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;
	if (self->prev == BIN_TO_CHUNK(i))
		a_or_64(&mal.binmap, 1ULL<<i);
}

static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	int i = bin_index(n1-n);
	lock_bin(i);
	bin_chunk(split, i);
	unlock_bin(i);
}

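/* malloc: requests above MMAP_THRESHOLD get a private anonymous mapping of
 * their own. Otherwise the exact-fit bin is tried first; failing that, the
 * binmap is scanned for the smallest larger non-empty bin under
 * split_merge_lock, the heap is expanded if nothing is found, and the
 * resulting chunk is trimmed down to the requested size. */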
void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;
	uint64_t mask;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	if (i<63 && (mal.binmap & (1ULL<<i))) {
		lock_bin(i);
		c = mal.bins[i].head;
		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
			unbin(c, i);
			unlock_bin(i);
			return CHUNK_TO_MEM(c);
		}
		unlock_bin(i);
	}
	lock(mal.split_merge_lock);
	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}
	if (!mask) {
		c = expand_heap(n);
		if (!c) {
			unlock(mal.split_merge_lock);
			return 0;
		}
	}
	trim(c, n);
	unlock(mal.split_merge_lock);
	return CHUNK_TO_MEM(c);
}

int __malloc_allzerop(void *p)
{
	return IS_MMAPPED(MEM_TO_CHUNK(p));
}

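/* realloc: mmapped chunks are resized with mremap (or, when small enough,
 * copied into a normal chunk). Heap chunks shrink in place by splitting
 * off and freeing the tail, and grow in place by absorbing a free
 * following chunk under split_merge_lock; otherwise fall back to
 * malloc + memcpy + free. */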
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (n<=n0 && n0-n<=DONTCARE) return p;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
			n0 = n;
			goto copy_free_ret;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	if (n < n0) {
		int i = bin_index_up(n);
		int j = bin_index(n0);
		if (i<j && (mal.binmap & (1ULL << i)))
			goto copy_realloc;
		struct chunk *split = (void *)((char *)self + n);
		self->csize = split->psize = n | C_INUSE;
		split->csize = next->psize = n0-n | C_INUSE;
		__bin_chunk(split);
		return CHUNK_TO_MEM(self);
	}

	lock(mal.split_merge_lock);

	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
	if (n0+nsize >= n) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			unlock_bin(i);
			next = NEXT_CHUNK(next);
			self->csize = next->psize = n0+nsize | C_INUSE;
			trim(self, n);
			unlock(mal.split_merge_lock);
			return CHUNK_TO_MEM(self);
		}
		unlock_bin(i);
	}
	unlock(mal.split_merge_lock);

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
copy_free_ret:
	memcpy(new, p, (n<n0 ? n : n0) - OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

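/* __bin_chunk is the heart of free for non-mmapped chunks: under
 * split_merge_lock it coalesces the chunk with free neighbors, puts the
 * result in the matching bin, and for chunks larger than RECLAIM hands
 * the interior pages back to the kernel with MADV_DONTNEED. */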
void __bin_chunk(struct chunk *self)
{
	struct chunk *next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	lock(mal.split_merge_lock);

	size_t osize = CHUNK_SIZE(self), size = osize;

	/* Since we hold split_merge_lock, only transition from free to
	 * in-use can race; in-use to free is impossible */
	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);

	if (psize) {
		int i = bin_index(psize);
		lock_bin(i);
		if (!(self->psize & C_INUSE)) {
			struct chunk *prev = PREV_CHUNK(self);
			unbin(prev, i);
			self = prev;
			size += psize;
		}
		unlock_bin(i);
	}
	if (nsize) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			next = NEXT_CHUNK(next);
			size += nsize;
		}
		unlock_bin(i);
	}

	int i = bin_index(size);
	lock_bin(i);

	self->csize = size;
	next->psize = size;
	bin_chunk(self, i);
	unlock(mal.split_merge_lock);

	/* Replace middle of large chunks with fresh zero pages */
	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
		int e = errno;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
		errno = e;
	}

	unlock_bin(i);
}

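/* Chunks flagged IS_MMAPPED were allocated by their own mmap in malloc;
 * psize holds the offset back to the mapping base, so freeing is a plain
 * munmap of that base and length. free() dispatches between the two cases. */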
static void unmap_chunk(struct chunk *self)
{
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
	if (extra & 1) a_crash();
	int e = errno;
	__munmap(base, len);
	errno = e;
}

void free(void *p)
{
	if (!p) return;

	struct chunk *self = MEM_TO_CHUNK(p);

	if (IS_MMAPPED(self))
		unmap_chunk(self);
	else
		__bin_chunk(self);
}

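/* __malloc_donate turns an arbitrary [start,end) byte range into a free
 * chunk and bins it, provided the range is still a nonzero multiple of
 * SIZE_ALIGN after alignment and header overhead; it is used to donate
 * otherwise-unused memory (for example by the dynamic linker). */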
void __malloc_donate(char *start, char *end)
{
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;

	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + OVERHEAD;
	end   -= align_end_down;

	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
	__bin_chunk(c);
}

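/* Fork handler: with who<0 (before fork) every allocator lock is taken so
 * no lock is held mid-operation across fork; with who==0 (parent, after
 * fork) they are released; with who>0 (child) the locks are simply reset,
 * since only the forking thread exists in the child. */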
void __malloc_atfork(int who)
{
	if (who<0) {
		lock(mal.split_merge_lock);
		for (int i=0; i<64; i++)
			lock(mal.bins[i].lock);
	} else if (!who) {
		for (int i=0; i<64; i++)
			unlock(mal.bins[i].lock);
		unlock(mal.split_merge_lock);
	} else {
		for (int i=0; i<64; i++)
			mal.bins[i].lock[0] = mal.bins[i].lock[1] = 0;
		mal.split_merge_lock[1] = 0;
		mal.split_merge_lock[0] = 0;
	}
}