#include "pthread_impl.h"
#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
void *__mremap(void *, size_t, size_t, int, ...);
int __madvise(void *, size_t, int);
	struct chunk *next, *prev;
	volatile uint64_t binmap;
	volatile int free_lock[2];
#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2*sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#define RECLAIM 163840
#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
#define C_INUSE ((size_t)1)
#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
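/* Layout note (illustration, assuming 8-byte size_t): a chunk starts with two
 * size_t boundary tags, psize (a copy of the previous chunk's size, i.e. its
 * footer) and csize, so OVERHEAD is 16 bytes and CHUNK_TO_MEM skips past both.
 * Chunk sizes are multiples of SIZE_ALIGN = 32, the low bit of each tag is the
 * C_INUSE flag (masked off with & -2), and requests larger than
 * MMAP_THRESHOLD = 0x1c00*32 bytes (224 KiB) bypass the heap entirely. */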
/* Synchronization tools */
static inline void lock(volatile int *lk)
	if (libc.threads_minus_1)
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
static inline void unlock(volatile int *lk)
		if (lk[1]) __wake(lk, 1, 1);
static inline void lock_bin(int i)
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
static inline void unlock_bin(int i)
	unlock(mal.bins[i].lock);
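/* Each lock is a pair of ints: lk[0] is the lock word swapped with a_swap(),
 * and lk[1] is the waiter count maintained by __wait(), so unlock() only
 * issues a __wake() when someone may actually be sleeping. Bin lists are set
 * up lazily: on first use, lock_bin() points head and tail at BIN_TO_CHUNK(i),
 * the bin structure itself treated as a pseudo-chunk, so an empty bin is one
 * whose head still equals BIN_TO_CHUNK(i). */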
static int first_set(uint64_t x)
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	if (sizeof(long) < 8) {
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		return debruijn32[(y&-y)*0x076be629 >> 27];
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
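/* The lookups above use the de Bruijn trick for finding the lowest set bit:
 * x & -x isolates it, multiplying by the de Bruijn constant shifts a unique
 * pattern into the top bits, and >> 58 (>> 27 for the 32-bit table) turns
 * that pattern into an index whose table entry is the bit position. When
 * long is narrower than 8 bytes the 64-bit word is processed as two 32-bit
 * halves. */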
static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
static int bin_index(size_t x)
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
static int bin_index_up(size_t x)
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
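/* How sizes map to bins (illustration, assuming 8-byte size_t, so
 * SIZE_ALIGN = 32): x = size/32 - 1, so sizes 32 through 33*32 = 1056 map
 * one-to-one onto bins 0..32. Beyond that, bin_tab groups sizes in runs of
 * 8 units (while x < 512) or 128 units, and bin 63 is the catch-all for
 * anything past 0x1c00 units, i.e. past MMAP_THRESHOLD. bin_index_up() is
 * the allocation-side variant: it rounds upward so that every chunk in the
 * returned bin (or any higher bin) is large enough for the request. */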
void __dump_heap(int x)
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
void *__expand_heap(size_t *);
static struct chunk *expand_heap(size_t n)
	static int heap_lock[2];
	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	p = __expand_heap(&n);
	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
		/* Valid/safe because of the prologue increment. */
		p = (char *)p + SIZE_ALIGN;
		w->psize = 0 | C_INUSE;
	/* Record new heap end and fill in footer. */
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;
	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w->csize = n | C_INUSE;
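/* Net effect of expand_heap(): every region it hands out is bracketed by
 * in-use markers. When the new space is not contiguous with the old heap, a
 * zero-sized chunk with C_INUSE set is written just below it, and the footer
 * above (csize = 0 | C_INUSE) plays the same role at the top, so coalescing
 * in free can never walk off either end of the region. */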
static int adjust_size(size_t *n)
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
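/* Worked example (assuming 8-byte size_t): a request for 10 bytes becomes
 * (10 + 16 + 31) & -32 = 32, the smallest multiple of SIZE_ALIGN that holds
 * the payload plus the 16 bytes of boundary-tag overhead. The guard above
 * keeps the final chunk size small enough that pointer differences within
 * it still fit in ptrdiff_t. */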
static void unbin(struct chunk *c, int i)
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	NEXT_CHUNK(c)->psize |= C_INUSE;
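/* c->prev == c->next can only happen when both point at the bin's own
 * pseudo-chunk, i.e. c is the sole member, so the bin is about to become
 * empty and its binmap bit is cleared before unlinking. Setting C_INUSE in
 * the successor's psize is what marks c as allocated from its neighbor's
 * point of view. */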
static int alloc_fwd(struct chunk *c)
	while (!((k=c->csize) & C_INUSE)) {
static int alloc_rev(struct chunk *c)
	while (!((k=c->psize) & C_INUSE)) {
		unbin(PREV_CHUNK(c), i);
/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
	struct chunk *next, *split;
	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
		n1 = CHUNK_SIZE(self);
	if (bin_index(n1-n) != j) return 0;
	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);
	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	self->csize = n | C_INUSE;
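/* What the guards above encode: the leftover piece of size n1-n must still
 * map to the same bin j, so the split chunk can simply take self's place in
 * the list that is already locked and no re-binning is needed. Bin 63, the
 * catch-all, additionally requires the remainder to stay above
 * MMAP_THRESHOLD so it still belongs there. */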
static void bin_chunk(struct chunk *);
static void trim(struct chunk *self, size_t n)
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;
	if (n >= n1 - DONTCARE) return;
	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);
	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;
void *malloc(size_t n)
	if (adjust_size(&n) < 0) return 0;
	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
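	/* Note on the direct-mmap encoding above: csize ends up with its low
	 * bit clear (len and SIZE_ALIGN - OVERHEAD are both even), which is
	 * exactly what IS_MMAPPED() tests, and psize records the distance from
	 * the mapping base to the chunk header so free/realloc can recover the
	 * base for munmap/mremap. */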
		uint64_t mask = mal.binmap & -(1ULL<<i);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
	/* Now patch up in case we over-allocated */
	return CHUNK_TO_MEM(c);
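/* Note on the bin search above: -(1ULL<<i) has every bit at position i and
 * higher set, so mal.binmap & -(1ULL<<i) keeps only the nonempty bins at
 * least as large as the ideal bin i, and first_set() on that mask yields the
 * smallest usable one; when the mask is empty the allocator falls back to
 * growing the heap via expand_heap(). */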
weak_alias(malloc, __internal_malloc);
static size_t mal0_clear(char *p, size_t pagesz, size_t n)
	typedef uint64_t __attribute__((__may_alias__)) T;
	typedef unsigned char T;
	size_t i = (uintptr_t)pp & (pagesz - 1);
		pp = memset(pp - i, 0, i);
		if (pp - p < pagesz) return pp - p;
		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
			if (((T *)pp)[-1] | ((T *)pp)[-2])
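/* The idea behind mal0_clear(), as used by calloc below: scan backwards from
 * p+n, memset()ing only the stretches where nonzero words are found, and
 * return the size of the remaining prefix (less than a page) for the caller
 * to clear. Pages freshly obtained from the kernel are already zero, so
 * skipping them avoids dirtying memory needlessly. */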
void *calloc(size_t m, size_t n)
	if (n && m > (size_t)-1/n) {
	if (malloc == __internal_malloc) {
		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
		n = mal0_clear(p, PAGE_SIZE, n);
	return memset(p, 0, n);
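/* The malloc == __internal_malloc test relies on the weak alias defined
 * above: if the program or an interposing library replaces malloc, the
 * comparison fails and calloc must not inspect chunk headers or use
 * mal0_clear(), since the block did not come from this allocator; it just
 * memsets the whole allocation instead. */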
void *realloc(void *p, size_t n)
	struct chunk *self, *next;
	if (!p) return malloc(n);
	if (adjust_size(&n) < 0) return 0;
	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);
	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
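	/* For the mmapped case above: a new size small enough for the normal
	 * heap (newlen < PAGE_SIZE) is served by a fresh malloc plus copy;
	 * otherwise the length is rounded to whole pages and resized with
	 * mremap(MREMAP_MAYMOVE), which can move the mapping without copying.
	 * The extra & 1 check crashes on a header that cannot belong to a live
	 * mmapped chunk, whose recorded offset is always even. */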
	next = NEXT_CHUNK(self);
	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();
	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;
	/* If we got enough space, split off the excess and return */
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		return CHUNK_TO_MEM(self);
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
static void bin_chunk(struct chunk *self)
	struct chunk *next = NEXT_CHUNK(self);
	size_t final_size, new_size, size;
	final_size = new_size = CHUNK_SIZE(self);
	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();
		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			if (self->psize & next->csize & C_INUSE)
			unlock(mal.free_lock);
		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
			next = NEXT_CHUNK(next);
	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);
	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);
	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;
	/* Replace middle of large chunks with fresh zero pages */
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
		__madvise((void *)a, b-a, MADV_DONTNEED);
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
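	/* On the zero-page replacement above: a and b are rounded so that at
	 * least SIZE_ALIGN bytes survive at each end of the freed chunk, keeping
	 * its header, footer and the neighbors' boundary tags intact, while the
	 * pages in between are handed back to the kernel with MADV_DONTNEED; the
	 * mmap(MAP_FIXED) variant achieves the same effect by remapping fresh
	 * anonymous pages over the region. */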
static void unmap_chunk(struct chunk *self)
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
	if (extra & 1) a_crash();
	struct chunk *self = MEM_TO_CHUNK(p);
	if (IS_MMAPPED(self))
weak_alias(free, __internal_free);
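/* free() itself dispatches on IS_MMAPPED: chunks that came from the
 * direct-mmap path in malloc are returned to the kernel via unmap_chunk(),
 * while ordinary heap chunks go through bin_chunk() to be coalesced with any
 * free neighbors and reinserted into the appropriate bin. */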
void __malloc_donate(char *start, char *end)
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
	start += align_start_up + OVERHEAD;
	end -= align_end_down;
	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
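	/* Worked example (assuming 8-byte size_t, SIZE_ALIGN = 32, OVERHEAD = 16):
	 * donating [0x1009, 0x2007) gives align_start_up = 31 & -(0x1009 + 16) = 7
	 * and align_end_down = 0x2007 & 31 = 7, so start becomes 0x1020 and end
	 * becomes 0x2000, both multiples of 32, and the donated chunk spans
	 * end - start = 0xfe0 bytes. The C_INUSE marks written to the outermost
	 * boundary tags keep free() from ever coalescing past the donated region. */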