X-Git-Url: http://nsz.repo.hu/git/?p=musl;a=blobdiff_plain;f=src%2Fmalloc%2Fmalloc.c;h=1a6d1493291c7ed2d816687d92d100b78c972137;hp=3c08c41e2fd6be42e765adc76e89113063e47d85;hb=b8ccf8e46bab6ee9d63a6e392c3b33b9aa89255c;hpb=26031da0f83a2a3ed52190077931ee6c18dfd689

diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 3c08c41e..1a6d1493 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
 #include <stdlib.h>
 #include <string.h>
 #include <limits.h>
@@ -8,6 +9,10 @@
 #include "atomic.h"
 #include "pthread_impl.h"
 
+#if defined(__GNUC__) && defined(__PIC__)
+#define inline inline __attribute__((always_inline))
+#endif
+
 uintptr_t __brk(uintptr_t);
 void *__mmap(void *, size_t, int, int, int, off_t);
 int __munmap(void *, size_t);
@@ -15,9 +20,8 @@ void *__mremap(void *, size_t, size_t, int, ...);
 int __madvise(void *, size_t, int);
 
 struct chunk {
-	size_t data[1];
-	struct chunk *next;
-	struct chunk *prev;
+	size_t psize, csize;
+	struct chunk *next, *prev;
 };
 
 struct bin {
@@ -43,37 +47,35 @@ static struct {
 #define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
 #define DONTCARE 16
 #define RECLAIM 163840
 
-#define CHUNK_SIZE(c) ((c)->data[0] & SIZE_MASK)
-#define CHUNK_PSIZE(c) ((c)->data[-1] & SIZE_MASK)
+#define CHUNK_SIZE(c) ((c)->csize & -2)
+#define CHUNK_PSIZE(c) ((c)->psize & -2)
 #define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
 #define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
-#define MEM_TO_CHUNK(p) (struct chunk *)((size_t *)p - 1)
-#define CHUNK_TO_MEM(c) (void *)((c)->data+1)
+#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
+#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
 #define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
 
 #define C_INUSE ((size_t)1)
-#define C_FLAGS ((size_t)3)
-#define C_SIZE SIZE_MASK
 
-#define IS_MMAPPED(c) !((c)->data[0] & (C_INUSE))
+#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
 
 /* Synchronization tools */
 
-static void lock(volatile int *lk)
+static inline void lock(volatile int *lk)
 {
 	if (!libc.threads_minus_1) return;
 	while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 }
 
-static void unlock(volatile int *lk)
+static inline void unlock(volatile int *lk)
 {
 	if (!libc.threads_minus_1) return;
 	a_store(lk, 0);
 	if (lk[1]) __wake(lk, 1, 1);
 }
 
-static void lock_bin(int i)
+static inline void lock_bin(int i)
 {
 	if (libc.threads_minus_1)
 		lock(mal.bins[i].lock);
@@ -81,7 +83,7 @@ static void lock_bin(int i)
 	mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 }
 
-static void unlock_bin(int i)
+static inline void unlock_bin(int i)
 {
 	if (!libc.threads_minus_1) return;
 	unlock(mal.bins[i].lock);
@@ -119,14 +121,14 @@ static int bin_index(size_t x)
 	x = x / SIZE_ALIGN - 1;
 	if (x <= 32) return x;
 	if (x > 0x1c00) return 63;
-	return ((union { float v; uint32_t r; }){ x }.r>>21) - 496;
+	return ((union { float v; uint32_t r; }){(int)x}.r>>21) - 496;
 }
 
 static int bin_index_up(size_t x)
 {
 	x = x / SIZE_ALIGN - 1;
 	if (x <= 32) return x;
-	return ((union { float v; uint32_t r; }){ x }.r+0x1fffff>>21) - 496;
+	return ((union { float v; uint32_t r; }){(int)x}.r+0x1fffff>>21) - 496;
 }
 
 #if 0
@@ -137,8 +139,8 @@ void __dump_heap(int x)
 	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
 		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
 			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
-			c->data[0] & 15,
-			NEXT_CHUNK(c)->data[-1] & 15);
+			c->csize & 15,
+			NEXT_CHUNK(c)->psize & 15);
 	for (i=0; i<64; i++) {
 		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
 			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
@@ -164,11 +166,11 @@ static struct chunk *expand_heap(size_t n)
 	if (__brk(new) != new) goto fail;
 
 	w = MEM_TO_CHUNK(new);
-	w->data[-1] = n | C_INUSE;
-	w->data[0] = 0 | C_INUSE;
+	w->psize = n | C_INUSE;
+	w->csize = 0 | C_INUSE;
 
 	w = MEM_TO_CHUNK(mal.brk);
-	w->data[0] = n | C_INUSE;
+	w->csize = n | C_INUSE;
 	mal.brk = new;
 
 	unlock(mal.brk_lock);
@@ -179,7 +181,7 @@ fail:
 	return 0;
 }
 
-static int init_malloc()
+static int init_malloc(size_t n)
 {
 	static int init, waiters;
 	int state;
@@ -194,9 +196,13 @@
 		return 0;
 	}
 
-	mal.brk = __brk(0) + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
+	mal.brk = __brk(0);
+#ifdef SHARED
+	mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
+#endif
+	mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
 
-	c = expand_heap(1);
+	c = expand_heap(n);
 
 	if (!c) {
 		a_store(&init, 0);
@@ -205,12 +211,12 @@
 	}
 
 	mal.heap = (void *)c;
-	c->data[-1] = 0 | C_INUSE;
+	c->psize = 0 | C_INUSE;
 	free(CHUNK_TO_MEM(c));
 
 	a_store(&init, 2);
 	if (waiters) __wake(&init, -1, 1);
-	return 0;
+	return 1;
 }
 
 static int adjust_size(size_t *n)
@@ -235,18 +241,18 @@ static void unbin(struct chunk *c, int i)
 	a_and_64(&mal.binmap, ~(1ULL<<i));
 	c->prev->next = c->next;
 	c->next->prev = c->prev;
-	c->data[0] |= C_INUSE;
-	NEXT_CHUNK(c)->data[-1] |= C_INUSE;
+	c->csize |= C_INUSE;
+	NEXT_CHUNK(c)->psize |= C_INUSE;
 }
 
 static int alloc_fwd(struct chunk *c)
 {
 	int i;
 	size_t k;
-	while (!((k=c->data[0]) & C_INUSE)) {
+	while (!((k=c->csize) & C_INUSE)) {
 		i = bin_index(k);
 		lock_bin(i);
-		if (c->data[0] == k) {
+		if (c->csize == k) {
 			unbin(c, i);
 			unlock_bin(i);
 			return 1;
@@ -260,10 +266,10 @@ static int alloc_rev(struct chunk *c)
 {
 	int i;
 	size_t k;
-	while (!((k=c->data[-1]) & C_INUSE)) {
+	while (!((k=c->psize) & C_INUSE)) {
 		i = bin_index(k);
 		lock_bin(i);
-		if (c->data[-1] == k) {
+		if (c->psize == k) {
 			unbin(PREV_CHUNK(c), i);
 			unlock_bin(i);
 			return 1;
@@ -300,10 +306,10 @@ static int pretrim(struct chunk *self, size_t n, int i, int j)
 	split->next = self->next;
 	split->prev->next = split;
 	split->next->prev = split;
-	split->data[-1] = n | C_INUSE;
-	split->data[0] = n1-n;
-	next->data[-1] = n1-n;
-	self->data[0] = n | C_INUSE;
+	split->psize = n | C_INUSE;
+	split->csize = n1-n;
+	next->psize = n1-n;
+	self->csize = n | C_INUSE;
 	return 1;
 }
 
@@ -317,10 +323,10 @@ static void trim(struct chunk *self, size_t n)
 	next = NEXT_CHUNK(self);
 	split = (void *)((char *)self + n);
 
-	split->data[-1] = n | C_INUSE;
-	split->data[0] = n1-n | C_INUSE;
-	next->data[-1] = n1-n | C_INUSE;
-	self->data[0] = n | C_INUSE;
+	split->psize = n | C_INUSE;
+	split->csize = n1-n | C_INUSE;
+	next->psize = n1-n | C_INUSE;
+	self->csize = n | C_INUSE;
 
 	free(CHUNK_TO_MEM(split));
 }
@@ -333,13 +339,13 @@ void *malloc(size_t n)
 	if (adjust_size(&n) < 0) return 0;
 
 	if (n > MMAP_THRESHOLD) {
-		size_t len = n + PAGE_SIZE - 1 & -PAGE_SIZE;
+		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
 		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
 			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 		if (base == (void *)-1) return 0;
-		c = (void *)(base + SIZE_ALIGN - sizeof(size_t));
-		c->data[0] = len - (SIZE_ALIGN - sizeof(size_t));
-		c->data[-1] = SIZE_ALIGN - sizeof(size_t);
+		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
+		c->csize = len - (SIZE_ALIGN - OVERHEAD);
+		c->psize = SIZE_ALIGN - OVERHEAD;
 		return CHUNK_TO_MEM(c);
 	}
 
@@ -347,21 +353,21 @@ void *malloc(size_t n)
 	for (;;) {
 		uint64_t mask = mal.binmap & -(1ULL<<i);
 		if (!mask) {
-			if (init_malloc() < 0) return 0;
+			if (init_malloc(n) > 0) continue;
 			c = expand_heap(n);
 			if (!c) return 0;
 			if (alloc_rev(c)) {
 				struct chunk *x = c;
 				c = PREV_CHUNK(c);
-				NEXT_CHUNK(x)->data[-1] = c->data[0] =
-					x->data[0] + CHUNK_SIZE(c);
+				NEXT_CHUNK(x)->psize = c->csize =
+					x->csize + CHUNK_SIZE(c);
 			}
 			break;
 		}
 		j = first_set(mask);
 		lock_bin(j);
 		c = mal.bins[j].head;
-		if (c != BIN_TO_CHUNK(j) && j == bin_index(c->data[0])) {
+		if (c != BIN_TO_CHUNK(j) && j == bin_index(c->csize)) {
 			if (!pretrim(c, n, i, j)) unbin(c, j);
 			unlock_bin(j);
 			break;
@@ -389,10 +395,12 @@ void *realloc(void *p, size_t n)
 	n1 = n0 = CHUNK_SIZE(self);
 
 	if (IS_MMAPPED(self)) {
-		size_t extra = self->data[-1];
+		size_t extra = self->psize;
 		char *base = (char *)self - extra;
 		size_t oldlen = n0 + extra;
 		size_t newlen = n + extra;
+		/* Crash on realloc of freed chunk */
+		if (extra & 1) a_crash();
 		if (newlen < PAGE_SIZE && (new = malloc(n))) {
 			memcpy(new, p, n-OVERHEAD);
 			free(p);
@@ -404,7 +412,7 @@
 		if (base == (void *)-1)
 			return newlen < oldlen ? p : 0;
 		self = (void *)(base + extra);
-		self->data[0] = newlen - extra;
+		self->csize = newlen - extra;
 		return CHUNK_TO_MEM(self);
 	}
 
@@ -422,8 +430,8 @@ void *realloc(void *p, size_t n)
 		self = PREV_CHUNK(self);
 		n1 += CHUNK_SIZE(self);
 	}
-	self->data[0] = n1 | C_INUSE;
-	next->data[-1] = n1 | C_INUSE;
+	self->csize = n1 | C_INUSE;
+	next->psize = n1 | C_INUSE;
 
 	/* If we got enough space, split off the excess and return */
 	if (n <= n1) {
@@ -451,9 +459,11 @@ void free(void *p)
 	if (!p) return;
 
 	if (IS_MMAPPED(self)) {
-		size_t extra = self->data[-1];
+		size_t extra = self->psize;
 		char *base = (char *)self - extra;
 		size_t len = CHUNK_SIZE(self) + extra;
+		/* Crash on double free */
+		if (extra & 1) a_crash();
 		__munmap(base, len);
 		return;
 	}
@@ -463,7 +473,7 @@
 
 	for (;;) {
 		/* Replace middle of large chunks with fresh zero pages */
-		if (reclaim && (self->data[-1] & next->data[0] & C_INUSE)) {
+		if (reclaim && (self->psize & next->csize & C_INUSE)) {
			uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
			uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
 #if 1
@@ -474,13 +484,13 @@
 #endif
 		}
 
-		if (self->data[-1] & next->data[0] & C_INUSE) {
-			self->data[0] = final_size | C_INUSE;
-			next->data[-1] = final_size | C_INUSE;
+		if (self->psize & next->csize & C_INUSE) {
+			self->csize = final_size | C_INUSE;
+			next->psize = final_size | C_INUSE;
 			i = bin_index(final_size);
 			lock_bin(i);
 			lock(mal.free_lock);
-			if (self->data[-1] & next->data[0] & C_INUSE)
+			if (self->psize & next->csize & C_INUSE)
 				break;
 			unlock(mal.free_lock);
 			unlock_bin(i);
@@ -503,8 +513,8 @@
 		}
 	}
 
-	self->data[0] = final_size;
-	next->data[-1] = final_size;
+	self->csize = final_size;
+	next->psize = final_size;
 	unlock(mal.free_lock);
 
 	self->next = BIN_TO_CHUNK(i);
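Not part of the diff above: a minimal standalone sketch of the header encoding this patch introduces, reusing the macro names from the patched malloc.c (the chunk sizes below are made-up illustrative values). Each chunk stores its own length in csize and its predecessor's length in psize; sizes are multiples of SIZE_ALIGN and therefore even, so bit 0 of each word is free to carry C_INUSE, and an mmapped chunk is recognized by a clear flag. That invariant is what the new `if (extra & 1) a_crash();` guards exploit: a live mmapped chunk always has an even psize.

#include <assert.h>
#include <stddef.h>

/* Header layout and macros as in the patched malloc.c: two size_t
 * words sit immediately before the memory returned to the user. */
struct chunk {
	size_t psize, csize;
	struct chunk *next, *prev;
};

#define C_INUSE ((size_t)1)
#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define IS_MMAPPED(c) (!((c)->csize & (C_INUSE)))

int main(void)
{
	struct chunk c;

	/* An in-use heap chunk: the length lives in the high bits, the
	 * C_INUSE flag in bit 0 of each header word. */
	c.csize = 64 | C_INUSE;
	c.psize = 32 | C_INUSE;
	assert(CHUNK_SIZE(&c) == 64);
	assert(CHUNK_PSIZE(&c) == 32);
	assert(!IS_MMAPPED(&c));

	/* A chunk served by mmap: C_INUSE stays clear, and psize holds
	 * the distance back to the mapping base (SIZE_ALIGN - OVERHEAD
	 * in the patch, e.g. 16 on 64-bit), which is always even, so an
	 * odd psize can only mean a freed or corrupted header. */
	c.csize = 4096 - 32;
	c.psize = 16;
	assert(IS_MMAPPED(&c));
	assert(!(c.psize & 1)); /* the invariant a_crash() enforces */

	return 0;
}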