X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fmalloc%2Fmalloc.c;h=d4de2dc1ac71a83d0f2f8d183c0eb1a91922c197;hb=064898cfe2233526e7639c21e780695be5ece257;hp=39c7d05188ce7fc516e312f348dcfb9b688bf3e0;hpb=e5d78fe8df9bd61940abcd98ad07ed69b7da4350;p=musl

diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index 39c7d051..d4de2dc1 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -9,6 +9,10 @@
 #include "atomic.h"
 #include "pthread_impl.h"
 
+#if defined(__GNUC__) && defined(__PIC__)
+#define inline inline __attribute__((always_inline))
+#endif
+
 uintptr_t __brk(uintptr_t);
 void *__mmap(void *, size_t, int, int, int, off_t);
 int __munmap(void *, size_t);
@@ -21,7 +25,7 @@ struct chunk {
 };
 
 struct bin {
-	int lock[2];
+	volatile int lock[2];
 	struct chunk *head;
 	struct chunk *tail;
 };
@@ -29,10 +33,11 @@ struct bin {
 static struct {
 	uintptr_t brk;
 	size_t *heap;
-	uint64_t binmap;
+	volatile uint64_t binmap;
 	struct bin bins[64];
-	int brk_lock[2];
-	int free_lock[2];
+	volatile int brk_lock[2];
+	volatile int free_lock[2];
+	unsigned mmap_step;
 } mal;
 
 
@@ -58,30 +63,29 @@
 
 /* Synchronization tools */
 
-static void lock(volatile int *lk)
+static inline void lock(volatile int *lk)
 {
-	if (!libc.threads_minus_1) return;
-	while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+	if (libc.threads_minus_1)
+		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 }
 
-static void unlock(volatile int *lk)
+static inline void unlock(volatile int *lk)
 {
-	if (!libc.threads_minus_1) return;
-	a_store(lk, 0);
-	if (lk[1]) __wake(lk, 1, 1);
+	if (lk[0]) {
+		a_store(lk, 0);
+		if (lk[1]) __wake(lk, 1, 1);
+	}
 }
 
-static void lock_bin(int i)
+static inline void lock_bin(int i)
 {
-	if (libc.threads_minus_1)
-		lock(mal.bins[i].lock);
+	lock(mal.bins[i].lock);
 	if (!mal.bins[i].head)
 		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 }
 
-static void unlock_bin(int i)
+static inline void unlock_bin(int i)
 {
-	if (!libc.threads_minus_1) return;
 	unlock(mal.bins[i].lock);
 }
 
@@ -150,16 +154,51 @@ void __dump_heap(int x)
 
 static struct chunk *expand_heap(size_t n)
 {
+	static int init;
 	struct chunk *w;
 	uintptr_t new;
 
 	lock(mal.brk_lock);
 
+	if (!init) {
+		mal.brk = __brk(0);
+#ifdef SHARED
+		mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
+#endif
+		mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
+		mal.heap = (void *)mal.brk;
+		init = 1;
+	}
+
 	if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
 	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
 	n = new - mal.brk;
 
-	if (__brk(new) != new) goto fail;
+	if (__brk(new) != new) {
+		size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
+		n += -n & PAGE_SIZE-1;
+		if (n < min) n = min;
+		void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+		if (area == MAP_FAILED) goto fail;
+
+		mal.mmap_step++;
+		area = (char *)area + SIZE_ALIGN - OVERHEAD;
+		w = area;
+		n -= SIZE_ALIGN;
+		w->psize = 0 | C_INUSE;
+		w->csize = n | C_INUSE;
+		w = NEXT_CHUNK(w);
+		w->psize = n | C_INUSE;
+		w->csize = 0 | C_INUSE;
+
+		unlock(mal.brk_lock);
+
+		return area;
+	}
+
+	w = MEM_TO_CHUNK(mal.heap);
+	w->psize = 0 | C_INUSE;
 
 	w = MEM_TO_CHUNK(new);
 	w->psize = n | C_INUSE;
@@ -174,43 +213,10 @@ static struct chunk *expand_heap(size_t n)
 	return w;
 fail:
 	unlock(mal.brk_lock);
+	errno = ENOMEM;
 	return 0;
 }
 
-static int init_malloc(size_t n)
-{
-	static int init, waiters;
-	int state;
-	struct chunk *c;
-
-	if (init == 2) return 0;
-
-	while ((state=a_swap(&init, 1)) == 1)
-		__wait(&init, &waiters, 1, 1);
-	if (state) {
-		a_store(&init, 2);
-		return 0;
-	}
-
-	mal.brk = __brk(0) + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
-
-	c = expand_heap(n);
-
-	if (!c) {
-		a_store(&init, 0);
-		if (waiters) __wake(&init, 1, 1);
-		return -1;
-	}
-
-	mal.heap = (void *)c;
-	c->psize = 0 | C_INUSE;
-	free(CHUNK_TO_MEM(c));
-
-	a_store(&init, 2);
-	if (waiters) __wake(&init, -1, 1);
-	return 1;
-}
-
 static int adjust_size(size_t *n)
 {
 	/* Result of pointer difference must fit in ptrdiff_t. */
@@ -345,7 +351,6 @@ void *malloc(size_t n)
 	for (;;) {
 		uint64_t mask = mal.binmap & -(1ULL<<i);
 		if (!mask) {
-			if (init_malloc(n) > 0) continue;
 			c = expand_heap(n);
 			if (!c) return 0;
 			if (alloc_rev(c)) {
@@ -359,7 +364,7 @@ void *malloc(size_t n)
 		j = first_set(mask);
 		lock_bin(j);
 		c = mal.bins[j].head;
-		if (c != BIN_TO_CHUNK(j) && j == bin_index(c->csize)) {
+		if (c != BIN_TO_CHUNK(j)) {
 			if (!pretrim(c, n, i, j)) unbin(c, j);
 			unlock_bin(j);
 			break;
@@ -410,6 +415,9 @@ void *realloc(void *p, size_t n)
 
 	next = NEXT_CHUNK(self);
 
+	/* Crash on corrupted footer (likely from buffer overflow) */
+	if (next->psize != self->csize) a_crash();
+
 	/* Merge adjacent chunks if we need more space. This is not
 	 * a waste of time even if we fail to get enough space, because our
 	 * subsequent call to free would otherwise have to do the merge. */
@@ -463,6 +471,9 @@ void free(void *p)
 	final_size = new_size = CHUNK_SIZE(self);
 	next = NEXT_CHUNK(self);
 
+	/* Crash on corrupted footer (likely from buffer overflow) */
+	if (next->psize != self->csize) a_crash();
+
 	for (;;) {
 		/* Replace middle of large chunks with fresh zero pages */
 		if (reclaim && (self->psize & next->csize & C_INUSE)) {
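
Note on the added integrity checks: in this allocator, each chunk's size is
stored twice, once in its own header (csize) and once, as a footer, in the
next chunk's header (psize). The new `if (next->psize != self->csize)
a_crash();` lines in free() and realloc() trap the common case where a buffer
overflow off the end of an allocation has clobbered the following header,
before the corrupted metadata can be used. Below is a minimal standalone
sketch of that mirrored-size invariant; it is not musl's code, and the struct
layout, the check_footer() helper, and the crash() stand-in for a_crash() are
illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified chunk: the header holds this chunk's size (csize) and a
 * copy of the previous chunk's size (psize), which acts as its footer. */
struct chunk {
	size_t psize;
	size_t csize;
	char data[32];
};

/* Stand-in for musl's a_crash(): terminate immediately, no unwinding. */
static void crash(void)
{
	fprintf(stderr, "heap corruption detected\n");
	abort();
}

/* The invariant the diff enforces in free()/realloc(): the size in this
 * chunk's header must match the copy stored in the next chunk's header. */
static void check_footer(struct chunk *self, struct chunk *next)
{
	if (next->psize != self->csize) crash();
}

int main(void)
{
	struct chunk heap[2] = {
		{ .psize = 0,              .csize = sizeof heap[0] },
		{ .psize = sizeof heap[0], .csize = sizeof heap[1] },
	};

	check_footer(&heap[0], &heap[1]);	/* consistent: passes */

	/* Deliberate overflow (undefined behavior, shown only for
	 * illustration): writing past heap[0].data clobbers heap[1].psize,
	 * exactly the damage pattern the new checks are meant to catch. */
	memset(heap[0].data, 'A', sizeof heap[0].data + sizeof(size_t));

	check_footer(&heap[0], &heap[1]);	/* mismatch: calls crash() */
	return 0;
}

In musl itself the compared fields store size|C_INUSE, so the flag bits
participate in the comparison too, but the principle is the same: a direct
equality test between the header and its mirrored footer.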