X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fmalloc%2Fmalloc.c;h=96982596b94d8c638eb716f1b0000081288e7e8c;hb=1ef37aa00ea830dfda76e04e3d941cafa74d8b76;hp=991300ccc17bb039c2163e547a0b08171ee78287;hpb=14032c30e2d41e5c0dac25d399f7086f74d4e0c8;p=musl diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c index 991300cc..96982596 100644 --- a/src/malloc/malloc.c +++ b/src/malloc/malloc.c @@ -8,53 +8,19 @@ #include "libc.h" #include "atomic.h" #include "pthread_impl.h" +#include "malloc_impl.h" #if defined(__GNUC__) && defined(__PIC__) #define inline inline __attribute__((always_inline)) #endif -void *__mmap(void *, size_t, int, int, int, off_t); -int __munmap(void *, size_t); -void *__mremap(void *, size_t, size_t, int, ...); -int __madvise(void *, size_t, int); - -struct chunk { - size_t psize, csize; - struct chunk *next, *prev; -}; - -struct bin { - volatile int lock[2]; - struct chunk *head; - struct chunk *tail; -}; - static struct { volatile uint64_t binmap; struct bin bins[64]; volatile int free_lock[2]; } mal; - -#define SIZE_ALIGN (4*sizeof(size_t)) -#define SIZE_MASK (-SIZE_ALIGN) -#define OVERHEAD (2*sizeof(size_t)) -#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN) -#define DONTCARE 16 -#define RECLAIM 163840 - -#define CHUNK_SIZE(c) ((c)->csize & -2) -#define CHUNK_PSIZE(c) ((c)->psize & -2) -#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c))) -#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c))) -#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD) -#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD) -#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head)) - -#define C_INUSE ((size_t)1) - -#define IS_MMAPPED(c) !((c)->csize & (C_INUSE)) - +int __malloc_replaced; /* Synchronization tools */ @@ -157,8 +123,6 @@ void __dump_heap(int x) } #endif -void *__expand_heap(size_t *); - static struct chunk *expand_heap(size_t n) { static int heap_lock[2]; @@ -299,8 +263,6 @@ static int pretrim(struct chunk *self, size_t n, int i, int j) return 1; } -static void bin_chunk(struct chunk *); - static void trim(struct chunk *self, size_t n) { size_t n1 = CHUNK_SIZE(self); @@ -316,7 +278,7 @@ static void trim(struct chunk *self, size_t n) next->psize = n1-n | C_INUSE; self->csize = n | C_INUSE; - bin_chunk(split); + __bin_chunk(split); } void *malloc(size_t n) @@ -386,13 +348,21 @@ static size_t mal0_clear(char *p, size_t pagesz, size_t n) } } -void *__malloc0(size_t n) +void *calloc(size_t m, size_t n) { + if (n && m > (size_t)-1/n) { + errno = ENOMEM; + return 0; + } + n *= m; void *p = malloc(n); - if (!p || IS_MMAPPED(MEM_TO_CHUNK(p))) - return p; - if (n >= PAGE_SIZE) - n = mal0_clear(p, PAGE_SIZE, n); + if (!p) return p; + if (!__malloc_replaced) { + if (IS_MMAPPED(MEM_TO_CHUNK(p))) + return p; + if (n >= PAGE_SIZE) + n = mal0_clear(p, PAGE_SIZE, n); + } return memset(p, 0, n); } @@ -467,7 +437,7 @@ copy_free_ret: return new; } -static void bin_chunk(struct chunk *self) +void __bin_chunk(struct chunk *self) { struct chunk *next = NEXT_CHUNK(self); size_t final_size, new_size, size; @@ -555,7 +525,7 @@ void free(void *p) if (IS_MMAPPED(self)) unmap_chunk(self); else - bin_chunk(self); + __bin_chunk(self); } void __malloc_donate(char *start, char *end) @@ -574,5 +544,5 @@ void __malloc_donate(char *start, char *end) struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end); c->psize = n->csize = C_INUSE; c->csize = n->psize = C_INUSE | (end-start); - bin_chunk(c); + __bin_chunk(c); }
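
A note on the calloc hunk: calloc replaces the old internal __malloc0 entry point and adds two behaviors. First, it rejects any m*n product that would wrap before multiplying: since (size_t)-1 is SIZE_MAX and n is nonzero, m*n <= SIZE_MAX exactly when m <= SIZE_MAX/n. Second, because the allocator may now be interposed (tracked by __malloc_replaced, declared here and set outside this file when malloc is replaced), calloc only inspects musl's own chunk header (IS_MMAPPED) and takes the lazy mal0_clear path when the internal malloc is in use; with a replaced malloc it falls back to a plain memset over the whole allocation. A standalone sketch of the overflow guard follows; the helper name mul_overflows is illustrative, not a musl symbol.

/* Illustrative sketch, not part of the patch: the same wraparound
 * test calloc performs before computing n *= m. */
#include <stdio.h>
#include <stddef.h>

static int mul_overflows(size_t m, size_t n)
{
	/* (size_t)-1 is SIZE_MAX; m*n wraps iff n != 0 and m > SIZE_MAX/n */
	return n && m > (size_t)-1/n;
}

int main(void)
{
	printf("%d\n", mul_overflows(2, (size_t)-1/2 + 1)); /* 1: would wrap */
	printf("%d\n", mul_overflows(4096, 4096));          /* 0: fits */
	return 0;
}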
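
The struct chunk, struct bin, prototype, and macro removals at the top of the file are companions to the new #include "malloc_impl.h": those definitions move into that shared internal header so other translation units can use them, which is also why the formerly static bin_chunk becomes the extern __bin_chunk. A self-contained sketch of the invariant behind the removed MEM_TO_CHUNK/CHUNK_TO_MEM macros, assuming only the two-word header visible in the removed struct chunk:

/* Illustrative sketch: user memory sits OVERHEAD (two size_t header
 * words, psize and csize) past the start of its chunk, so the two
 * conversions are exact inverses. The low bit of csize is C_INUSE;
 * IS_MMAPPED tests that it is clear. */
#include <assert.h>
#include <stddef.h>

struct chunk { size_t psize, csize; };

#define OVERHEAD (2*sizeof(size_t))
#define C_INUSE ((size_t)1)
#define MEM_TO_CHUNK(p) ((struct chunk *)((char *)(p) - OVERHEAD))
#define CHUNK_TO_MEM(c) ((void *)((char *)(c) + OVERHEAD))
#define IS_MMAPPED(c) (!((c)->csize & C_INUSE))

int main(void)
{
	struct chunk c = { 0, 64 | C_INUSE };	/* in-use, heap-managed chunk */
	assert(MEM_TO_CHUNK(CHUNK_TO_MEM(&c)) == &c);
	assert(!IS_MMAPPED(&c));
	return 0;
}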