#include "atomic.h"
#include "pthread_impl.h"
+#if defined(__GNUC__) && defined(__PIC__)
+#define inline inline __attribute__((always_inline))
+#endif
+
uintptr_t __brk(uintptr_t);
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
struct bin bins[64];
int brk_lock[2];
int free_lock[2];
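+ /* number of times expand_heap has fallen back to mmap; scales the minimum mapping size */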
+ unsigned mmap_step;
} mal;
#define DONTCARE 16
#define RECLAIM 163840
-#define CHUNK_SIZE(c) ((c)->csize & SIZE_MASK)
-#define CHUNK_PSIZE(c) ((c)->psize & SIZE_MASK)
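+/* the low bit of csize/psize holds C_INUSE; masking with -2 (all ones except bit 0) yields the size */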
+#define CHUNK_SIZE(c) ((c)->csize & -2)
+#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
#define C_INUSE ((size_t)1)
-#define C_FLAGS ((size_t)3)
-#define C_SIZE SIZE_MASK
#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
/* Synchronization tools */
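+/* lk[0] is the lock word; lk[1] counts waiters for __wait/__wake */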
-static void lock(volatile int *lk)
+static inline void lock(volatile int *lk)
{
- if (!libc.threads_minus_1) return;
- while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+ if (libc.threads_minus_1)
+ while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}
-static void unlock(volatile int *lk)
+static inline void unlock(volatile int *lk)
{
- if (!libc.threads_minus_1) return;
- a_store(lk, 0);
- if (lk[1]) __wake(lk, 1, 1);
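+ /* if the lock was never taken (single-threaded case), skip the atomic store and wake */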
+ if (lk[0]) {
+ a_store(lk, 0);
+ if (lk[1]) __wake(lk, 1, 1);
+ }
}
-static void lock_bin(int i)
+static inline void lock_bin(int i)
{
- if (libc.threads_minus_1)
- lock(mal.bins[i].lock);
+ lock(mal.bins[i].lock);
if (!mal.bins[i].head)
mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}
-static void unlock_bin(int i)
+static inline void unlock_bin(int i)
{
- if (!libc.threads_minus_1) return;
unlock(mal.bins[i].lock);
}
new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
n = new - mal.brk;
- if (__brk(new) != new) goto fail;
+ if (__brk(new) != new) {
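+ /* brk failed; fall back to an anonymous mmap. The minimum mapping size
+  * (PAGE_SIZE << mmap_step/2) doubles every other fallback, so repeated
+  * failures do not produce an unbounded number of small mappings. */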
+ size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
+ n += -n & PAGE_SIZE-1;
+ if (n < min) n = min;
+ void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (area == MAP_FAILED) goto fail;
+
+ mal.mmap_step++;
+ area = (char *)area + SIZE_ALIGN - OVERHEAD;
+ w = area;
+ n -= SIZE_ALIGN;
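+ /* mark the first chunk's psize and a zero-size trailing chunk as in-use
+  * so the allocator never merges past either end of the mapping */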
+ w->psize = 0 | C_INUSE;
+ w->csize = n | C_INUSE;
+ w = NEXT_CHUNK(w);
+ w->psize = n | C_INUSE;
+ w->csize = 0 | C_INUSE;
+
+ unlock(mal.brk_lock);
+
+ return area;
+ }
w = MEM_TO_CHUNK(new);
w->psize = n | C_INUSE;
return w;
fail:
unlock(mal.brk_lock);
+ errno = ENOMEM;
return 0;
}
return 0;
}
- mal.brk = __brk(0) + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
+ mal.brk = __brk(0);
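+ /* when dynamic linking, start the heap on a fresh page boundary */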
+#ifdef SHARED
+ mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
+#endif
+ mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
c = expand_heap(n);
next = NEXT_CHUNK(self);
+ /* Crash on corrupted footer (likely from buffer overflow) */
+ if (next->psize != self->csize) a_crash();
+
/* Merge adjacent chunks if we need more space. This is not
* a waste of time even if we fail to get enough space, because our
* subsequent call to free would otherwise have to do the merge. */
final_size = new_size = CHUNK_SIZE(self);
next = NEXT_CHUNK(self);
+ /* Crash on corrupted footer (likely from buffer overflow) */
+ if (next->psize != self->csize) a_crash();
+
for (;;) {
/* Replace middle of large chunks with fresh zero pages */
if (reclaim && (self->psize & next->csize & C_INUSE)) {