const uint16_t size_classes[] = {
    1, 2, 3, 4, 5, 6, 7, 8,
    // ...
    1169, 1364, 1637, 2047,
    2340, 2730, 3276, 4095,
    4680, 5460, 6552, 8191,
};

static const uint8_t small_cnt_tab[][3] = {
    // ...
};

static const uint8_t med_cnt_tab[4] = { 28, 24, 20, 32 };
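/*
 * The table values are counts of UNIT (16 bytes in mallocng), so class sc
 * serves requests up to UNIT*size_classes[sc]-IB bytes, IB being the 4-byte
 * in-band slot header. Worked example: a 100-byte request needs 104 bytes of
 * slot space, and the smallest class with UNIT*size_classes[sc] >= 104 is
 * sc==6 (size_classes[6]==7, a 112-byte slot). small_cnt_tab[sc] holds
 * candidate slot counts for the smallest classes, stepped to later (smaller)
 * entries while usage of the class is low; med_cnt_tab supplies counts for
 * larger classes, chosen so cnt*size lands just below a power of two (see
 * the example in alloc_group below).
 */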

struct malloc_context ctx = { 0 };

struct meta *alloc_meta(void)
{
    // ...
        ctx.pagesize = get_page_size();
        // ...
        ctx.secret = get_random_secret();
    // ...
    size_t pagesize = PGSZ;
    if (pagesize < 4096) pagesize = 4096;
    if ((m = dequeue_head(&ctx.free_meta_head))) return m;
    if (!ctx.avail_meta_count) {
        int need_unprotect = 1;
        if (!ctx.avail_meta_area_count && ctx.brk!=-1) {
            uintptr_t new = ctx.brk + pagesize;
            // ...
                // some ancient kernels returned _ebss
                // instead of next page as initial brk.
                ctx.brk += -ctx.brk & (pagesize-1);
                new = ctx.brk + 2*pagesize;
            // ...
            if (brk(new) != new) {
                // ...
            } else {
                if (need_guard) mmap((void *)ctx.brk, pagesize,
                        PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
                // ...
                ctx.avail_meta_areas = (void *)(new - pagesize);
                ctx.avail_meta_area_count = pagesize>>12;
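                /*
                 * Illustration of the brk path: on first use the break
                 * is page-aligned and extended by two pages, with a
                 * PROT_NONE guard mapped at the old break if needed and
                 * the page at new-pagesize becoming the first meta area;
                 * later calls extend the break one page at a time. Meta
                 * areas are always carved in 4096-byte units, so a
                 * larger system page yields pagesize>>12 areas at once.
                 */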
                // ...
            }
        }
        if (!ctx.avail_meta_area_count) {
            size_t n = 2UL << ctx.meta_alloc_shift;
            p = mmap(0, n*pagesize, PROT_NONE,
                    MAP_PRIVATE|MAP_ANON, -1, 0);
            if (p==MAP_FAILED) return 0;
            ctx.avail_meta_areas = p + pagesize;
            ctx.avail_meta_area_count = (n-1)*(pagesize>>12);
            ctx.meta_alloc_shift++;
        }
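        /*
         * Fallback when brk is unusable: reserve (2<<meta_alloc_shift)
         * pages PROT_NONE, skip the first page (left unreadable as a
         * guard), and hand out the remaining (n-1)*(pagesize>>12)
         * 4k areas one at a time. Each exhaustion doubles the next
         * reservation, so the number of mappings stays logarithmic in
         * the number of meta areas ever allocated.
         */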
        p = ctx.avail_meta_areas;
        if ((uintptr_t)p & (pagesize-1)) need_unprotect = 0;
        // ...
        if (mprotect(p, pagesize, PROT_READ|PROT_WRITE)
            // ...
        ctx.avail_meta_area_count--;
        ctx.avail_meta_areas = p + 4096;
        if (ctx.meta_area_tail) {
            ctx.meta_area_tail->next = (void *)p;
        } else {
            ctx.meta_area_head = (void *)p;
        }
        ctx.meta_area_tail = (void *)p;
        ctx.meta_area_tail->check = ctx.secret;
        ctx.avail_meta_count = ctx.meta_area_tail->nslots
            = (4096-sizeof(struct meta_area))/sizeof *m;
        ctx.avail_meta = ctx.meta_area_tail->slots;
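        /*
         * A fresh 4k meta area is linked onto the meta_area list and
         * stamped with ctx.secret in its check field; get_meta() (not
         * shown here) compares that value before trusting a slot's
         * meta pointer, so metadata forged in ordinary heap memory is
         * caught. The area then yields
         * (4096-sizeof(struct meta_area))/sizeof(struct meta) slots,
         * handed out by bumping ctx.avail_meta below.
         */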
    }
    ctx.avail_meta_count--;
    m = ctx.avail_meta++;
    m->prev = m->next = 0;
    // ...
}

static uint32_t try_avail(struct meta **pm)
{
    struct meta *m = *pm;
    // ...
    uint32_t mask = m->avail_mask;
    // ...
        if (!m->freed_mask) {
            // ...
        }
        // ...
        mask = m->freed_mask;

        // skip fully-free group unless it's the only one
        // or it's a permanently non-freeable group
        if (mask == (2u<<m->last_idx)-1 && m->freeable) {
            // ...
            mask = m->freed_mask;
        }

        // activate more slots in a not-fully-active group
        // if needed, but only as a last resort. prefer using
        // any other group with free slots. this avoids
        // touching & dirtying as-yet-unused pages.
        if (!(mask & ((2u<<m->mem->active_idx)-1))) {
            // ...
                int cnt = m->mem->active_idx + 2;
                int size = size_classes[m->sizeclass]*UNIT;
                int span = UNIT + size*cnt;
                // activate up to next 4k boundary
                while ((span^(span+size-1)) < 4096) {
                    // ...
                }
                if (cnt > m->last_idx+1)
                    // ...
                m->mem->active_idx = cnt-1;
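                /*
                 * (span^(span+size-1)) < 4096 holds exactly when offsets
                 * span and span+size-1 fall in the same 4096-byte block,
                 * i.e. the next slot would not straddle a 4k boundary.
                 * E.g. with size==48 and span==112 the loop keeps going
                 * (offsets 112 and 159 share a page) and stops at the
                 * first slot that would cross a page boundary; cnt is
                 * then capped at last_idx+1.
                 */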
            }
        }
        mask = activate_group(m);
        // ...
        decay_bounces(m->sizeclass);
    }
    // ...
    m->avail_mask = mask-first;
    // ...
}
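/*
 * The returned value is the lowest set bit of the chosen mask (first, the
 * standard mask & -mask trick); subtracting it from avail_mask marks that
 * one slot as claimed, and callers turn it into a slot index with
 * a_ctz_32(). E.g. mask==0b10110 gives first==0b00010: slot 1 is taken and
 * 0b10100 remains available.
 */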

static int alloc_slot(int, size_t);

static struct meta *alloc_group(int sc, size_t req)
{
    size_t size = UNIT*size_classes[sc];
    // ...
    struct meta *m = alloc_meta();
    // ...
    size_t usage = ctx.usage_by_class[sc];
    size_t pagesize = PGSZ;
    // ...
        while (i<2 && 4*small_cnt_tab[sc][i] > usage)
            // ...
        cnt = small_cnt_tab[sc][i];
    // ...
        // look up the max number of slots fitting in a power-of-two
        // size from a table, along with the number of factors of two
        // we can divide out without a remainder or reaching 1.
        cnt = med_cnt_tab[sc&3];

        // reduce cnt to avoid excessive eager allocation.
        while (!(cnt&1) && 4*cnt > usage)
            // ...

    // data structures don't support groups whose slot offsets
    // in units don't fit in 16 bits.
    while (size*cnt >= 65536*UNIT)
        // ...
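    /*
     * Worked example of the medium-class selection: the entries 28/24/20/32
     * pair with the size_classes pattern so that cnt*size lands just below
     * a power of two; e.g. size_classes[16]==36 (576 bytes, sc&3==0) gives
     * 28*576 == 16128, just under 16384. While the class is lightly used
     * (4*cnt > usage) the count is halved as long as it stays even, and the
     * 16-bit offset constraint above halves it further whenever size*cnt
     * would reach 65536*UNIT.
     */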
    // If we selected a count of 1 above but it's not sufficient to use
    // mmap, increase to 2. Then it might be; if not, it will nest.
    if (cnt==1 && size*cnt+UNIT <= pagesize/2) cnt = 2;

    // All choices of size*cnt are "just below" a power of two, so anything
    // larger than half the page size should be allocated as whole pages.
    if (size*cnt+UNIT > pagesize/2) {
        // check/update bounce counter to start/increase retention
        // of freed maps, and inhibit use of low-count, odd-size
        // small mappings and single-slot groups if activated.
        int nosmall = is_bouncing(sc);
        // ...

        // since the following count reduction opportunities have
        // an absolute memory usage cost, don't overdo them. count
        // coarse usage as part of usage.
        if (!(sc&1) && sc<32) usage += ctx.usage_by_class[sc+1];

        // try to drop to a lower count if the one found above
        // increases usage by more than 25%. these reduced counts
        // roughly fill an integral number of pages, just not a
        // power of two, limiting amount of unusable space.
        if (4*cnt > usage && !nosmall) {
            if (0);
            else if ((sc&3)==1 && size*cnt>8*pagesize) cnt = 2;
            else if ((sc&3)==2 && size*cnt>4*pagesize) cnt = 3;
            else if ((sc&3)==0 && size*cnt>8*pagesize) cnt = 3;
            else if ((sc&3)==0 && size*cnt>2*pagesize) cnt = 5;
        }
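        /*
         * Example: for size_classes[40]==2340 (37440-byte slots, sc&3==0),
         * a lightly used class reaches this point with cnt==7, which would
         * still commit 64 pages up front; since size*cnt > 8*pagesize the
         * count drops to 3, a 28-page group whose last page carries only
         * about 2% slack.
         */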
        size_t needed = size*cnt + UNIT;
        needed += -needed & (pagesize-1);
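        /*
         * needed += -needed & (pagesize-1) is the usual round-up to a
         * power-of-two multiple: -needed & (pagesize-1) is the distance to
         * the next pagesize boundary (zero if already aligned), e.g.
         * 112336 -> 114688 with 4k pages. The same idiom aligns ctx.brk and
         * req elsewhere in this file.
         */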
        // produce an individually-mmapped allocation if usage is low,
        // bounce counter hasn't triggered, and either it saves memory
        // or it avoids eager slot allocation without wasting too much.
        if (!nosmall && cnt<=7) {
            // ...
            req += -req & (pagesize-1);
            if (req<size+UNIT || (req>=4*pagesize && 2*cnt>usage)) {
                // ...
            }
        }

        p = mmap(0, needed, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
        // ...
        m->maplen = needed>>12;
        // ...
        active_idx = (4096-UNIT)/size-1;
        if (active_idx > cnt-1) active_idx = cnt-1;
        if (active_idx < 0) active_idx = 0;
    } else {
        int j = size_to_class(UNIT+cnt*size-IB);
        int idx = alloc_slot(j, UNIT+cnt*size-IB);
        // ...
        struct meta *g = ctx.active[j];
        p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter);
        // ...
        p[-3] = (p[-3]&31) | (6<<5);
        for (int i=0; i<=cnt; i++)
            p[UNIT+i*size-4] = 0;
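        /*
         * Nested-group path: a group too small to justify whole pages
         * (size*cnt+UNIT <= pagesize/2) takes its storage from one slot of
         * a larger class j instead of its own mapping. The outer slot's
         * header byte is rewritten (reserved field set to 6) so the slot
         * can be distinguished from an ordinary in-use slot, and the byte
         * 4 below each nested slot position (the flag that would signal an
         * out-of-band 32-bit offset) is cleared so stale contents of the
         * outer slot cannot be misread as slot headers.
         */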
    }

    ctx.usage_by_class[sc] += cnt;
    m->avail_mask = (2u<<active_idx)-1;
    m->freed_mask = (2u<<(cnt-1))-1 - m->avail_mask;
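    /*
     * Example: with cnt==8 and active_idx==2, avail_mask == (2u<<2)-1 ==
     * 0b00000111 (slots 0-2 immediately allocatable) and freed_mask ==
     * (2u<<7)-1 - 0b111 == 0b11111000 (slots 3-7 exist but stay untouched
     * until try_avail() activates them), so pages backing the inactive tail
     * of the group are never dirtied early.
     */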
    // ...
    m->mem->active_idx = active_idx;
    // ...
}

static int alloc_slot(int sc, size_t req)
{
    uint32_t first = try_avail(&ctx.active[sc]);
    if (first) return a_ctz_32(first);

    struct meta *g = alloc_group(sc, req);
    // ...
    queue(&ctx.active[sc], g);
    // ...
}

void *malloc(size_t n)
{
    if (size_overflows(n)) return 0;
    // ...
    uint32_t mask, first;
    // ...

    if (n >= MMAP_THRESHOLD) {
        size_t needed = n + IB + UNIT;
        void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0);
        if (p==MAP_FAILED) return 0;
        // ...
        g->maplen = (needed+4095)/4096;
        g->avail_mask = g->freed_mask = 0;
        // use a global counter to cycle offset in
        // individually-mmapped allocations.
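        /*
         * Requests of MMAP_THRESHOLD bytes or more bypass the group
         * machinery: the mapping is sized for the request plus the 4-byte
         * in-band header and the UNIT group header, maplen records its
         * length in 4k units, and both masks start at zero because the
         * single slot is handed out immediately. The mmap_counter, read
         * into ctr before enframe() at the end of malloc(), cycles the
         * start offset within such slots from one large allocation to the
         * next.
         */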
        // ...
    }

    sc = size_to_class(n);
    // ...

    // use coarse size classes initially when there are not yet
    // any groups of desired size. this allows counts of 2 or 3
    // to be allocated at first rather than having to start with
    // 7 or 5, the min counts for even size classes.
    if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
        size_t usage = ctx.usage_by_class[sc|1];
        // if a new group may be allocated, count it toward
        // usage in deciding if we can use coarse class.
        if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask
                && !ctx.active[sc|1]->freed_mask))
            // ...
    }
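    /*
     * Concretely: for an even class sc with no groups yet, the request may
     * be served from the neighboring odd class sc|1, whose slightly larger
     * slots still fit the request; per the comment above, that lets the
     * first group be created with a count of 2 or 3 instead of the 7 or 5
     * minimum of the even classes, at the cost of some internal slack until
     * usage justifies the finer class.
     */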

    mask = g ? g->avail_mask : 0;
    // ...
    if (RDLOCK_IS_EXCLUSIVE || !MT)
        g->avail_mask = mask-first;
    else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
        // ...
    idx = a_ctz_32(first);
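    /*
     * Lock-free fast path: a slot is claimed by moving avail_mask from mask
     * to mask-first, clearing exactly the lowest available bit. With the
     * lock held exclusively, or in a single-threaded process, a plain store
     * suffices; otherwise a_cas() performs the update, and a failure means
     * another thread changed the mask first, so the claim must be
     * re-attempted with a fresh value. Only when no bit is available does
     * control fall through to alloc_slot() below.
     */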
    // ...
    idx = alloc_slot(sc, n);
    // ...
    ctr = ctx.mmap_counter;
    // ...
    return enframe(g, idx, n, ctr);
}

int is_allzero(void *p)
{
    struct meta *g = get_meta(p);
    return g->sizeclass >= 48 ||
        get_stride(g) < UNIT*size_classes[g->sizeclass];
}
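/*
 * Lets calloc() skip zeroing: sizeclass >= 48 covers individually mmapped
 * allocations, and a stride smaller than the nominal class span identifies
 * a single-slot group whose mapping was sized to the request; both come
 * straight from fresh anonymous pages, which the kernel provides
 * zero-filled, so no memset is needed.
 */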