#ifndef MALLOC_META_H
#define MALLOC_META_H

#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include "glue.h"

__attribute__((__visibility__("hidden")))
extern const uint16_t size_classes[];

#define MMAP_THRESHOLD 131052

#define UNIT 16
#define IB 4

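// each group of up to 32 slots begins with a UNIT-sized header whose
// first word points back to the group's out-of-band meta struct; only
// IB (4) bytes of in-band header precede each allocation.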
struct group {
	struct meta *meta;
	unsigned char active_idx:5;
	char pad[UNIT - sizeof(struct meta *) - 1];
	unsigned char storage[];
};

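// out-of-band bookkeeping for one group: queue links, bitmasks of
// available and freed slots, and the group's size class or mmap length.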
struct meta {
	struct meta *prev, *next;
	struct group *mem;
	volatile int avail_mask, freed_mask;
	uintptr_t last_idx:5;
	uintptr_t freeable:1;
	uintptr_t sizeclass:6;
	uintptr_t maplen:8*sizeof(uintptr_t)-12;
};

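// meta structs are carved out of page-aligned meta areas that begin
// with a secret check value, so forged pointers can be caught in
// get_meta below.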
struct meta_area {
	uint64_t check;
	struct meta_area *next;
	int nslots;
	struct meta slots[];
};

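// global allocator state, instantiated once as ctx below.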
struct malloc_context {
	uint64_t secret;
#ifndef PAGESIZE
	size_t pagesize;
#endif
	int init_done;
	unsigned mmap_counter;
	struct meta *free_meta_head;
	struct meta *avail_meta;
	size_t avail_meta_count, avail_meta_area_count, meta_alloc_shift;
	struct meta_area *meta_area_head, *meta_area_tail;
	unsigned char *avail_meta_areas;
	struct meta *active[48];
	size_t usage_by_class[48];
	uint8_t unmap_seq[32], bounces[32];
	uint8_t seq;
	uintptr_t brk;
};

__attribute__((__visibility__("hidden")))
extern struct malloc_context ctx;

#ifdef PAGESIZE
#define PGSZ PAGESIZE
#else
#define PGSZ ctx.pagesize
#endif

__attribute__((__visibility__("hidden")))
struct meta *alloc_meta(void);

__attribute__((__visibility__("hidden")))
int is_allzero(void *);

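// intrusive circular doubly-linked lists of meta structs; an empty
// list is a null head, and a singleton element links to itself.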
static inline void queue(struct meta **phead, struct meta *m)
{
	assert(!m->next);
	assert(!m->prev);
	if (*phead) {
		struct meta *head = *phead;
		m->next = head;
		m->prev = head->prev;
		m->next->prev = m->prev->next = m;
	} else {
		m->prev = m->next = m;
		*phead = m;
	}
}

static inline void dequeue(struct meta **phead, struct meta *m)
{
	if (m->next != m) {
		m->prev->next = m->next;
		m->next->prev = m->prev;
		if (*phead == m) *phead = m->next;
	} else {
		*phead = 0;
	}
	m->prev = m->next = 0;
}

static inline struct meta *dequeue_head(struct meta **phead)
{
	struct meta *m = *phead;
	if (m) dequeue(phead, m);
	return m;
}

static inline void free_meta(struct meta *m)
{
	*m = (struct meta){0};
	queue(&ctx.free_meta_head, m);
}

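// atomically claim the freed slots up to active_idx, clearing them
// from freed_mask and republishing them in avail_mask.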
static inline uint32_t activate_group(struct meta *m)
{
	assert(!m->avail_mask);
	uint32_t mask, act = (2u<<m->mem->active_idx)-1;
	do mask = m->freed_mask;
	while (a_cas(&m->freed_mask, mask, mask&~act)!=mask);
	return m->avail_mask = mask & act;
}

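// decode the 4-byte in-band header: p[-3] holds the slot index in its
// low 5 bits, p[-2] a 16-bit offset in UNITs back to the group header,
// and a nonzero p[-4] signals a 32-bit offset stored at p-8 instead.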
static inline int get_slot_index(const unsigned char *p)
{
	return p[-3] & 31;
}

static inline struct meta *get_meta(const unsigned char *p)
{
	assert(!((uintptr_t)p & 15));
	int offset = *(const uint16_t *)(p - 2);
	int index = get_slot_index(p);
	if (p[-4]) {
		assert(!offset);
		offset = *(uint32_t *)(p - 8);
		assert(offset > 0xffff);
	}
	const struct group *base = (const void *)(p - UNIT*offset - UNIT);
	const struct meta *meta = base->meta;
	assert(meta->mem == base);
	assert(index <= meta->last_idx);
	assert(!(meta->avail_mask & (1u<<index)));
	assert(!(meta->freed_mask & (1u<<index)));
	const struct meta_area *area = (void *)((uintptr_t)meta & -4096);
	assert(area->check == ctx.secret);
	if (meta->sizeclass < 48) {
		assert(offset >= size_classes[meta->sizeclass]*index);
		assert(offset < size_classes[meta->sizeclass]*(index+1));
	} else {
		assert(meta->sizeclass == 63);
	}
	if (meta->maplen) {
		assert(offset <= meta->maplen*4096UL/UNIT - 1);
	}
	return (struct meta *)meta;
}

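// nominal size is the slot end minus the reserved tail; reserved space
// is terminated by a zero byte that doubles as an overflow check.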
static inline size_t get_nominal_size(const unsigned char *p, const unsigned char *end)
{
	size_t reserved = p[-3] >> 5;
	if (reserved >= 5) {
		assert(reserved == 5);
		reserved = *(const uint32_t *)(end-4);
		assert(reserved >= 5);
		assert(!end[-5]);
	}
	assert(reserved <= end-p);
	assert(!*(end-reserved));
	// also check the slot's overflow byte
	assert(!*end);
	return end-reserved-p;
}

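// stride between consecutive slots: the whole mapping for single-slot
// mmapped groups, otherwise the size class times UNIT.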
static inline size_t get_stride(const struct meta *g)
{
	if (!g->last_idx && g->maplen) {
		return g->maplen*4096UL - UNIT;
	} else {
		return UNIT*size_classes[g->sizeclass];
	}
}

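// record unused (reserved) space at the end of the slot; counts of 5
// or more spill into a 32-bit footer at end-4.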
static inline void set_size(unsigned char *p, unsigned char *end, size_t n)
{
	int reserved = end-p-n;
	if (reserved) end[-reserved] = 0;
	if (reserved >= 5) {
		*(uint32_t *)(end-4) = reserved;
		end[-5] = 0;
		reserved = 5;
	}
	p[-3] = (p[-3]&31) + (reserved<<5);
}

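// frame a slot for return to the caller: pick a cycled start offset
// inside the slot, then write the in-band header and the size.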
static inline void *enframe(struct meta *g, int idx, size_t n, int ctr)
{
	size_t stride = get_stride(g);
	size_t slack = (stride-IB-n)/UNIT;
	unsigned char *p = g->mem->storage + stride*idx;
	unsigned char *end = p+stride-IB;
	// cycle offset within slot to increase interval to address
	// reuse, facilitate trapping double-free.
	int off = (p[-3] ? *(uint16_t *)(p-2) + 1 : ctr) & 255;
	assert(!p[-4]);
	if (off > slack) {
		size_t m = slack;
		m |= m>>1; m |= m>>2; m |= m>>4;
		off &= m;
		if (off > slack) off -= slack+1;
		assert(off <= slack);
	}
	if (off) {
		// store offset in unused header at offset zero
		// if enframing at non-zero offset.
		*(uint16_t *)(p-2) = off;
		p[-3] = 7<<5;
		p += UNIT*off;
		// for nonzero offset there is no permanent check
		// byte, so make one.
		p[-4] = 0;
	}
	*(uint16_t *)(p-2) = (size_t)(p-g->mem->storage)/UNIT;
	p[-3] = idx;
	set_size(p, end, n);
	return p;
}

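// sizes up to 10 UNITs map 1:1 to classes; above that, classes are
// spaced four per doubling via the size_classes table.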
static inline int size_to_class(size_t n)
{
	n = (n+IB-1)>>4;
	if (n<10) return n;
	n++;
	int i = (28-a_clz_32(n))*4 + 8;
	if (n>size_classes[i+1]) i+=2;
	if (n>size_classes[i]) i++;
	return i;
}

static inline int size_overflows(size_t n)
{
	if (n >= SIZE_MAX/2 - 4096) {
		errno = ENOMEM;
		return 1;
	}
	return 0;
}

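// the seq counter and per-class unmap_seq/bounces records below track
// size classes whose groups are repeatedly unmapped and re-created in
// quick succession ("bouncing"), so that freeing can be deferred for
// them instead of churning mappings.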
static inline void step_seq(void)
{
	if (ctx.seq==255) {
		for (int i=0; i<32; i++) ctx.unmap_seq[i] = 0;
		ctx.seq = 1;
	}
	ctx.seq++;
}

static inline void record_seq(int sc)
{
	if (sc-7U < 32) ctx.unmap_seq[sc-7] = ctx.seq;
}

static inline void account_bounce(int sc)
{
	if (sc-7U < 32) {
		int seq = ctx.unmap_seq[sc-7];
		if (seq && ctx.seq-seq < 10) {
			if (ctx.bounces[sc-7]+1 < 100)
				ctx.bounces[sc-7]++;
			else
				ctx.bounces[sc-7] = 150;
		}
	}
}

static inline void decay_bounces(int sc)
{
	if (sc-7U < 32 && ctx.bounces[sc-7])
		ctx.bounces[sc-7]--;
}

static inline int is_bouncing(int sc)
{
	return (sc-7U < 32 && ctx.bounces[sc-7] >= 100);
}

#endif