#ifndef MALLOC_META_H
#define MALLOC_META_H

#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include "glue.h"

__attribute__((__visibility__("hidden")))
extern const uint16_t size_classes[];

#define MMAP_THRESHOLD 131052

#define UNIT 16
#define IB 4

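// A group is a run of up to 32 equal-sized slots preceded by this
// 16-byte (UNIT) header. All other bookkeeping lives out-of-band in
// the struct meta the header points back to; active_idx is the
// index of the highest slot made available for allocation so far.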
struct group {
	struct meta *meta;
	unsigned char active_idx:5;
	char pad[UNIT - sizeof(struct meta *) - 1];
	unsigned char storage[];
};

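// Out-of-band metadata for one group. prev/next link metas of the
// same size class into a circular doubly linked list; avail_mask
// and freed_mask track per-slot availability and pending frees.
// last_idx is the highest slot index (slot count minus one); maplen
// is the mapping length in 4k pages when the group was individually
// mmapped, or 0 when it is nested inside another group's slot;
// sizeclass is 63 for oversized mmapped allocations.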
struct meta {
	struct meta *prev, *next;
	struct group *mem;
	volatile int avail_mask, freed_mask;
	uintptr_t last_idx:5;
	uintptr_t freeable:1;
	uintptr_t sizeclass:6;
	uintptr_t maplen:8*sizeof(uintptr_t)-12;
};

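// Page-aligned arenas from which struct meta slots are carved. The
// check field holds the global secret, letting get_meta validate a
// purported meta pointer by inspecting the start of its page.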
struct meta_area {
	uint64_t check;
	struct meta_area *next;
	int nslots;
	struct meta slots[];
};

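// Global allocator state. secret seeds the integrity checks (and
// the pointer encoding when MALLOC_FREELIST_HARDENED is defined);
// active[] roots the circular list of active groups for each of the
// 48 size classes; unmap_seq, bounces, and seq drive the
// anti-bounce heuristic at the bottom of this header.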
struct malloc_context {
	uint64_t secret;
#ifndef PAGESIZE
	size_t pagesize;
#endif
	int init_done;
	unsigned mmap_counter;
	struct meta *free_meta_head;
	struct meta *avail_meta;
	size_t avail_meta_count, avail_meta_area_count, meta_alloc_shift;
	struct meta_area *meta_area_head, *meta_area_tail;
	unsigned char *avail_meta_areas;
	struct meta *active[48];
	size_t usage_by_class[48];
	uint8_t unmap_seq[32], bounces[32];
	uint8_t seq;
	uintptr_t brk;
};

__attribute__((__visibility__("hidden")))
extern struct malloc_context ctx;

#ifdef PAGESIZE
#define PGSZ PAGESIZE
#else
#define PGSZ ctx.pagesize
#endif

__attribute__((__visibility__("hidden")))
struct meta *alloc_meta(void);

__attribute__((__visibility__("hidden")))
int is_allzero(void *);

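// With MALLOC_FREELIST_HARDENED, heap-internal pointers are stored
// XORed with the per-process secret. XOR is its own inverse, so the
// same function both encodes and decodes; without hardening it is
// the identity.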
static inline void *encode_ptr(void *ptr, uint64_t key)
{
#ifdef MALLOC_FREELIST_HARDENED
	return (void *)((uintptr_t)ptr ^ key);
#else
	(void)key;
	return ptr;
#endif
}

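// Insert m at the tail of the circular doubly linked list rooted at
// *phead, e.g. queue(&ctx.active[sc], m). The hardened build first
// checks that the head's neighbor links are consistent and crashes
// on any sign of corruption.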
static inline void queue(struct meta **phead, struct meta *m)
{
	assert(!m->next);
	assert(!m->prev);
	if (*phead) {
		struct meta *head = *phead;
#ifdef MALLOC_FREELIST_HARDENED
		if (head->next->prev != head || head->prev->next != head) {
			a_crash();
		}
#endif
		m->next = head;
		m->prev = head->prev;
		m->next->prev = m->prev->next = m;
	} else {
		m->prev = m->next = m;
		*phead = m;
	}
}

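// Remove m from its list, advancing *phead past m when m is the
// head and clearing *phead when m is the only member.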
static inline void dequeue(struct meta **phead, struct meta *m)
{
	if (m->next != m) {
		m->prev->next = m->next;
		m->next->prev = m->prev;
		if (*phead == m) *phead = m->next;
	} else {
		*phead = 0;
	}
	m->prev = m->next = 0;
}

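// Pop and return the head of the list, or a null pointer if the
// list is empty.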
static inline struct meta *dequeue_head(struct meta **phead)
{
	struct meta *m = *phead;
	if (m) dequeue(phead, m);
	return m;
}

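// Wipe a meta before returning it to the free list so that stale
// masks or links cannot be misinterpreted when it is reused.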
static inline void free_meta(struct meta *m)
{
	*m = (struct meta){0};
	queue(&ctx.free_meta_head, m);
}

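// Atomically move the freed slots covered by active_idx (the low
// active_idx+1 bits) from freed_mask to avail_mask, making them
// allocatable again. Only called when avail_mask is empty.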
static inline uint32_t activate_group(struct meta *m)
{
	assert(!m->avail_mask);
	uint32_t mask, act = (2u<<m->mem->active_idx)-1;
	do mask = m->freed_mask;
	while (a_cas(&m->freed_mask, mask, mask&~act)!=mask);
	return m->avail_mask = mask & act;
}

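// The low 5 bits of the in-band header byte at p[-3] hold the slot
// index.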
static inline int get_slot_index(const unsigned char *p)
{
	return p[-3] & 31;
}

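// Recover the out-of-band meta for an allocation p and validate it.
// The 16-bit offset at p-2 (or, when the check byte p[-4] is
// nonzero, a 32-bit offset at p-8) locates the group header; the
// header's meta pointer is decoded and cross-checked against the
// group address, the slot masks, the size class table, and the
// secret in the containing meta_area. Any mismatch fails an assert,
// trapping invalid or forged pointers passed to free or realloc.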
static inline struct meta *get_meta(const unsigned char *p)
{
	assert(!((uintptr_t)p & 15));
	int offset = *(const uint16_t *)(p - 2);
	int index = get_slot_index(p);
	if (p[-4]) {
		assert(!offset);
		offset = *(const uint32_t *)(p - 8);
		assert(offset > 0xffff);
	}
	const struct group *base = (const void *)(p - UNIT*offset - UNIT);
	const struct meta *meta = encode_ptr(base->meta, ctx.secret);
	assert(meta->mem == base);
	assert(index <= meta->last_idx);
	assert(!(meta->avail_mask & (1u<<index)));
	assert(!(meta->freed_mask & (1u<<index)));
	const struct meta_area *area = (void *)((uintptr_t)meta & -4096);
	assert(area->check == ctx.secret);
	if (meta->sizeclass < 48) {
		assert(offset >= size_classes[meta->sizeclass]*index);
		assert(offset < size_classes[meta->sizeclass]*(index+1));
	} else {
		assert(meta->sizeclass == 63);
	}
	if (meta->maplen) {
		assert(offset <= meta->maplen*4096UL/UNIT - 1);
	}
	return (struct meta *)meta;
}

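// Compute the nominal (requested) size of the allocation at p whose
// slot ends at end. The reserved slack is kept in the top 3 bits of
// p[-3]; the value 5 means the real 32-bit count is stored in the
// last 4 bytes of the slot. The zero byte terminating the nominal
// region and the slot's overflow byte act as overflow canaries.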
static inline size_t get_nominal_size(const unsigned char *p, const unsigned char *end)
{
	size_t reserved = p[-3] >> 5;
	if (reserved >= 5) {
		assert(reserved == 5);
		reserved = *(const uint32_t *)(end-4);
		assert(reserved >= 5);
		assert(!end[-5]);
	}
	assert(reserved <= end-p);
	assert(!*(end-reserved));
	// also check the slot's overflow byte
	assert(!*end);
	return end-reserved-p;
}

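// Byte distance between consecutive slots of g's group. A
// single-slot mmapped group spans its whole mapping minus the group
// header; otherwise the stride is the size class in UNITs.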
static inline size_t get_stride(const struct meta *g)
{
	if (!g->last_idx && g->maplen) {
		return g->maplen*4096UL - UNIT;
	} else {
		return UNIT*size_classes[g->sizeclass];
	}
}

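// Record the nominal size n by storing the reserved slack (end-p-n)
// in the top 3 bits of p[-3], spilling counts of 5 or more into a
// 32-bit footer, and writing the zero canary byte that
// get_nominal_size later verifies.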
static inline void set_size(unsigned char *p, unsigned char *end, size_t n)
{
	int reserved = end-p-n;
	if (reserved) end[-reserved] = 0;
	if (reserved >= 5) {
		*(uint32_t *)(end-4) = reserved;
		end[-5] = 0;
		reserved = 5;
	}
	p[-3] = (p[-3]&31) + (reserved<<5);
}

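// Write the in-band header for an allocation of size n in slot idx
// of g's group and return the user pointer. The start offset within
// the slot is cycled (seeded by ctr) so that a just-freed address
// is not immediately handed out again, lengthening the window in
// which a double free is caught.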
static inline void *enframe(struct meta *g, int idx, size_t n, int ctr)
{
	size_t stride = get_stride(g);
	size_t slack = (stride-IB-n)/UNIT;
	unsigned char *p = g->mem->storage + stride*idx;
	unsigned char *end = p+stride-IB;
	// cycle offset within slot to increase interval to address
	// reuse, facilitate trapping double-free.
	int off = (p[-3] ? *(uint16_t *)(p-2) + 1 : ctr) & 255;
	assert(!p[-4]);
	if (off > slack) {
		size_t m = slack;
		m |= m>>1; m |= m>>2; m |= m>>4;
		off &= m;
		if (off > slack) off -= slack+1;
		assert(off <= slack);
	}
	if (off) {
		// store offset in unused header at offset zero
		// if enframing at non-zero offset.
		*(uint16_t *)(p-2) = off;
		p[-3] = 7<<5;
		p += UNIT*off;
		// for nonzero offset there is no permanent check
		// byte, so make one.
		p[-4] = 0;
	}
	*(uint16_t *)(p-2) = (size_t)(p-g->mem->storage)/UNIT;
	p[-3] = idx;
	set_size(p, end, n);
	return p;
}

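// Map a request size to a size class index. The first 10 classes
// cover multiples of UNIT directly (after adding the 4-byte in-band
// header); above that, classes are spaced 4 per power of two via
// the size_classes table. For example, size_to_class(50) returns
// (50+3)>>4 == 3.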
static inline int size_to_class(size_t n)
{
	n = (n+IB-1)>>4;
	if (n<10) return n;
	n++;
	int i = (28-a_clz_32(n))*4 + 8;
	if (n>size_classes[i+1]) i+=2;
	if (n>size_classes[i]) i++;
	return i;
}

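// Reject request sizes so large that adding headers or rounding to
// a page could overflow; such requests fail with ENOMEM.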
static inline int size_overflows(size_t n)
{
	if (n >= SIZE_MAX/2 - 4096) {
		errno = ENOMEM;
		return 1;
	}
	return 0;
}

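// The remaining routines implement the anti-bounce heuristic: seq
// is a coarse event counter, unmap_seq records when each tracked
// size class (classes 7-38) last had a group unmapped, and bounces
// counts recent map/unmap cycles per class. A class that reaches
// the bouncing threshold can then keep a free group cached rather
// than repeatedly returning memory to and re-requesting it from the
// kernel.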
static inline void step_seq(void)
{
	if (ctx.seq==255) {
		for (int i=0; i<32; i++) ctx.unmap_seq[i] = 0;
		ctx.seq = 1;
	} else {
		ctx.seq++;
	}
}

static inline void record_seq(int sc)
{
	if (sc-7U < 32) ctx.unmap_seq[sc-7] = ctx.seq;
}

static inline void account_bounce(int sc)
{
	if (sc-7U < 32) {
		int seq = ctx.unmap_seq[sc-7];
		if (seq && ctx.seq-seq < 10) {
			if (ctx.bounces[sc-7]+1 < 100)
				ctx.bounces[sc-7]++;
			else
				ctx.bounces[sc-7] = 150;
		}
	}
}

static inline void decay_bounces(int sc)
{
	if (sc-7U < 32 && ctx.bounces[sc-7])
		ctx.bounces[sc-7]--;
}

static inline int is_bouncing(int sc)
{
	return (sc-7U < 32 && ctx.bounces[sc-7] >= 100);
}

#endif