#define _BSD_SOURCE
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>

#include "meta.h"

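// A region to hand to munmap() after the lock is released;
// len of zero means there is nothing to unmap.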
struct mapinfo {
	void *base;
	size_t len;
};

static struct mapinfo nontrivial_free(struct meta *, int);

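// Tear down a group whose slots are all free. Called with the lock
// held; any mapping to be unmapped is returned so the caller can
// munmap it after unlocking.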
static struct mapinfo free_group(struct meta *g)
{
	struct mapinfo mi = { 0 };
	int sc = g->sizeclass;
	if (sc < 48) {
		ctx.usage_by_class[sc] -= g->last_idx+1;
	}
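	// groups with their own mapping are handed back for munmap;
	// the unmap is recorded so is_bouncing() can detect classes
	// that repeatedly unmap and recreate groups.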
	if (g->maplen) {
		step_seq();
		record_seq(sc);
		mi.base = g->mem;
		mi.len = g->maplen*4096UL;
	} else {
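		// the group was nested inside another group's slot;
		// clear the nested header and free the enclosing slot.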
		void *p = g->mem;
		struct meta *m = get_meta(p);
		int idx = get_slot_index(p);
		g->mem->meta = 0;
		// not checking size/reserved here; it's intentionally invalid
		mi = nontrivial_free(m, idx);
	}
	free_meta(g);
	return mi;
}

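// Decide whether a group whose slots are all free should actually be
// torn down, or kept around to satisfy future allocations.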
static int okay_to_free(struct meta *g)
{
	int sc = g->sizeclass;

	if (!g->freeable) return 0;

	// always free individual mmaps not suitable for reuse
	if (sc >= 48 || get_stride(g) < UNIT*size_classes[sc])
		return 1;

	// always free groups allocated inside another group's slot
	// since recreating them should not be expensive and they
	// might be blocking freeing of a much larger group.
	if (!g->maplen) return 1;

	// if there is another non-full group, free this one to
	// consolidate future allocations, reduce fragmentation.
	if (g->next != g) return 1;

	// free any group in a size class that's not bouncing
	if (!is_bouncing(sc)) return 1;

	size_t cnt = g->last_idx+1;
	size_t usage = ctx.usage_by_class[sc];

	// if usage is high enough that a larger count should be
	// used, free the low-count group so a new one will be made.
	if (9*cnt <= usage && cnt < 20)
		return 1;

	// otherwise, keep the last group in a bouncing class.
	return 0;
}

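// Locked free path: handles frees that change the group's state, i.e.
// the first free in a full group or the last free that empties it.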
static struct mapinfo nontrivial_free(struct meta *g, int i)
{
	uint32_t self = 1u<<i;
	int sc = g->sizeclass;
	uint32_t mask = g->freed_mask | g->avail_mask;

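	// freeing this slot leaves every slot free or available;
	// the group itself may now be eligible for teardown.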
	if (mask+self == (2u<<g->last_idx)-1 && okay_to_free(g)) {
		// any multi-slot group is necessarily on an active list
		// here, but single-slot groups might or might not be.
		if (g->next) {
			assert(sc < 48);
			int activate_new = (ctx.active[sc]==g);
			dequeue(&ctx.active[sc], g);
			if (activate_new && ctx.active[sc])
				activate_group(ctx.active[sc]);
		}
		return free_group(g);
	} else if (!mask) {
		assert(sc < 48);
		// might still be active if there were no allocations
		// after last available slot was taken.
		if (ctx.active[sc] != g) {
			queue(&ctx.active[sc], g);
		}
	}
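	// record the freed slot; atomic because lock-free readers in
	// free() may be inspecting the mask concurrently.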
	a_or(&g->freed_mask, self);
	return (struct mapinfo){ 0 };
}

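// free: validate the slot, poison its header, return whole pages to
// the OS where possible, then mark the slot free.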
void free(void *p)
{
	if (!p) return;

	struct meta *g = get_meta(p);
	int idx = get_slot_index(p);
	size_t stride = get_stride(g);
	unsigned char *start = g->mem->storage + stride*idx;
	unsigned char *end = start + stride - IB;
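	// result is discarded; the call checks the slot's reserved
	// bytes and traps if they were clobbered.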
	get_nominal_size(p, end);
	uint32_t self = 1u<<idx, all = (2u<<g->last_idx)-1;
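	// poison the index/reserved byte so a dangling pointer to this
	// slot can no longer pass get_meta()'s consistency checks.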
	((unsigned char *)p)[-3] = 255;
	// invalidate offset to group header, and cycle offset of
	// used region within slot if current offset is zero.
	*(uint16_t *)((char *)p-2) = 0;

	// release any whole pages contained in the slot to be freed
	// unless it's a single-slot group that will be unmapped.
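	// (the xor test is a cheap way to rule out slots too small to
	// contain a whole aligned page.)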
	if (((uintptr_t)(start-1) ^ (uintptr_t)end) >= 2*PGSZ && g->last_idx) {
		unsigned char *base = start + (-(uintptr_t)start & (PGSZ-1));
		size_t len = (end-base) & -PGSZ;
		if (len) {
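			// advisory only; preserve errno in case it fails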
			int e = errno;
			madvise(base, len, MADV_FREE);
			errno = e;
		}
	}

	// atomic free without locking if this is neither first nor last slot
	for (;;) {
		uint32_t freed = g->freed_mask;
		uint32_t avail = g->avail_mask;
		uint32_t mask = freed | avail;
		assert(!(mask&self));
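		// if no slot was freed yet, or this free empties the
		// group, group-level state may change; take the lock.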
		if (!freed || mask+self==all) break;
		if (!MT)
			g->freed_mask = freed+self;
		else if (a_cas(&g->freed_mask, freed, freed+self)!=freed)
			continue;
		return;
	}

	wrlock();
	struct mapinfo mi = nontrivial_free(g, idx);
	unlock();
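	// now that the lock is dropped, unmap anything free_group
	// returned, preserving errno across munmap.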
	if (mi.len) {
		int e = errno;
		munmap(mi.base, mi.len);
		errno = e;
	}
}