/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <common/macro.h>
#include <common/types.h>
#include <common/kprint.h>
#include <common/lock.h>
#include <common/debug.h>
#include <mm/kmalloc.h>
#include <mm/slab.h>
#include <mm/buddy.h>

/*
 * slab_pool is conceptually static; the static qualifier is omitted so that
 * the unit tests can access it.
 */
struct slab_pointer slab_pool[SLAB_MAX_ORDER + 1];
static struct lock slabs_locks[SLAB_MAX_ORDER + 1];

/*
static inline int order_to_index(int order)
{
        return order - SLAB_MIN_ORDER;
}
*/

/* Return the smallest order such that (1UL << order) >= @size. */
static inline int size_to_order(unsigned long size)
{
        unsigned long order = 0;
        unsigned long tmp = size;

        while (tmp > 1) {
                tmp >>= 1;
                order += 1;
        }
        /* Round up when @size is not a power of two. */
        if (size > (1UL << order))
                order += 1;

        return (int)order;
}

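/*
 * Illustrative examples (derived from size_to_order above):
 *   size_to_order(1)  == 0
 *   size_to_order(32) == 5   (exact power of two)
 *   size_to_order(33) == 6   (rounded up to the next power of two)
 */
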
static inline unsigned long order_to_size(int order)
{
        return 1UL << order;
}

/* @set_or_clear: true for set and false for clear. */
static void set_or_clear_slab_in_page(void *addr, unsigned long size,
                                      bool set_or_clear)
{
        struct page *page;
        int order;
        unsigned long page_num;
        unsigned long i;
        void *page_addr;

        order = size_to_order(size / BUDDY_PAGE_SIZE);
        page_num = order_to_size(order);

        /* Set/Clear the `slab` field in each struct page. */
        for (i = 0; i < page_num; i++) {
                page_addr = (void *)((unsigned long)addr + i * BUDDY_PAGE_SIZE);
                page = virt_to_page(page_addr);
                if (set_or_clear)
                        page->slab = addr;
                else
                        page->slab = NULL;
        }
}

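/*
 * Note (explanatory): every physical page backing a slab stores a pointer to
 * the slab base in its struct page. This back-pointer is what lets
 * free_in_slab() recover the owning slab_header from an arbitrary object
 * address via virt_to_page().
 */
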
static void *alloc_slab_memory(unsigned long size)
{
        void *addr;
        int order;

        /* Allocate memory pages from the buddy system as a new slab. */
        order = size_to_order(size / BUDDY_PAGE_SIZE);
        addr = _get_pages(order, false);

        if (unlikely(addr == NULL)) {
                kwarn("%s failed: out of memory\n", __func__);
                return NULL;
        }

        set_or_clear_slab_in_page(addr, size, true);

        return addr;
}

static struct slab_header *init_slab_cache(int order, int size)
{
        void *addr;
        struct slab_slot_list *slot;
        struct slab_header *slab;
        unsigned long cnt, obj_size;
        unsigned long i;

        addr = alloc_slab_memory(size);
        if (unlikely(addr == NULL))
                /* Fail: no available memory. */
                return NULL;
        slab = (struct slab_header *)addr;

        obj_size = order_to_size(order);
        /* The first slot is used as metadata (struct slab_header). */
        cnt = size / obj_size - 1;

        slot = (struct slab_slot_list *)((unsigned long)addr + obj_size);
        slab->free_list_head = (void *)slot;
        slab->order = order;
        slab->total_free_cnt = cnt;
        slab->current_free_cnt = cnt;

        /* Chain the slots into a free list; the last slot has no next one. */
        for (i = 0; i < cnt - 1; ++i) {
                slot->next_free = (void *)((unsigned long)slot + obj_size);
                slot = (struct slab_slot_list *)((unsigned long)slot + obj_size);
        }
        slot->next_free = NULL;

        return slab;
}

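/*
 * Resulting slab layout (illustrative), with obj_size == 2^order and
 * N == size / obj_size - 1:
 *
 *   +-------------+--------+--------+-----+--------+
 *   | slab_header | slot 1 | slot 2 | ... | slot N |
 *   +-------------+--------+--------+-----+--------+
 *
 * The header occupies the first slot-sized chunk; the remaining slots are
 * chained into a singly linked free list through their next_free fields.
 */
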
static void choose_new_current_slab(struct slab_pointer *pool, int order)
{
        struct list_head *list;

        list = &(pool->partial_slab_list);
        if (list_empty(list)) {
                pool->current_slab = NULL;
        } else {
                struct slab_header *slab;

                slab = (struct slab_header *)list_entry(
                        list->next, struct slab_header, node);
                pool->current_slab = slab;
                list_del(list->next);
        }
}

static void *alloc_in_slab_impl(int order)
{
        struct slab_header *current_slab;
        struct slab_slot_list *free_list;
        void *next_slot;

        lock(&slabs_locks[order]);

        current_slab = slab_pool[order].current_slab;
        /* When serving the first allocation request. */
        if (unlikely(current_slab == NULL)) {
                current_slab = init_slab_cache(order, SIZE_OF_ONE_SLAB);
                if (current_slab == NULL) {
                        unlock(&slabs_locks[order]);
                        return NULL;
                }
                slab_pool[order].current_slab = current_slab;
        }

        /* Pop the first free slot off the current slab's free list. */
        free_list = (struct slab_slot_list *)current_slab->free_list_head;
        BUG_ON(free_list == NULL);

        next_slot = free_list->next_free;
        current_slab->free_list_head = next_slot;

        current_slab->current_free_cnt -= 1;
        /* When current_slab becomes full, choose a new slab as the current one. */
        if (unlikely(current_slab->current_free_cnt == 0))
                choose_new_current_slab(&slab_pool[order], order);

        unlock(&slabs_locks[order]);

        return (void *)free_list;
}

#if ENABLE_DETECTING_DOUBLE_FREE_IN_SLAB == ON
static int check_slot_is_free(struct slab_header *slab_header,
                              struct slab_slot_list *slot)
{
        struct slab_slot_list *cur_slot;

        cur_slot = (struct slab_slot_list *)(slab_header->free_list_head);

        while (cur_slot != NULL) {
                if (cur_slot == slot)
                        return 1;
                cur_slot = (struct slab_slot_list *)cur_slot->next_free;
        }

        return 0;
}
#endif

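/*
 * Note (explanatory): check_slot_is_free() walks the whole free list, so
 * enabling ENABLE_DETECTING_DOUBLE_FREE_IN_SLAB makes each free_in_slab()
 * call O(n) in the number of free slots of the slab.
 */
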
static void try_insert_full_slab_to_partial(struct slab_header *slab)
{
        int order;

        /*
         * @slab is not full, so it is already tracked (as the current slab
         * or on the partial list); nothing to do.
         */
        if (slab->current_free_cnt != 0)
                return;

        order = slab->order;
        list_append(&slab->node, &slab_pool[order].partial_slab_list);
}

static void try_return_slab_to_buddy(struct slab_header *slab, int order)
{
        /* Only return the slab when every slot in it is free. */
        if (slab->current_free_cnt != slab->total_free_cnt)
                return;

        if (slab == slab_pool[order].current_slab)
                choose_new_current_slab(&slab_pool[order], order);
        else
                list_del(&slab->node);

        /* Clear the slab field in the page structures before freeing them. */
        set_or_clear_slab_in_page(slab, SIZE_OF_ONE_SLAB, false);
        free_pages_without_record(slab);
}

/* Interfaces exported to the kernel/mm module. */

void init_slab(void)
{
        int order;

        /* Slab object sizes: 32, 64, 128, 256, 512, 1024, 2048 bytes. */
        for (order = SLAB_MIN_ORDER; order <= SLAB_MAX_ORDER; order++) {
                lock_init(&slabs_locks[order]);
                slab_pool[order].current_slab = NULL;
                init_list_head(&(slab_pool[order].partial_slab_list));
        }
        kdebug("mm: finished initializing the slab allocators\n");
}

void *alloc_in_slab(unsigned long size, size_t *real_size)
{
        int order;

        BUG_ON(size > order_to_size(SLAB_MAX_ORDER));

        order = size_to_order(size);
        if (order < SLAB_MIN_ORDER)
                order = SLAB_MIN_ORDER;

        return alloc_in_slab_impl(order);
}

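/*
 * Usage sketch (illustrative only; the order-7 figure follows from
 * size_to_order above):
 *
 *   size_t real_size;
 *   void *obj = alloc_in_slab(100, &real_size);
 *   // 100 bytes is rounded up to the order-7 pool, i.e. a 128-byte slot.
 *   ...
 *   free_in_slab(obj);
 */
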
void free_in_slab(void *addr)
{
        struct page *page;
        struct slab_header *slab;
        struct slab_slot_list *slot;
        int order;

        slot = (struct slab_slot_list *)addr;
        page = virt_to_page(addr);
        BUG_ON(page == NULL);

        slab = page->slab;
        order = slab->order;
        lock(&slabs_locks[order]);

        try_insert_full_slab_to_partial(slab);

#if ENABLE_DETECTING_DOUBLE_FREE_IN_SLAB == ON
        /*
         * SLAB double free detection: check whether the slot to free is
         * already in the free list.
         */
        if (check_slot_is_free(slab, slot) == 1) {
                kinfo("SLAB: double free detected. Address is %p\n",
                      (void *)slot);
                BUG_ON(1);
        }
#endif

        slot->next_free = slab->free_list_head;
        slab->free_list_head = slot;
        slab->current_free_cnt += 1;

        try_return_slab_to_buddy(slab, order);

        unlock(&slabs_locks[order]);
}

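/*
 * Note (explanatory): try_insert_full_slab_to_partial() runs before the slot
 * is pushed back because a full slab is linked on neither the partial list
 * nor current_slab; it must become reachable again before its free count
 * rises above zero.
 */
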
/*
 * This interface is not marked as static because it is needed by the unit
 * tests.
 */
unsigned long get_free_slot_number(int order)
{
        struct slab_header *slab;
        struct slab_slot_list *slot;
        unsigned long current_slot_num = 0;
        unsigned long check_slot_num = 0;

        lock(&slabs_locks[order]);

        /* Count the free slots of the current slab. */
        slab = slab_pool[order].current_slab;
        if (slab) {
                slot = (struct slab_slot_list *)slab->free_list_head;
                while (slot != NULL) {
                        current_slot_num++;
                        slot = slot->next_free;
                }
                check_slot_num += slab->current_free_cnt;
        }

        if (list_empty(&slab_pool[order].partial_slab_list))
                goto out;

        /* Count the free slots of every partial slab. */
        for_each_in_list (slab, struct slab_header, node,
                          &slab_pool[order].partial_slab_list) {
                slot = (struct slab_slot_list *)slab->free_list_head;
                while (slot != NULL) {
                        current_slot_num++;
                        slot = slot->next_free;
                }
                check_slot_num += slab->current_free_cnt;
        }

out:
        unlock(&slabs_locks[order]);

        /* The counted slots must match the recorded free counts. */
        BUG_ON(check_slot_num != current_slot_num);

        return current_slot_num;
}

/* Get the size of free memory in the slab allocator. */
unsigned long get_free_mem_size_from_slab(void)
{
        int order;
        unsigned long current_slot_size;
        unsigned long slot_num;
        unsigned long total_size = 0;

        for (order = SLAB_MIN_ORDER; order <= SLAB_MAX_ORDER; order++) {
                current_slot_size = order_to_size(order);
                slot_num = get_free_slot_number(order);
                total_size += (current_slot_size * slot_num);

                kdebug("slab memory chunk size : 0x%lx, num : %lu\n",
                       current_slot_size,
                       slot_num);
        }

        return total_size;
}