/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <common/util.h>
#include <common/macro.h>
#include <common/kprint.h>
#include <mm/buddy.h>
16
static struct page *get_buddy_chunk(struct phys_mem_pool *pool,
                                    struct page *chunk)
{
        vaddr_t chunk_addr;
        vaddr_t buddy_chunk_addr;
        int order;

        /* Get the address of the chunk. */
        chunk_addr = (vaddr_t)page_to_virt(chunk);
        order = chunk->order;
        /*
         * Calculate the address of the buddy chunk according to the address
         * relationship between buddies.
         */
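        /*
         * Illustrative example (values assumed, not taken from this file):
         * with BUDDY_PAGE_SIZE_ORDER == 12 (4 KiB pages) and order == 1, the
         * chunk size is 0x2000, so a chunk at 0x4000 has its buddy at
         * 0x4000 ^ 0x2000 == 0x6000, and vice versa. Note that this XOR
         * trick assumes the pool start is aligned to the maximum chunk size.
         */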
        buddy_chunk_addr =
                chunk_addr ^ (1UL << (order + BUDDY_PAGE_SIZE_ORDER));

        /* Check whether the buddy_chunk_addr belongs to pool. */
        if ((buddy_chunk_addr < pool->pool_start_addr)
            || ((buddy_chunk_addr + (1UL << order) * BUDDY_PAGE_SIZE)
                > (pool->pool_start_addr + pool->pool_mem_size))) {
                return NULL;
        }

        return virt_to_page((void *)buddy_chunk_addr);
}

/* The maximum recursion depth of split_chunk is bounded by the macro
 * BUDDY_MAX_ORDER. */
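/*
 * Worked example (illustrative only): splitting an order-3 chunk down to an
 * order-0 request leaves one free order-2, one free order-1, and one free
 * order-0 buddy on the free lists, and returns the remaining order-0 chunk.
 */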
static struct page *split_chunk(struct phys_mem_pool *pool, int order,
                                struct page *chunk)
{
        struct page *buddy_chunk;
        struct list_head *free_list;

        /*
         * If the @chunk's order equals the required order,
         * return this chunk.
         */
        if (chunk->order == order)
                return chunk;

        /*
         * If the current order is larger than the required order,
         * split the memory chunk into two halves.
         */
        chunk->order -= 1;

        buddy_chunk = get_buddy_chunk(pool, chunk);
        /* The buddy_chunk must exist since we are splitting a large chunk. */
        BUG_ON(buddy_chunk == NULL);

        /* Set the metadata of the remaining buddy_chunk. */
        buddy_chunk->order = chunk->order;
        buddy_chunk->allocated = 0;

        /* Put the remaining buddy_chunk into its corresponding free list. */
        free_list = &(pool->free_lists[buddy_chunk->order].free_list);
        list_add(&buddy_chunk->node, free_list);
        pool->free_lists[buddy_chunk->order].nr_free += 1;

        /* Continue to split the current chunk (@chunk). */
        return split_chunk(pool, order, chunk);
}

/* The maximum recursion depth of merge_chunk is bounded by the macro
 * BUDDY_MAX_ORDER. */
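/*
 * Worked example (illustrative only): freeing an order-0 page whose buddy is
 * already free produces an order-1 chunk; if that chunk's buddy is also free,
 * the merge cascades again, up to order BUDDY_MAX_ORDER - 1.
 */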
static struct page *merge_chunk(struct phys_mem_pool *pool, struct page *chunk)
{
        struct page *buddy_chunk;

        /* The @chunk is already the largest one. */
        if (chunk->order == (BUDDY_MAX_ORDER - 1)) {
                return chunk;
        }

        /* Locate the buddy_chunk of @chunk. */
        buddy_chunk = get_buddy_chunk(pool, chunk);

        /* If the buddy_chunk does not exist, no further merge is required. */
        if (buddy_chunk == NULL)
                return chunk;

        /* If the buddy_chunk is not free, no further merge is required. */
        if (buddy_chunk->allocated == 1)
                return chunk;

        /*
         * If the buddy_chunk is not free as a whole (its order differs from
         * @chunk's), no further merge is required.
         */
        if (buddy_chunk->order != chunk->order)
                return chunk;

        /* Remove the buddy_chunk from its current free list. */
        list_del(&(buddy_chunk->node));
        pool->free_lists[buddy_chunk->order].nr_free -= 1;

        /*
         * Merge the two buddies into a larger chunk (order+1); the chunk at
         * the lower address represents the merged chunk.
         */
        buddy_chunk->order += 1;
        chunk->order += 1;
        if (chunk > buddy_chunk)
                chunk = buddy_chunk;

        /* Keep merging. */
        return merge_chunk(pool, chunk);
}

/*
 * The layout of a phys_mem_pool:
 * | page_metadata area (an array of struct page) | alignment pad | usable memory |
 *
 * The usable memory: [pool_start_addr, pool_start_addr + pool_mem_size).
 */
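/*
 * For example (figures assumed for illustration): managing 4096 pages with a
 * 32-byte struct page would reserve 4096 * 32 = 128 KiB of metadata ahead of
 * the usable memory.
 */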
void init_buddy(struct phys_mem_pool *pool, struct page *start_page,
                vaddr_t start_addr, unsigned long page_num)
{
        int order;
        unsigned long page_idx;
        struct page *page;

        BUG_ON(lock_init(&pool->buddy_lock) != 0);

        /* Init the physical memory pool. */
        pool->pool_start_addr = start_addr;
        pool->page_metadata = start_page;
        pool->pool_mem_size = page_num * BUDDY_PAGE_SIZE;
        /* This field is for unit test only. */
        pool->pool_phys_page_num = page_num;

        /* Init the free lists. */
        for (order = 0; order < BUDDY_MAX_ORDER; ++order) {
                pool->free_lists[order].nr_free = 0;
                init_list_head(&(pool->free_lists[order].free_list));
        }

        /* Clear the page_metadata area. */
        memset((char *)start_page, 0, page_num * sizeof(struct page));

        /* Init the page_metadata area. */
        for (page_idx = 0; page_idx < page_num; ++page_idx) {
                page = start_page + page_idx;
                page->allocated = 1;
                page->order = 0;
                page->pool = pool;
        }

        /*
         * Put each physical memory page into the free lists. Freeing the
         * pages one by one lets merge_chunk coalesce adjacent buddies, so
         * the pool ends up holding max-order chunks wherever possible.
         */
        for (page_idx = 0; page_idx < page_num; ++page_idx) {
                page = start_page + page_idx;
                buddy_free_pages(pool, page);
        }
}

struct page *buddy_get_pages(struct phys_mem_pool *pool, int order)
{
        int cur_order;
        struct list_head *free_list;
        struct page *page = NULL;

        if (unlikely(order >= BUDDY_MAX_ORDER)) {
                kwarn("ChCore does not support allocating such a large "
                      "contiguous physical memory region\n");
                return NULL;
        }

        lock(&pool->buddy_lock);

        /* Search the free lists for a chunk that is just large enough. */
        for (cur_order = order; cur_order < BUDDY_MAX_ORDER; ++cur_order) {
                free_list = &(pool->free_lists[cur_order].free_list);
                if (!list_empty(free_list)) {
                        /* Get a free memory chunk from the free list. */
                        page = list_entry(free_list->next, struct page, node);
                        list_del(&page->node);
                        pool->free_lists[cur_order].nr_free -= 1;
                        page->allocated = 1;
                        break;
                }
        }

        if (unlikely(page == NULL)) {
                kdebug("[OOM] Not enough memory in memory pool %p\n", pool);
                goto out;
        }

        /*
         * Split the chunk found and return the start part of the chunk,
         * which meets the required size.
         */
        page = split_chunk(pool, order, page);

out:
        unlock(&pool->buddy_lock);
        return page;
}

void buddy_free_pages(struct phys_mem_pool *pool, struct page *page)
{
        int order;
        struct list_head *free_list;

        lock(&pool->buddy_lock);

        BUG_ON(page->allocated == 0);
        /* Mark the chunk @page as free. */
        page->allocated = 0;
        /* Merge the freed chunk. */
        page = merge_chunk(pool, page);

        /* Put the merged chunk into its corresponding free list. */
        order = page->order;
        free_list = &(pool->free_lists[order].free_list);
        list_add(&page->node, free_list);
        pool->free_lists[order].nr_free += 1;

        unlock(&pool->buddy_lock);
}
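
/*
 * Usage sketch (illustrative only; error handling omitted): allocating four
 * contiguous pages and freeing them again might look like
 *
 *     struct page *pg = buddy_get_pages(pool, 2); // order 2 == 4 pages
 *     void *va = page_to_virt(pg);
 *     ...
 *     buddy_free_pages(pg->pool, pg);
 */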

void *page_to_virt(struct page *page)
{
        vaddr_t addr;
        struct phys_mem_pool *pool = page->pool;

        BUG_ON(pool == NULL);

        /* page_idx * BUDDY_PAGE_SIZE + start_addr */
        addr = (page - pool->page_metadata) * BUDDY_PAGE_SIZE
               + pool->pool_start_addr;
        return (void *)addr;
}

struct page *virt_to_page(void *ptr)
{
        struct page *page;
        struct phys_mem_pool *pool = NULL;
        vaddr_t addr = (vaddr_t)ptr;
        int i;

        /* Find the corresponding physical memory pool. */
        for (i = 0; i < physmem_map_num; ++i) {
                if (addr >= global_mem[i].pool_start_addr
                    && addr < global_mem[i].pool_start_addr
                              + global_mem[i].pool_mem_size) {
                        pool = &global_mem[i];
                        break;
                }
        }
        BUG_ON(pool == NULL);

        page = pool->page_metadata
               + ((addr - pool->pool_start_addr) / BUDDY_PAGE_SIZE);
        return page;
}
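
/*
 * Note: page_to_virt and virt_to_page are inverses over a pool. For instance
 * (addresses assumed, with 4 KiB pages), the page at metadata index 3 of a
 * pool starting at 0x40000000 maps to 0x40003000, and any address within
 * [0x40003000, 0x40004000) maps back to metadata index 3.
 */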

unsigned long get_free_mem_size_from_buddy(struct phys_mem_pool *pool)
{
        int order;
        struct free_list *list;
        unsigned long current_order_size;
        unsigned long total_size = 0;

        for (order = 0; order < BUDDY_MAX_ORDER; order++) {
                /* Chunk size at this order: 2^order * BUDDY_PAGE_SIZE. */
                current_order_size = BUDDY_PAGE_SIZE * (1 << order);
                list = pool->free_lists + order;
                total_size += list->nr_free * current_order_size;

                /* Debug: print info about the current order. */
                kdebug("buddy memory chunk order: %d, size: 0x%lx, num: %d\n",
                       order,
                       current_order_size,
                       list->nr_free);
        }
        return total_size;
}
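
/*
 * Example (numbers assumed): with 4 KiB pages, three free order-0 chunks and
 * one free order-2 chunk amount to 3 * 4 KiB + 1 * 16 KiB = 28 KiB free.
 */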

unsigned long get_total_mem_size_from_buddy(struct phys_mem_pool *pool)
{
        return pool->pool_mem_size;
}