1 /*
2 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3 * Licensed under the Mulan PSL v2.
4 * You can use this software according to the terms and conditions of the Mulan PSL v2.
5 * You may obtain a copy of Mulan PSL v2 at:
6 * http://license.coscl.org.cn/MulanPSL2
7 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9 * PURPOSE.
10 * See the Mulan PSL v2 for more details.
11 */
12 #include <common/debug.h>
13 #include <common/types.h>
14 #include <common/macro.h>
15 #include <common/errno.h>
16 #include <common/util.h>
17 #include <common/kprint.h>
18
19 #include <mm/slab.h>
20 #include <mm/buddy.h>
21
22 #define SLAB_MAX_SIZE (1UL << SLAB_MAX_ORDER)
23 #define ZERO_SIZE_PTR ((void *)(-1UL))
24
_get_pages(int order,bool is_record)25 void *_get_pages(int order, bool is_record)
26 {
27 struct page *page = NULL;
28 int i;
29 void *addr;
30
31 /* Try to get continous physical memory pages from one physmem pool. */
32 for (i = 0; i < physmem_map_num; ++i) {
33 page = buddy_get_pages(&global_mem[i], order);
34 if (page)
35 break;
36 }
37
38 if (unlikely(!page)) {
39 kwarn("[OOM] Cannot get page from any memory pool!\n");
40 return NULL;
41 }
42
43 addr = page_to_virt(page);
44 return addr;
45 }
46
/* Public page allocator: always records the allocation. */
void *get_pages(int order)
{
	return _get_pages(order, /* is_record = */ true);
}
51
_free_pages(void * addr,bool is_revoke_record)52 void _free_pages(void *addr, bool is_revoke_record)
53 {
54 struct page *page;
55
56 page = virt_to_page(addr);
57 buddy_free_pages(page->pool, page);
58 }
59
/* Public page free: revokes the allocation record as well. */
void free_pages(void *addr)
{
	_free_pages(addr, /* is_revoke_record = */ true);
}
64
/* Free pages while leaving any allocation record untouched. */
void free_pages_without_record(void *addr)
{
	_free_pages(addr, /* is_revoke_record = */ false);
}
69
size_to_page_order(unsigned long size)70 static int size_to_page_order(unsigned long size)
71 {
72 unsigned long order;
73 unsigned long pg_num;
74 unsigned long tmp;
75
76 order = 0;
77 pg_num = ROUND_UP(size, BUDDY_PAGE_SIZE) / BUDDY_PAGE_SIZE;
78 tmp = pg_num;
79
80 while (tmp > 1) {
81 tmp >>= 1;
82 order += 1;
83 }
84
85 if (pg_num > (1 << order))
86 order += 1;
87
88 return (int)order;
89 }
90
91 /* Currently, BUG_ON no available memory. */
_kmalloc(size_t size,bool is_record,size_t * real_size)92 void *_kmalloc(size_t size, bool is_record, size_t *real_size)
93 {
94 void *addr;
95 int order;
96
97 if (unlikely(size == 0))
98 return ZERO_SIZE_PTR;
99
100 if (size <= SLAB_MAX_SIZE) {
101 addr = alloc_in_slab(size, real_size);
102 } else {
103 if (size <= BUDDY_PAGE_SIZE)
104 order = 0;
105 else
106 order = size_to_page_order(size);
107 addr = get_pages(order);
108 }
109
110 BUG_ON(!addr);
111 return addr;
112 }
113
/* Public kmalloc: records the allocation; rounded-up size is discarded. */
void *kmalloc(size_t size)
{
	size_t real_size;

	return _kmalloc(size, true, &real_size);
}
121
kzalloc(size_t size)122 void *kzalloc(size_t size)
123 {
124 void *addr;
125
126 addr = kmalloc(size);
127 memset(addr, 0, size);
128 return addr;
129 }
130
_kfree(void * ptr,bool is_revoke_record)131 void _kfree(void *ptr, bool is_revoke_record)
132 {
133 struct page *page;
134
135 if (unlikely(ptr == ZERO_SIZE_PTR))
136 return;
137
138 page = virt_to_page(ptr);
139 if (page && page->slab)
140 free_in_slab(ptr);
141 else if (page && page->pool)
142 buddy_free_pages(page->pool, page);
143 else
144 kwarn("unexpected state in %s\n", __func__);
145 }
146
kfree(void * ptr)147 void kfree(void *ptr)
148 {
149 _kfree(ptr, true);
150 }
151