/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <common/types.h>
#include <common/list.h>
#include <common/errno.h>
#include <mm/vmspace.h>
#include <mm/kmalloc.h>
#include <mm/mm.h>
#include <mm/uaccess.h>

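/*
 * Bookkeeping node for a private page created on a copy-on-write fault;
 * such pages are freed together with their owning vmregion.
 */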
struct cow_private_page {
        struct list_head node;
        void *page;
};

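/*
 * Allocate and initialize a vmregion covering [start, start + len)
 * that is backed by @pmo. Device, nocache, and (under CHCORE_OH_TEE)
 * TZ_NS pmo types add the corresponding attribute bits to vmr->perm.
 */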
static struct vmregion *alloc_vmregion(vaddr_t start, size_t len,
                                       vmr_prop_t perm, struct pmobject *pmo)
{
        struct vmregion *vmr;

        vmr = kmalloc(sizeof(*vmr));
        if (vmr == NULL)
                return NULL;

        vmr->start = start;
        vmr->size = len;
        vmr->perm = perm;
        vmr->pmo = pmo;

        if (pmo->type == PMO_DEVICE)
                vmr->perm |= VMR_DEVICE;
        else if (pmo->type == PMO_DATA_NOCACHE)
                vmr->perm |= VMR_NOCACHE;
#ifdef CHCORE_OH_TEE
        else if (pmo->type == PMO_TZ_NS)
                vmr->perm |= VMR_TZ_NS;
#endif /* CHCORE_OH_TEE */

        init_list_head(&vmr->cow_private_pages);

        return vmr;
}

static void free_cow_private_page(struct cow_private_page *record)
{
        kfree(record->page);
        kfree(record);
}

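/*
 * Record a private page of @vmr (e.g., one installed by a copy-on-write
 * fault) so that free_vmregion() releases it together with the vmregion.
 */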
void vmregion_record_cow_private_page(struct vmregion *vmr, void *private_page)
{
        struct cow_private_page *record;

        record = kmalloc(sizeof(*record));
        /* Skip the record on allocation failure instead of dereferencing NULL. */
        if (record == NULL)
                return;
        record->page = private_page;
        list_add(&record->node, &vmr->cow_private_pages);
}

static void free_vmregion(struct vmregion *vmr)
{
        struct cow_private_page *cur_record = NULL, *tmp = NULL;

        for_each_in_list_safe (cur_record, tmp, node, &vmr->cow_private_pages) {
                free_cow_private_page(cur_record);
        }
        kfree((void *)vmr);
}

/*
 * Comparator for inserting vmregions into the rb-tree.
 * Returns true if node1 (vm range1) lies entirely below node2 (vm range2);
 * returns false if node1 lies above node2 or the two ranges overlap.
 */
static bool cmp_two_vmrs(const struct rb_node *node1,
                         const struct rb_node *node2)
{
        struct vmregion *vmr1, *vmr2;
        vaddr_t vmr1_start, vmr1_end, vmr2_start;

        vmr1 = rb_entry(node1, struct vmregion, tree_node);
        vmr2 = rb_entry(node2, struct vmregion, tree_node);

        vmr1_start = vmr1->start;
        vmr1_end = vmr1_start + vmr1->size - 1;

        vmr2_start = vmr2->start;

        /* vmr1 < vmr2 */
        if (vmr1_end < vmr2_start)
                return true;

        /* vmr1 > vmr2, or vmr1 and vmr2 overlap */
        return false;
}

struct va_range {
        vaddr_t start;
        vaddr_t end;
};

/*
 * Return value:
 * -1: va_range < node (vmr)
 *  0: overlap
 *  1: va_range > node
 */
static int cmp_vmr_and_range(const void *va_range, const struct rb_node *node)
{
        struct vmregion *vmr;
        vaddr_t vmr_start, vmr_end;
        struct va_range *range = (struct va_range *)va_range;

        vmr = rb_entry(node, struct vmregion, tree_node);
        vmr_start = vmr->start;
        vmr_end = vmr_start + vmr->size - 1;

        /* range < vmr */
        if (range->end < vmr_start)
                return -1;

        /* range > vmr */
        if (range->start > vmr_end)
                return 1;

        /* range and vmr overlap */
        return 0;
}

/*
 * Return value:
 * -1: va < node (vmr)
 *  0: va belongs to node
 *  1: va > node
 */
static int cmp_vmr_and_va(const void *va, const struct rb_node *node)
{
        struct vmregion *vmr;
        vaddr_t vmr_start, vmr_end;

        vmr = rb_entry(node, struct vmregion, tree_node);
        vmr_start = vmr->start;
        vmr_end = vmr_start + vmr->size - 1;

        if ((vaddr_t)va < vmr_start)
                return -1;

        if ((vaddr_t)va > vmr_end)
                return 1;

        return 0;
}

/* Returns 0 when @vmr_to_add does not intersect any existing vmr, 1 otherwise. */
static int check_vmr_intersect(struct vmspace *vmspace,
                               struct vmregion *vmr_to_add)
{
        struct va_range range;
        struct rb_node *res;

        range.start = vmr_to_add->start;
        range.end = range.start + vmr_to_add->size - 1;

        res = rb_search(&vmspace->vmr_tree, (const void *)&range,
                        cmp_vmr_and_range);
        /*
         * If rb_search returns NULL,
         * the vmr_to_add will not overlap with any existing vmr.
         */
        return (res == NULL) ? 0 : 1;
}

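/* Insert @vmr into both the vmr list and the rb-tree, rejecting overlaps. */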
static int add_vmr_to_vmspace(struct vmspace *vmspace, struct vmregion *vmr)
{
        if (check_vmr_intersect(vmspace, vmr) != 0) {
                kwarn("%s: vmr overlap.\n", __func__);
                return -EINVAL;
        }

        list_add(&vmr->list_node, &vmspace->vmr_list);
        rb_insert(&vmspace->vmr_tree, &vmr->tree_node, cmp_two_vmrs);
        return 0;
}

/* The @vmr is only removed but not freed. */
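/*
 * Note: check_vmr_intersect() also serves as an existence check here,
 * because a vmr that is already in the tree intersects with itself.
 */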
static void remove_vmr_from_vmspace(struct vmspace *vmspace,
                                    struct vmregion *vmr)
{
        if (check_vmr_intersect(vmspace, vmr) != 0) {
                rb_erase(&vmspace->vmr_tree, &vmr->tree_node);
                list_del(&vmr->list_node);
        }
}

static void del_vmr_from_vmspace(struct vmspace *vmspace, struct vmregion *vmr)
{
        remove_vmr_from_vmspace(vmspace, vmr);
        free_vmregion(vmr);
}

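/*
 * Eagerly map the whole physical range of @vmr->pmo at @vmr->start
 * with @vmr->perm, updating the vmspace RSS accounting.
 */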
static int fill_page_table(struct vmspace *vmspace, struct vmregion *vmr)
{
        size_t pm_size;
        paddr_t pa;
        vaddr_t va;
        int ret;
        long rss = 0;

        pm_size = vmr->pmo->size;
        pa = vmr->pmo->start;
        va = vmr->start;

        lock(&vmspace->pgtbl_lock);
        ret = map_range_in_pgtbl(vmspace->pgtbl, va, pa, pm_size, vmr->perm, &rss);
        vmspace->rss += rss;
        unlock(&vmspace->pgtbl_lock);

        return ret;
}

/* Initially, a vmspace has not run on any CPU. */
static inline void reset_history_cpus(struct vmspace *vmspace)
{
        int i;

        for (i = 0; i < PLAT_CPU_NUM; ++i)
                vmspace->history_cpus[i] = 0;
}

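/*
 * Initialize an empty vmspace: vmr list/tree, root page table page,
 * PCID, locks, history CPUs, heap vmr, and RSS counter.
 */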
int vmspace_init(struct vmspace *vmspace, unsigned long pcid)
{
        init_list_head(&vmspace->vmr_list);
        init_rb_root(&vmspace->vmr_tree);

        /* Allocate the root page table page */
        vmspace->pgtbl = get_pages(0);
        BUG_ON(vmspace->pgtbl == NULL);
        memset((void *)vmspace->pgtbl, 0, PAGE_SIZE);
        vmspace->pcid = pcid;

        /* Architecture-dependent initialization */
        arch_vmspace_init(vmspace);

        /*
         * Note: acquire vmspace_lock before pgtbl_lock
         * when locking them together.
         */
        lock_init(&vmspace->vmspace_lock);
        lock_init(&vmspace->pgtbl_lock);

        /* The vmspace does not run on any CPU for now */
        reset_history_cpus(vmspace);

        vmspace->heap_vmr = NULL;

        vmspace->rss = 0;

        return 0;
}

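/*
 * Tear down a vmspace when its process exits: free every vmregion,
 * release the page table pages, and flush stale TLB entries.
 */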
void vmspace_deinit(void *ptr)
{
        struct vmspace *vmspace;
        struct vmregion *vmr = NULL;
        struct vmregion *tmp;

        vmspace = (struct vmspace *)ptr;

        /*
         * Free each vmregion in vmspace->vmr_list.
         * Only invoked when a process exits. No need to acquire the lock.
         */
        for_each_in_list_safe (vmr, tmp, list_node, &vmspace->vmr_list) {
                free_vmregion(vmr);
        }

        free_page_table(vmspace->pgtbl);

        /* TLB flush (PCID reusing) */
        flush_tlb_by_vmspace(vmspace);
}

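/*
 * Map [va, va + len) (page-aligned) to @pmo with permission @flags.
 * PMO_DATA / PMO_DATA_NOCACHE / PMO_DEVICE regions are mapped eagerly;
 * other pmo types are mapped lazily.
 */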
int vmspace_map_range(struct vmspace *vmspace, vaddr_t va, size_t len,
                      vmr_prop_t flags, struct pmobject *pmo)
{
        struct vmregion *vmr;
        int ret;

        if (len == 0)
                return 0;

        /* Align a vmr to PAGE_SIZE */
        va = ROUND_DOWN(va, PAGE_SIZE);
        len = ROUND_UP(len, PAGE_SIZE);

        vmr = alloc_vmregion(va, len, flags, pmo);
        if (!vmr) {
                ret = -ENOMEM;
                goto out_fail;
        }

        /*
         * Each operation on the vmspace should be protected by
         * the per-vmspace lock, i.e., vmspace_lock.
         */
        lock(&vmspace->vmspace_lock);
        ret = add_vmr_to_vmspace(vmspace, vmr);
        unlock(&vmspace->vmspace_lock);

        if (ret < 0) {
                kdebug("add_vmr_to_vmspace fails\n");
                goto out_free_vmr;
        }

#ifdef CHCORE_OH_TEE
        if (pmo->type == PMO_TZ_NS) {
                struct ns_pmo_private *private;

                private = (struct ns_pmo_private *)pmo->private;
                private->mapped = true;
                private->vaddr = va;
                private->len = len;
                fill_page_table(vmspace, vmr);
        }
#endif /* CHCORE_OH_TEE */

        /* Eager mapping for the following pmo types; otherwise mapping is lazy. */
        if ((pmo->type == PMO_DATA) || (pmo->type == PMO_DATA_NOCACHE)
            || (pmo->type == PMO_DEVICE))
                fill_page_table(vmspace, vmr);

        return 0;

out_free_vmr:
        free_vmregion(vmr);
out_fail:
        return ret;
}

/*
 * Unmap routine: unmap a virtual memory range.
 * Current limitation: only supports unmapping one whole existing vmr.
 */
int vmspace_unmap_range(struct vmspace *vmspace, vaddr_t va, size_t len)
{
        struct vmregion *vmr;
        vaddr_t start;
        size_t size;
        int ret = 0;

        lock(&vmspace->vmspace_lock);
        vmr = find_vmr_for_va(vmspace, va);
        if (!vmr)
                goto out_unlock;

        start = vmr->start;
        size = vmr->size;
        if ((va != start) || (len != size)) {
                kwarn("Only support unmapping a whole vmregion now.\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        del_vmr_from_vmspace(vmspace, vmr);
        unlock(&vmspace->vmspace_lock);

        /* Remove the potential mappings in the page table. */
        if (len != 0) {
                long rss = 0;
                lock(&vmspace->pgtbl_lock);
                unmap_range_in_pgtbl(vmspace->pgtbl, va, len, &rss);
                vmspace->rss += rss;
                unlock(&vmspace->pgtbl_lock);
                flush_tlb_by_range(vmspace, va, len);
        }

        return 0;

out_unlock:
        unlock(&vmspace->vmspace_lock);
        return ret;
}

/* This function must be called with the vmspace_lock held. */
struct vmregion *find_vmr_for_va(struct vmspace *vmspace, vaddr_t addr)
{
        struct vmregion *vmr;
        struct rb_node *node;

        node = rb_search(&vmspace->vmr_tree, (const void *)addr, cmp_vmr_and_va);
        if (unlikely(node == NULL))
                return NULL;

        vmr = rb_entry(node, struct vmregion, tree_node);
        return vmr;
}

/* Each process has one heap_vmr. */
struct vmregion *init_heap_vmr(struct vmspace *vmspace, vaddr_t va,
                               struct pmobject *pmo)
{
        return alloc_vmregion(va, 0, VMR_READ | VMR_WRITE, pmo);
}

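/*
 * Grow the heap: enlarge both the heap vmr and its backing pmo by
 * @add_len. The vmr is removed and re-inserted so that the rb-tree
 * stays consistent with the updated range.
 */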
void adjust_heap_vmr(struct vmspace *vmspace, unsigned long add_len)
{
        struct vmregion *vmr;

        vmr = vmspace->heap_vmr;
        remove_vmr_from_vmspace(vmspace, vmr);
        vmr->size += add_len;
        vmr->pmo->size += add_len;
        add_vmr_to_vmspace(vmspace, vmr);
}

/* Dumping all the vmrs of one vmspace. */
void kprint_vmr(struct vmspace *vmspace)
{
        struct rb_node *node;
        struct vmregion *vmr;
        vaddr_t start, end;

        /* rb_for_each will iterate the vmrs in order. */
        rb_for_each(&vmspace->vmr_tree, node)
        {
                vmr = rb_entry(node, struct vmregion, tree_node);
                start = vmr->start;
                end = start + vmr->size;
                kinfo("[%p] [vmregion] start=%p end=%p. vmr->pmo->type=%d\n",
                      vmspace,
                      start,
                      end,
                      vmr->pmo->type);
        }
}

/*
 * Note that lock/atomic_ops are not required here
 * because only CPU X will modify (record/clear) history_cpus[X].
 */
void record_history_cpu(struct vmspace *vmspace, unsigned int cpuid)
{
        BUG_ON(cpuid >= PLAT_CPU_NUM);
        vmspace->history_cpus[cpuid] = 1;
}

void clear_history_cpu(struct vmspace *vmspace, unsigned int cpuid)
{
        BUG_ON(cpuid >= PLAT_CPU_NUM);
        vmspace->history_cpus[cpuid] = 0;
}