/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
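
/*
 * User-delegated page fault handling (fmap).
 *
 * A user-level fault handler registers a notification and a shared ring
 * buffer with sys_user_fault_register(). When a thread faults on a pmo
 * whose faults are delegated to user space, handle_user_fault() records
 * the faulting thread as pending, pushes a (badge, fault_va) message into
 * the ring buffer, signals the notification, and blocks the thread. The
 * handler then resolves the fault and calls sys_user_fault_map() to
 * install a page at the faulting address and wake the blocked thread.
 */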
#ifdef CHCORE_ENABLE_FMAP

#include <arch/mmu.h>
#include <arch/mm/cache.h>
#include <ipc/notification.h>
#include <irq/irq.h>
#include <common/errno.h>
#include <common/radix.h>
#include <common/lock.h>
#include <common/types.h>
#include <sched/sched.h>
#include <mm/kmalloc.h>
#include <lib/printk.h>
#include <mm/vmspace.h>
#include <mm/mm.h>
#include <object/object.h>
#include <object/user_fault.h>
#include <lib/ring_buffer.h>

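/*
 * Global registry of fault pools, one per registered cap_group,
 * protected by fmap_fault_pool_list_lock.
 */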
struct lock fmap_fault_pool_list_lock;
struct list_head fmap_fault_pool_list;

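/*
 * Lazily initialize the global fault pool list exactly once.
 * The atomic compare-and-exchange guarantees a single initializer even
 * when several registrations race.
 */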
static void user_fault_init(void)
{
    static int inited = 0;

    if (!atomic_cmpxchg_32(&inited, 0, 1)) {
        lock_init(&fmap_fault_pool_list_lock);
        init_list_head(&fmap_fault_pool_list);
    }
}

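/*
 * Find the fault pool registered by the calling cap_group, identified by
 * its badge. Returns NULL if the caller has not registered one.
 */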
static struct fmap_fault_pool *get_current_fault_pool(void)
{
    badge_t badge;
    struct fmap_fault_pool *pool_iter;

    badge = current_cap_group->badge;
    for_each_in_list (
        pool_iter, struct fmap_fault_pool, node, &fmap_fault_pool_list) {
        if (pool_iter->cap_group_badge == badge) {
            return pool_iter;
        }
    }

    return NULL;
}

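/*
 * Look up the pending (blocked) faulting thread recorded for
 * (client_badge, fault_va) in the caller's fault pool.
 */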
static struct fault_pending_thread *
get_current_pending_thread(badge_t client_badge, vaddr_t fault_va)
{
    struct fault_pending_thread *pt;
    struct fmap_fault_pool *pool;

    pool = get_current_fault_pool();
    if (!pool)
        return NULL;

    for_each_in_list (
        pt, struct fault_pending_thread, node, &pool->pending_threads) {
        if (pt->fault_badge == client_badge && pt->fault_va == fault_va) {
            return pt;
        }
    }
    return NULL;
}

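/*
 * Register the calling cap_group as a user-level fault handler.
 * notific_cap names the notification to signal when a fault message is
 * produced, and msg_buffer is the user virtual address of the ring buffer
 * that receives fault messages. Only one pool per cap_group is allowed.
 */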
/* syscall */
int sys_user_fault_register(cap_t notific_cap, vaddr_t msg_buffer)
{
    int ret;
    struct notification *notific;
    struct ring_buffer *msg_buffer_kva;
    /* msg_buffer_kva will point to the kernel virtual address of a
     * ring_buffer struct provided by user space, so no ring buffer is
     * allocated or initialized here */
    badge_t badge;
    struct fmap_fault_pool *pool_iter;

    user_fault_init();

    badge = current_cap_group->badge;

    /* Validate arguments */
    notific = obj_get(current_cap_group, notific_cap, TYPE_NOTIFICATION);
    if (notific == NULL) {
        return -EINVAL;
    }

    ret = trans_uva_to_kva(msg_buffer, (vaddr_t *)&msg_buffer_kva);
    if (ret != 0) {
        obj_put(notific);
        return -EINVAL;
    }

    lock(&fmap_fault_pool_list_lock);
    if (get_current_fault_pool() != NULL) {
        /* pool already exists */
        unlock(&fmap_fault_pool_list_lock);
        obj_put(notific);
        return -EINVAL;
    }

    /* Create a fmap_fault_pool and add it to the global list */
    pool_iter = (struct fmap_fault_pool *)kmalloc(sizeof(*pool_iter));
    if (!pool_iter) {
        unlock(&fmap_fault_pool_list_lock);
        obj_put(notific);
        return -ENOMEM;
    }

    pool_iter->cap_group_badge = badge;
    pool_iter->notific = notific;
    pool_iter->msg_buffer_kva = msg_buffer_kva;
    lock_init(&pool_iter->lock);
    init_list_head(&pool_iter->pending_threads);

    list_append(&pool_iter->node, &fmap_fault_pool_list);
    unlock(&fmap_fault_pool_list_lock);

    return 0;
}

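/*
 * Called by the user-level fault handler to resolve a fault previously
 * reported for (client_badge, fault_va). The page installed at fault_va
 * is either shared with or copied from the handler page mapped at
 * remap_va (or zero-filled when copying without a remap_va), after which
 * the blocked faulting thread is re-enqueued.
 */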
int sys_user_fault_map(badge_t client_badge, vaddr_t fault_va, vaddr_t remap_va,
                       bool copy, unsigned long perm)
{
    struct fmap_fault_pool *current_pool;
    struct fault_pending_thread *pending_thread;
    struct thread *thread_to_wake;
    struct vmspace *handler_vmspace;
    struct vmspace *fault_vmspace;
    struct vmregion *fault_vmr;
    struct pmobject *fault_pmo;
    paddr_t pa, new_pa;
    void *new_page;
    int ret;
    bool page_allocated = false;
    long rss = 0;

    current_pool = get_current_fault_pool();
    if (!current_pool) {
        /* The caller has not registered a fault pool */
        return -EINVAL;
    }

    /* Find the corresponding pending thread */
    lock(&current_pool->lock);
    pending_thread = get_current_pending_thread(client_badge, fault_va);
    if (!pending_thread) {
        unlock(&current_pool->lock);
        return -EINVAL;
    }
    list_del(&pending_thread->node);
    unlock(&current_pool->lock);

    thread_to_wake = pending_thread->thread;
    kfree(pending_thread);

    /* Resolve the handler-space VA whose physical page will back the
     * faulting VA */
    if (remap_va) {
        handler_vmspace =
            obj_get(current_cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
        if (handler_vmspace == NULL) {
            return -EINVAL;
        }
        lock(&handler_vmspace->pgtbl_lock);
        ret = query_in_pgtbl(handler_vmspace->pgtbl, remap_va, &pa, NULL);
        if (ret) {
            /* remap_va is not mapped in handler_vmspace */
            unlock(&handler_vmspace->pgtbl_lock);
            obj_put(handler_vmspace);
            return -EINVAL;
        }
        unlock(&handler_vmspace->pgtbl_lock);
        obj_put(handler_vmspace);
    }

    /* Decide whether to copy the physical page or share it */
    if (!copy) {
        if (!remap_va)
            return -EINVAL;
        new_pa = pa;
    } else {
        new_page = get_pages(0);
        if (new_page == NULL)
            return -EINVAL;
        if (remap_va)
            memcpy(new_page, (void *)phys_to_virt(pa), PAGE_SIZE);
        else
            memset(new_page, 0, PAGE_SIZE);
        new_pa = (paddr_t)virt_to_phys(new_page);
        page_allocated = true;
    }

    /* Install the target page's PA at the faulting VA */
    fault_vmspace =
        obj_get(thread_to_wake->cap_group, VMSPACE_OBJ_ID, TYPE_VMSPACE);
    if (fault_vmspace == NULL) {
        return -EINVAL;
    }
    if (page_allocated) {
        lock(&fault_vmspace->vmspace_lock);
        fault_vmr = find_vmr_for_va(fault_vmspace, fault_va);
        if (fault_vmr == NULL) {
            unlock(&fault_vmspace->vmspace_lock);
            obj_put(fault_vmspace);
            return -EINVAL;
        }
        fault_pmo = fault_vmr->pmo;
        commit_page_to_pmo(fault_pmo, new_pa, new_pa);
    }

    lock(&fault_vmspace->pgtbl_lock);

    ret = map_range_in_pgtbl(
        fault_vmspace->pgtbl, fault_va, new_pa, PAGE_SIZE, perm, &rss);
    fault_vmspace->rss += rss;
    BUG_ON(ret);
    unlock(&fault_vmspace->pgtbl_lock);

    if (page_allocated) {
        unlock(&fault_vmspace->vmspace_lock);
    }

    obj_put(fault_vmspace);

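    /*
     * Temporarily switch to the faulting thread's vmspace so that, for
     * executable mappings, the newly mapped page can be synchronized
     * between the data and instruction caches via the faulting VA.
     */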
    switch_thread_vmspace_to(thread_to_wake);
    if (perm & VMR_EXEC) {
        arch_flush_cache(fault_va, PAGE_SIZE, SYNC_IDCACHE);
    }
    switch_thread_vmspace_to(current_thread);

    /* The pending thread goes back to the scheduler */
    thread_to_wake->thread_ctx->state = TS_INTER;
    BUG_ON(sched_enqueue(thread_to_wake));

    return 0;
}

/*
 * Handle a page fault on a pmo whose faults are delegated to user space:
 * record the faulting thread as pending, push a fault message into the
 * handler's ring buffer, signal the handler's notification, and block the
 * faulting thread until sys_user_fault_map() wakes it up.
 */
void handle_user_fault(struct pmobject *pmo, vaddr_t fault_va)
{
    struct fmap_fault_pool *fault_pool;
    struct fault_pending_thread *pending_thread;
    int ret;

    fault_pool = (struct fmap_fault_pool *)pmo->private;
    kdebug("pmo file fault: badge=%x, va=%lx\n",
           fault_pool->cap_group_badge,
           fault_va);

    /*
     * The faulting thread must stay pending until user-level handling
     * finishes. Record the (fault_badge, fault_va) -> thread mapping here.
     */
    pending_thread =
        (struct fault_pending_thread *)kmalloc(sizeof(*pending_thread));
    if (!pending_thread) {
        BUG_ON(1);
    }

    pending_thread->fault_badge = current_cap_group->badge;
    pending_thread->fault_va = fault_va;
    pending_thread->thread = current_thread;

    /* The fault_pool lock also protects the ring buffer's producer pointer */
    lock(&fault_pool->lock);

    if (if_buffer_full(fault_pool->msg_buffer_kva)) {
        BUG_ON(1);
    } else {
        /* There is a free slot in the handler's (server-side) buffer */
        struct user_fault_msg tmp;
        tmp.fault_badge = current_cap_group->badge;
        tmp.fault_va = fault_va;
        set_one_msg(fault_pool->msg_buffer_kva, &tmp);
    }
    list_append(&pending_thread->node, &fault_pool->pending_threads);

    /* Notify the fault handler that the buffer has been updated */
    ret = signal_notific(fault_pool->notific);
    BUG_ON(ret != 0);

    /*
     * Give up control here.
     * The thread will be woken up once the mapping is finished.
     */
    current_thread->thread_ctx->state = TS_WAITING;

    sched();
    /*
     * Release the lock only here, so that sys_user_fault_map cannot pick
     * up the pending thread, or modify thread->state, earlier than this
     * point.
     */
    unlock(&fault_pool->lock);
    eret_to_thread(switch_context());
}

#endif