• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3  * Licensed under the Mulan PSL v2.
4  * You can use this software according to the terms and conditions of the Mulan PSL v2.
5  * You may obtain a copy of Mulan PSL v2 at:
6  *     http://license.coscl.org.cn/MulanPSL2
7  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8  * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9  * PURPOSE.
10  * See the Mulan PSL v2 for more details.
11  */
12 #include <ipc/connection.h>
13 #include <ipc/notification.h>
14 #include <common/list.h>
15 #include <common/errno.h>
16 #include <object/thread.h>
17 #include <object/irq.h>
18 #include <sched/sched.h>
19 #include <sched/context.h>
20 #include <irq/irq.h>
21 #include <mm/uaccess.h>
22 
init_notific(struct notification * notifc)23 void init_notific(struct notification *notifc)
24 {
25     notifc->not_delivered_notifc_count = 0;
26     notifc->waiting_threads_count = 0;
27     init_list_head(&notifc->waiting_threads);
28     lock_init(&notifc->notifc_lock);
29     notifc->state = OBJECT_STATE_VALID;
30 }
31 
/*
 * Object-deinitialization hook for notification objects.
 * The notification owns no extra resources, so this is a no-op for now.
 */
void notification_deinit(void *ptr)
{
    /* No deinitialization is required for now. */
}
36 
37 /*
38  * A waiting thread can be awoken by timeout and signal, which leads to racing.
 * We guarantee that a thread is not awoken twice by 1. removing the thread
 * from the notification's waiting_threads on timeout and 2. removing the
 * thread from the sleep_list when it gets signaled.
42  * When signaled:
43  *	lock(notification)
44  *	remove from waiting_threads
45  *      thread state = TS_READY
46  *	unlock(notification)
47  *
48  *	if (sleep_state.cb != NULL) {
49  *		lock(sleep_list)
50  *		if (sleep_state.cb != NULL)
51  *			remove from sleep_list
52  *		unlock(sleep_list)
53  *	}
54  *
55  * When timeout:
56  *	lock(sleep_list)
57  *	remove from sleep_list
58  *	lock(notification)
59  *	if (thread state == TS_WAITING)
60  *		remove from waiting_threads
61  *	unlock(notification)
62  *	sleep_state.cb = NULL
63  *	unlock(sleep_list)
64  */
65 
/*
 * Timer callback fired when a thread blocked in wait_notific() times out.
 * Per the protocol comment above, the timeout path runs with the
 * sleep_list side already handled; here we only take the notification
 * lock and, if the thread is still waiting, wake it with -ETIMEDOUT.
 */
static void notific_timer_cb(struct thread *thread)
{
    struct notification *notifc;

    notifc = thread->sleep_state.pending_notific;
    thread->sleep_state.pending_notific = NULL;

    lock(&notifc->notifc_lock);

    /* For recycling: the state is set in stop_notification */
    if (notifc->state == OBJECT_STATE_INVALID) {
        thread->thread_ctx->thread_exit_state = TE_EXITED;
        unlock(&notifc->notifc_lock);
        return;
    }

    /* Already woken by a signal (no longer TS_WAITING): nothing to do. */
    if (thread->thread_ctx->state != TS_WAITING) {
        unlock(&notifc->notifc_lock);
        return;
    }

    /* Still waiting: dequeue from the notification's wait list. */
    list_del(&thread->notification_queue_node);
    BUG_ON(notifc->waiting_threads_count <= 0);
    notifc->waiting_threads_count--;

    /* Wake the thread with -ETIMEDOUT as its syscall return value. */
    arch_set_thread_return(thread, -ETIMEDOUT);
    thread->thread_ctx->state = TS_INTER;
    BUG_ON(sched_enqueue(thread));

    unlock(&notifc->notifc_lock);
}
97 
/*
 * Wait on @notifc.
 *
 * Returns 0 if a pending notification is consumed immediately (or, on
 * the blocking path, when a later signal wakes the thread), -EAGAIN if
 * nothing is pending and @is_block is false, and -ECAPBILITY if the
 * object is being recycled. If @timeout is non-NULL, notific_timer_cb
 * delivers -ETIMEDOUT when it expires before a signal arrives.
 *
 * NOTE: on the blocking path this function never returns to its caller;
 * it switches directly to another thread via eret_to_thread().
 */
int wait_notific(struct notification *notifc, bool is_block,
                 struct timespec *timeout)
{
    int ret = 0;
    struct thread *thread;

    lock(&notifc->notifc_lock);

    /* For recycling: the state is set in stop_notification */
    if (notifc->state == OBJECT_STATE_INVALID) {
        unlock(&notifc->notifc_lock);
        return -ECAPBILITY;
    }

    if (notifc->not_delivered_notifc_count > 0) {
        /* Fast path: consume one already-delivered notification. */
        notifc->not_delivered_notifc_count--;
        ret = 0;
    } else {
        if (is_block) {
            thread = current_thread;
            /*
             * queue_lock: grab the lock and then insert/remove
             * a thread into one list.
             */

            lock(&thread->sleep_state.queue_lock);

            /* Add this thread to waiting list */
            list_append(&thread->notification_queue_node,
                        &notifc->waiting_threads);
            thread->thread_ctx->state = TS_WAITING;
            notifc->waiting_threads_count++;
            /* Return value this thread will see when a signal wakes it. */
            arch_set_thread_return(thread, 0);

            if (timeout) {
                /* Arm the timeout; the cb may race with signal_notific. */
                thread->sleep_state.pending_notific = notifc;
                enqueue_sleeper(thread, timeout, notific_timer_cb);
            }

            /*
             * Since current_thread is TS_WAITING,
             * sched() will not put current_thread into the
             * ready_queue.
             *
             * sched() must executed before unlock.
             * Otherwise, current_thread maybe be notified and then
             * its state will be set to TS_RUNNING. If so, sched()
             * will put it into the ready_queue and it maybe
             * directly switch to.
             */
            sched();

            unlock(&thread->sleep_state.queue_lock);

            unlock(&notifc->notifc_lock);

            /* See the below impl of sys_notify */
            obj_put(notifc);

            eret_to_thread(switch_context());
            /* The control flow will never reach here */
        } else {
            ret = -EAGAIN;
        }
    }
    unlock(&notifc->notifc_lock);
    return ret;
}
167 
/*
 * Stop an IRQ notification (recycling path): mark the object invalid so
 * subsequent wait/signal calls fail with -EINVAL, then wake every
 * blocked waiter with -ECANCELED so no thread remains queued on a dying
 * object. Always returns 0.
 */
int stop_irq_notific(struct irq_notification *irq_notifc)
{
    struct notification *notifc;
    struct thread *target, *tmp;

    notifc = &(irq_notifc->notifc);
    lock(&notifc->notifc_lock);
    notifc->state = OBJECT_STATE_INVALID;
    /* Safe-iteration variant: each waiter is removed as we walk. */
    for_each_in_list_safe (target, tmp, notification_queue_node, &notifc->waiting_threads) {
        list_del(&target->notification_queue_node);
        notifc->waiting_threads_count--;
        target->thread_ctx->state = TS_INTER;
        arch_set_thread_return(target, -ECANCELED);
        BUG_ON(sched_enqueue(target));
    }
    unlock(&notifc->notifc_lock);

    return 0;
}
187 
/*
 * Block the current thread (the user-level IRQ handler) until the IRQ
 * is delivered via signal_irq_notific().
 *
 * Returns -EINVAL if the notification has been stopped. On the success
 * path this function never returns to its caller: after enqueueing the
 * current thread it switches directly to another thread.
 */
int wait_irq_notific(struct irq_notification *irq_notifc)
{
    struct notification *notifc;
    int ret;

    notifc = &(irq_notifc->notifc);
    lock(&notifc->notifc_lock);

    if (notifc->state == OBJECT_STATE_INVALID) {
        ret = -EINVAL;
        goto out_unlock;
    }

    /* Add this thread to waiting list */
    list_append(&current_thread->notification_queue_node,
                &notifc->waiting_threads);
    current_thread->thread_ctx->state = TS_WAITING;
    notifc->waiting_threads_count++;
    /* Return value the handler thread will see once signaled. */
    arch_set_thread_return(current_thread, 0);

    /* Tell the IRQ-delivery side a user handler is now blocked here. */
    irq_notifc->user_handler_ready = 1;

    sched();

    unlock(&notifc->notifc_lock);

    eret_to_thread(switch_context());
    /* The control flow will never reach here */
    BUG_ON(1);

out_unlock:
    unlock(&notifc->notifc_lock);
    return ret;
}
222 
signal_irq_notific(struct irq_notification * irq_notifc)223 int signal_irq_notific(struct irq_notification *irq_notifc)
224 {
225     struct notification *notifc;
226     struct thread *target = NULL;
227     int ret = 0;
228 
229     notifc = &(irq_notifc->notifc);
230 
231     lock(&notifc->notifc_lock);
232 
233     if (notifc->state == OBJECT_STATE_INVALID) {
234         ret = -EINVAL;
235         goto out_unlock;
236     }
237 
238     irq_notifc->user_handler_ready = 0;
239 
240     /*
241      * Some threads have been blocked and waiting for notifc.
242      * Wake up one waiting thread
243      */
244     target = list_entry(
245         notifc->waiting_threads.next, struct thread, notification_queue_node);
246     list_del(&target->notification_queue_node);
247     notifc->waiting_threads_count--;
248 
249     BUG_ON(target->thread_ctx->sc == NULL);
250 
251 
252     // the interrupt direct to.
253 
254     BUG_ON(target->thread_ctx->affinity != NO_AFF
255            && target->thread_ctx->affinity != smp_get_cpu_id());
256 
257     target->thread_ctx->state = TS_INTER;
258     BUG_ON(sched_enqueue(target));
259 
260 out_unlock:
261     unlock(&notifc->notifc_lock);
262     return ret;
263 }
264 
try_remove_timeout(struct thread * target)265 void try_remove_timeout(struct thread *target)
266 {
267     if (target == NULL)
268         return;
269     if (target->sleep_state.cb == NULL)
270         return;
271 
272     try_dequeue_sleeper(target);
273 
274     target->sleep_state.pending_notific = NULL;
275 }
276 
/*
 * Post one signal to @notifc.
 *
 * If no thread is waiting (or undelivered signals already exist), just
 * bump the pending counter; otherwise wake exactly one waiter and
 * cancel its timeout if armed. Returns 0 on success, -ECAPBILITY if
 * the object is being recycled, or -EAGAIN on lock contention with the
 * timer path (the user-level library retries transparently).
 */
int signal_notific(struct notification *notifc)
{
    struct thread *target = NULL;

    lock(&notifc->notifc_lock);

    /* For recycling: the state is set in stop_notification */
    if (notifc->state == OBJECT_STATE_INVALID) {
        unlock(&notifc->notifc_lock);
        return -ECAPBILITY;
    }

    if (notifc->not_delivered_notifc_count > 0
        || notifc->waiting_threads_count == 0) {
        /* No waiter to wake: record the signal for a later wait. */
        notifc->not_delivered_notifc_count++;
    } else {
        /*
         * Some threads have been blocked and waiting for notifc.
         * Wake up one waiting thread
         */
        target = list_entry(notifc->waiting_threads.next,
                            struct thread,
                            notification_queue_node);

        BUG_ON(target == NULL);

        /*
         * signal_notific may return -EAGAIN because of unable to lock.
         * The user-level library will transparently notify again.
         *
         * This is for preventing dead lock because handler_timer_irq
         * may already grab the queue_lock of a thread or the sleep_list
         * lock.
         */
        if (try_lock(&target->sleep_state.queue_lock) != 0) {
            /* Lock failed: must be timeout now */
            unlock(&notifc->notifc_lock);
            return -EAGAIN;
        }

        /* A timeout is armed: it must be cancelled before we wake. */
        if (target->sleep_state.cb != NULL) {
            if (try_dequeue_sleeper(target) == false) {
                /* Failed to remove target in sleep list */
                unlock(&target->sleep_state.queue_lock);
                unlock(&notifc->notifc_lock);
                return -EAGAIN;
            }
        }

        /* Delete the thread from the waiting list of the notification
         */
        list_del(&target->notification_queue_node);
        notifc->waiting_threads_count--;

        target->thread_ctx->state = TS_INTER;
        BUG_ON(sched_enqueue(target));

        unlock(&target->sleep_state.queue_lock);
    }

    unlock(&notifc->notifc_lock);

    return 0;
}
341 
sys_create_notifc(void)342 cap_t sys_create_notifc(void)
343 {
344     struct notification *notifc = NULL;
345     cap_t notifc_cap = 0;
346     int ret = 0;
347 
348     notifc = obj_alloc(TYPE_NOTIFICATION, sizeof(*notifc));
349     if (!notifc) {
350         ret = -ENOMEM;
351         goto out_fail;
352     }
353     init_notific(notifc);
354 
355     notifc_cap = cap_alloc(current_cap_group, notifc);
356     if (notifc_cap < 0) {
357         ret = notifc_cap;
358         goto out_free_obj;
359     }
360 
361     return notifc_cap;
362 
363 out_free_obj:
364     obj_free(notifc);
365 out_fail:
366     return ret;
367 }
368 
/*
 * Syscall: wait on the notification named by @notifc_cap.
 * An optional user-space @timeout is copied into the kernel first.
 * Returns the result of wait_notific(), -ECAPBILITY for a bad
 * capability, or the copy_from_user error for an unreadable timeout.
 */
int sys_wait(cap_t notifc_cap, bool is_block, struct timespec *timeout)
{
    struct notification *notifc;
    struct timespec timeout_k;
    int ret = 0;

    notifc = obj_get(current_thread->cap_group, notifc_cap, TYPE_NOTIFICATION);
    if (!notifc)
        return -ECAPBILITY;

    /* Bring the user-supplied timeout into kernel memory, if any. */
    if (timeout)
        ret = copy_from_user(
            (char *)&timeout_k, (char *)timeout, sizeof(timeout_k));

    if (ret == 0)
        ret = wait_notific(notifc, is_block, timeout ? &timeout_k : NULL);

    /* On the blocking path wait_notific already did obj_put and never
     * returns; this only runs on immediate-return paths. */
    obj_put(notifc);
    return ret;
}
394 
/*
 * Syscall: signal the notification named by @notifc_cap.
 * Returns the result of signal_notific(), or -ECAPBILITY for a bad
 * capability.
 */
int sys_notify(cap_t notifc_cap)
{
    struct notification *notifc;
    int ret;

    notifc = obj_get(current_thread->cap_group, notifc_cap, TYPE_NOTIFICATION);
    if (!notifc)
        return -ECAPBILITY;

    ret = signal_notific(notifc);
    obj_put(notifc);
    return ret;
}
409