/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <irq/irq.h>
#include <irq/timer.h>
#include <sched/sched.h>
#include <arch/machine/smp.h>
#include <object/thread.h>
#include <common/kprint.h>
#include <common/list.h>
#include <common/lock.h>
#include <mm/uaccess.h>
#include <sched/context.h>

/* Per-core timer state */
struct time_state {
    /* The tick at which the next timer irq will occur */
    u64 next_expire;
    /*
     * Records all sleepers on this core.
     * Threads in sleep_list are sorted by their wakeup time.
     */
    struct list_head sleep_list;
    /* Protects the per-core sleep_list */
    struct lock sleep_list_lock;
};

struct time_state time_states[PLAT_CPU_NUM];

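/*
 * timer_init: core 0 initializes every core's sleep_list and its lock;
 * every core then initializes its own platform timer.
 */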
void timer_init(void)
{
    int i;

    if (smp_get_cpu_id() == 0) {
        for (i = 0; i < PLAT_CPU_NUM; i++) {
            init_list_head(&time_states[i].sleep_list);
            lock_init(&time_states[i].sleep_list_lock);
        }
    }

    /* Per-core timer init */
    plat_timer_init();
}

/*
 * Compute how many ticks to wait before the next timer irq.
 * Should be called while holding sleep_list_lock.
 */
static u64 get_next_tick_delta(void)
{
    u64 waiting_tick, current_tick;
    struct list_head *local_sleep_list;
    struct sleep_state *first_sleeper;

    local_sleep_list = &time_states[smp_get_cpu_id()].sleep_list;

    /* Default tick */
    waiting_tick = TICK_MS * US_IN_MS * tick_per_us;
    if (list_empty(local_sleep_list))
        return waiting_tick;

    current_tick = plat_get_current_tick();
    first_sleeper =
        list_entry(local_sleep_list->next, struct sleep_state, sleep_node);
    /* If a thread will wake up before the default tick, shorten the tick. */
    if (current_tick + waiting_tick > first_sleeper->wakeup_tick)
        waiting_tick = first_sleeper->wakeup_tick > current_tick ?
                           first_sleeper->wakeup_tick - current_tick :
                           0;

    return waiting_tick;
}

static void sleep_timer_cb(struct thread *thread);
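
/*
 * Timer irq handler: wake up every sleeper whose wakeup_tick has passed,
 * program the next timer irq, and charge the current thread's budget.
 */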
void handle_timer_irq(void)
{
    u64 current_tick, tick_delta;
    struct time_state *local_time_state;
    struct list_head *local_sleep_list;
    struct lock *local_sleep_list_lock;
    struct sleep_state *iter = NULL, *tmp = NULL;
    struct thread *wakeup_thread;

    /* Remove the threads to wake up from the sleep list */
    current_tick = plat_get_current_tick();
    local_time_state = &time_states[smp_get_cpu_id()];
    local_sleep_list = &local_time_state->sleep_list;
    local_sleep_list_lock = &local_time_state->sleep_list_lock;

    lock(local_sleep_list_lock);
    for_each_in_list_safe (iter, tmp, sleep_node, local_sleep_list) {
        if (iter->wakeup_tick > current_tick) {
            break;
        }

        wakeup_thread = container_of(iter, struct thread, sleep_state);

        /*
         * Grab the thread's queue_lock before operating on its
         * waiting lists (sleep_list and the wait_list of the
         * timed-out notification).
         */
        lock(&wakeup_thread->sleep_state.queue_lock);

        list_del(&iter->sleep_node);

        BUG_ON(wakeup_thread->sleep_state.cb == sleep_timer_cb
               && wakeup_thread->thread_ctx->state != TS_WAITING);
        kdebug("wake up t:%p at:%ld\n", wakeup_thread, current_tick);
        BUG_ON(wakeup_thread->sleep_state.cb == NULL);

        wakeup_thread->sleep_state.cb(wakeup_thread);
        wakeup_thread->sleep_state.cb = NULL;

        unlock(&wakeup_thread->sleep_state.queue_lock);
    }

    /* Set when the next timer irq will arrive */
    tick_delta = get_next_tick_delta();
    unlock(local_sleep_list_lock);

    time_states[smp_get_cpu_id()].next_expire = current_tick + tick_delta;
    plat_handle_timer_irq(tick_delta);

    /* current_thread is the running thread on this core, i.e., current_threads[cpuid] */
    if (current_thread) {
        BUG_ON(!current_thread->thread_ctx->sc);
        BUG_ON(current_thread->thread_ctx->sc->budget == 0);
        current_thread->thread_ctx->sc->budget--;
        /* Print debug message */
#if LOG_LEVEL == DEBUG
        print_thread(current_thread);
#endif
    } else {
        kdebug("Timer: system not running!\n");
    }
}

/*
 * clock_gettime:
 * - the returned time is calculated from system boot
 */
int sys_clock_gettime(clockid_t clock, struct timespec *ts)
{
    struct timespec ts_k;
    u64 mono_ns;
    int r = 0;

    if (!ts)
        return -1;

    r = copy_from_user(&ts_k, ts, sizeof(ts_k));
    if (r) {
        r = -EINVAL;
        goto out_fail;
    }

    mono_ns = plat_get_mono_time();

    ts_k.tv_sec = mono_ns / NS_IN_S;
    ts_k.tv_nsec = mono_ns % NS_IN_S;

    r = copy_to_user(ts, &ts_k, sizeof(ts_k));
    if (r) {
        r = -EINVAL;
        goto out_fail;
    }

    return 0;
out_fail:
    return r;
}

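/*
 * Insert the given thread into the current core's sorted sleep_list and
 * record the callback to invoke once the timeout expires; reprogram the
 * timer if this sleeper wakes up before the currently scheduled irq.
 */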
int enqueue_sleeper(struct thread *thread, const struct timespec *timeout,
                    timer_cb cb)
{
    u64 s, ns, total_us;
    u64 wakeup_tick;
    struct time_state *local_time_state;
    struct list_head *local_sleep_list;
    struct lock *local_sleep_list_lock;
    struct sleep_state *iter;

    s = timeout->tv_sec;
    ns = timeout->tv_nsec;
    total_us = s * US_IN_S + ns / NS_IN_US;

    wakeup_tick = plat_get_current_tick() + total_us * tick_per_us;
    thread->sleep_state.wakeup_tick = wakeup_tick;
    thread->sleep_state.sleep_cpu = smp_get_cpu_id();

    local_time_state = &time_states[smp_get_cpu_id()];
    local_sleep_list = &local_time_state->sleep_list;
    local_sleep_list_lock = &local_time_state->sleep_list_lock;

    lock(local_sleep_list_lock);
    /* Find the insertion point that keeps sleep_list sorted by wakeup_tick. */
    for_each_in_list (iter, struct sleep_state, sleep_node, local_sleep_list) {
        if (iter->wakeup_tick > wakeup_tick)
            break;
    }
    list_append(&thread->sleep_state.sleep_node, &iter->sleep_node);
    thread->sleep_state.cb = cb;

    unlock(local_sleep_list_lock);

    /*
     * If the current sleeper needs to wake up earlier than the next timer
     * irq, update the timer.
     */
    kdebug("next tick:%lld current tick:%lld\n",
           wakeup_tick,
           time_states[smp_get_cpu_id()].next_expire);
    if (time_states[smp_get_cpu_id()].next_expire > wakeup_tick) {
        time_states[smp_get_cpu_id()].next_expire = wakeup_tick;
        plat_set_next_timer(total_us * tick_per_us);
    }

    return 0;
}

/* Returns true if the sleeper is dequeued successfully, false otherwise */
bool try_dequeue_sleeper(struct thread *thread)
{
    struct time_state *target_time_state;
    struct lock *target_sleep_list_lock;
    bool ret = false;

    BUG_ON(thread == NULL);
    target_time_state = &time_states[thread->sleep_state.sleep_cpu];
    target_sleep_list_lock = &target_time_state->sleep_list_lock;

    /*
     * This routine is invoked in sys_notify.
     * Use try_lock to prevent deadlock; sys_notify can be retried.
     */
    if (try_lock(target_sleep_list_lock) == 0) {
        BUG_ON(thread->sleep_state.cb == NULL);

        list_del(&thread->sleep_state.sleep_node);
        thread->sleep_state.cb = NULL;
        ret = true;

        unlock(target_sleep_list_lock);
    }

    return ret;
}

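/* Timeout callback for nanosleep: make the sleeping thread runnable again. */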
static void sleep_timer_cb(struct thread *thread)
{
    thread->thread_ctx->state = TS_INTER;
    BUG_ON(sched_enqueue(thread));
}

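/*
 * clock_nanosleep: put the calling thread to sleep for the requested
 * (relative) duration; it is woken up by handle_timer_irq via sleep_timer_cb.
 */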
int sys_clock_nanosleep(clockid_t clk, int flags, const struct timespec *req,
                        struct timespec *rem)
{
    int ret;
    struct timespec ts_k = {0};

    kdebug("sleep clk:%d flag:%x ", clk, flags);
    kdebug("req:%ld.%ld\n", req ? req->tv_sec : 0, req ? req->tv_nsec : 0);

    /* The remaining time (rem) is not reported back. */
    if (rem != NULL) {
        rem = NULL;
    }

    ret = copy_from_user(&ts_k, (void *)req, sizeof(ts_k));
    if (ret) {
        return -EFAULT;
    }

    /*
     * Note: every operation that inserts a thread into or removes it from
     * a waiting queue (no matter sleeping or waiting for a notification)
     * should grab the thread's queue_lock.
     *
     * If we do not grab the queue_lock here,
     * a potential wrong case may happen:
     *
     * CPU-0: handle_timer_irq -> T times out and is sched_enqueued to CPU-1
     *      CPU-1: T starts to run and invokes nanosleep again.
     *             Thus, enqueue_sleeper is invoked (cb is set)
     * CPU-0: handle_timer_irq -> continues to run and sets cb to NULL,
     *                            i.e., overrides the cb that was just set.
     */
    lock(&current_thread->sleep_state.queue_lock);

    enqueue_sleeper(current_thread, &ts_k, sleep_timer_cb);

    unlock(&current_thread->sleep_state.queue_lock);

    current_thread->thread_ctx->state = TS_WAITING;

    /* Set the return value for nanosleep. */
    arch_set_thread_return(current_thread, 0);

    sched();
    eret_to_thread(switch_context());
    BUG("should not reach here\n");

    return 0;
}