/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <sched/sched.h>
#include <common/kprint.h>
#include <machine.h>
#include <mm/kmalloc.h>
#include <object/thread.h>

/*
 * The RR policy also has idle threads.
 * When there are no active user threads in the ready queue,
 * we choose the idle thread to execute.
 * The idle thread will **NOT** be in the RQ.
 */

/* Metadata for ready queue */
struct queue_meta {
    struct list_head queue_head;
    unsigned int queue_len;
    struct lock queue_lock;
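    /* Pad to a full cache line so per-CPU queue_meta entries do not
     * share cache lines (avoids false sharing between cores). */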
    char pad[pad_to_cache_line(sizeof(unsigned int) + sizeof(struct list_head)
                               + sizeof(struct lock))];
};

/*
 * All the variables (except struct sched_ops rr) and functions are logically static.
 * We omit the static modifier because they are referenced in kernel tests.
 */

/* rr_ready_queue_meta: per-CPU ready queue for ready tasks. */
struct queue_meta rr_ready_queue_meta[PLAT_CPU_NUM];

int __rr_sched_enqueue(struct thread *thread, int cpuid)
{
    /* Already in the ready queue */
    if (thread->thread_ctx->state == TS_READY) {
        return -EINVAL;
    }
    thread->thread_ctx->cpuid = cpuid;
    thread->thread_ctx->state = TS_READY;
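    /* Hold a reference while the thread sits in the ready queue;
     * it is released in __rr_sched_dequeue(). */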
    obj_ref(thread);
    list_append(&(thread->ready_queue_node),
                &(rr_ready_queue_meta[cpuid].queue_head));
    rr_ready_queue_meta[cpuid].queue_len++;

    return 0;
}

#define LOADBALANCE_THRESHOLD 5
#define MIGRATE_THRESHOLD     5
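/*
 * The local queue is preferred as long as it holds no more than
 * LOADBALANCE_THRESHOLD threads; a remote CPU is chosen only when its
 * ready queue is shorter than the local one by more than MIGRATE_THRESHOLD.
 */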

/* A simple load balance when enqueuing threads */
unsigned int rr_sched_choose_cpu(void)
{
    unsigned int i, cpuid, min_rr_len, local_cpuid, queue_len;

    local_cpuid = smp_get_cpu_id();
    min_rr_len = rr_ready_queue_meta[local_cpuid].queue_len;

    if (min_rr_len <= LOADBALANCE_THRESHOLD) {
        return local_cpuid;
    }

    /* Find the cpu with the shortest ready queue */
    cpuid = local_cpuid;
    for (i = 0; i < PLAT_CPU_NUM; i++) {
        if (i == local_cpuid) {
            continue;
        }

        queue_len = rr_ready_queue_meta[i].queue_len + MIGRATE_THRESHOLD;
        if (queue_len < min_rr_len) {
            min_rr_len = queue_len;
            cpuid = i;
        }
    }

    return cpuid;
}

/*
 * Sched_enqueue
 * Put `thread` at the end of the ready queue selected by its `affinity` and `prio`.
 * If affinity is NO_AFF, pick a CPU via rr_sched_choose_cpu() (the current
 * one unless it is overloaded).
 * If the thread is the IDLE thread, do nothing!
 */
int rr_sched_enqueue(struct thread *thread)
{
    BUG_ON(!thread);
    BUG_ON(!thread->thread_ctx);

    int cpubind = 0;
    unsigned int cpuid = 0;
    int ret = 0;

    if (thread->thread_ctx->type == TYPE_IDLE)
        return 0;

    cpubind = get_cpubind(thread);
    cpuid = cpubind == NO_AFF ? rr_sched_choose_cpu() : cpubind;

    if (unlikely(thread->thread_ctx->sc->prio > MAX_PRIO))
        return -EINVAL;

    if (unlikely(cpuid >= PLAT_CPU_NUM)) {
        return -EINVAL;
    }

    lock(&(rr_ready_queue_meta[cpuid].queue_lock));
    ret = __rr_sched_enqueue(thread, cpuid);
    unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
    return ret;
}

/* Dequeue without taking the queue lock; the caller must hold it. */
int __rr_sched_dequeue(struct thread *thread)
{
    if (thread->thread_ctx->state != TS_READY) {
        kwarn("%s: thread state is %d\n", __func__, thread->thread_ctx->state);
        return -EINVAL;
    }
    list_del(&(thread->ready_queue_node));
    rr_ready_queue_meta[thread->thread_ctx->cpuid].queue_len--;
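    /* TS_INTER marks a thread in transition between scheduler states
     * (removed from the ready queue, not yet running). */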
    thread->thread_ctx->state = TS_INTER;
    obj_put(thread);
    return 0;
}

/*
 * Remove @thread from the ready queue it currently resides in.
 */
int rr_sched_dequeue(struct thread *thread)
{
    BUG_ON(!thread);
    BUG_ON(!thread->thread_ctx);
    /* IDLE thread will **not** be in any ready queue */
    BUG_ON(thread->thread_ctx->type == TYPE_IDLE);

    unsigned int cpuid = 0;
    int ret = 0;

    cpuid = thread->thread_ctx->cpuid;
    lock(&(rr_ready_queue_meta[cpuid].queue_lock));
    ret = __rr_sched_dequeue(thread);
    unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
    return ret;
}

/*
 * Choose an appropriate thread and dequeue it from the ready queue.
 */
struct thread *rr_sched_choose_thread(void)
{
    unsigned int cpuid = smp_get_cpu_id();
    struct thread *thread = NULL;

    if (!list_empty(&(rr_ready_queue_meta[cpuid].queue_head))) {
        lock(&(rr_ready_queue_meta[cpuid].queue_lock));
    again:
        if (list_empty(&(rr_ready_queue_meta[cpuid].queue_head))) {
            unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
            goto out;
        }
        /*
         * When the thread has just been moved from another cpu and its
         * kernel stack is still in use by the original core, try
         * to find another thread.
         */
        if (!(thread = find_runnable_thread(
                  &(rr_ready_queue_meta[cpuid].queue_head)))) {
            unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
            goto out;
        }

        BUG_ON(__rr_sched_dequeue(thread));
        if (thread->thread_ctx->thread_exit_state == TE_EXITING
            || thread->thread_ctx->thread_exit_state == TE_EXITED) {
            /* The thread needs to exit. Set the state to TS_EXIT. */
            thread->thread_ctx->state = TS_EXIT;
            thread->thread_ctx->thread_exit_state = TE_EXITED;
            goto again;
        }
        unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
        return thread;
    }
out:
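    /* No runnable thread on this core: fall back to the per-CPU idle
     * thread, which is never kept in the ready queue. */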
    return &idle_threads[cpuid];
}

static inline void rr_sched_refill_budget(struct thread *target,
                                          unsigned int budget)
{
    target->thread_ctx->sc->budget = budget;
}

/*
 * Schedule a thread to execute.
 * current_thread can be NULL, or its state is TS_RUNNING or TS_WAITING.
 * This function suspends the current running thread, if any, and schedules
 * another thread from `(rr_ready_queue_meta[cpuid].queue_head)`.
 * ***the following text might be outdated***
 * 1. Choose an appropriate thread by calling *chooseThread* (Simple
 *    Priority-Based Policy)
 * 2. Update the current running thread and leave it to the caller to
 *    restore the execution context
 */
int rr_sched(void)
{
    /* WITH IRQ Disabled */
    struct thread *old = current_thread;
    struct thread *new = 0;

    if (old) {
        BUG_ON(!old->thread_ctx);

        /* The old thread may pass its scheduling context to others. */
        if (old->thread_ctx->type != TYPE_SHADOW
            && old->thread_ctx->type != TYPE_REGISTER) {
            BUG_ON(!old->thread_ctx->sc);
        }

        /* Setting TE_EXITING after the check will not cause any trouble;
         * the thread will be recycled afterwards. This is just a fast path. */
        /* Check whether the thread is going to exit */
        if (old->thread_ctx->thread_exit_state == TE_EXITING) {
            /* Set the state to TS_EXIT */
            old->thread_ctx->state = TS_EXIT;
            old->thread_ctx->thread_exit_state = TE_EXITED;
        }

        /* check old state */
        switch (old->thread_ctx->state) {
        case TS_EXIT:
            /* do nothing */
            break;
        case TS_RUNNING:
            /* A thread without SC should not be TS_RUNNING. */
            BUG_ON(!old->thread_ctx->sc);
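            /* Budget left: keep running the current thread without a
             * reschedule; otherwise refill its budget and put it back
             * into the ready queue. */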
            if (old->thread_ctx->sc->budget != 0) {
                switch_to_thread(old);
                return 0; /* no schedule needed */
            }
            rr_sched_refill_budget(old, DEFAULT_BUDGET);
            old->thread_ctx->state = TS_INTER;
            BUG_ON(rr_sched_enqueue(old) != 0);
            break;
        case TS_WAITING:
            /* do nothing */
            break;
        default:
            kinfo("thread state: %d\n", old->thread_ctx->state);
            BUG_ON(1);
            break;
        }
    }

    BUG_ON(!(new = rr_sched_choose_thread()));
    switch_to_thread(new);

    return 0;
}

int rr_sched_init(void)
{
    int i = 0;

    /* Initialize global variables */
    for (i = 0; i < PLAT_CPU_NUM; i++) {
        init_list_head(&(rr_ready_queue_meta[i].queue_head));
        lock_init(&(rr_ready_queue_meta[i].queue_lock));
        rr_ready_queue_meta[i].queue_len = 0;
    }

    return 0;
}

void rr_top(void)
{
    unsigned int cpuid;
    struct thread *thread;
#define MAX_CAP_GROUP_BUF 128
    struct cap_group *cap_group_buf[MAX_CAP_GROUP_BUF] = {0};
    unsigned int cap_group_num = 0;
    int i = 0;

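    /* Take every run-queue lock up front so the dump below is a
     * consistent snapshot across all CPUs. */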
    for (cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
        lock(&(rr_ready_queue_meta[cpuid].queue_lock));
    }

    printk("\n*****CPU RQ Info*****\n");
    for (cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
        printk("== CPU %d RQ LEN %u ==\n",
               cpuid,
               rr_ready_queue_meta[cpuid].queue_len);
        thread = current_threads[cpuid];
        if (thread != NULL) {
            for (i = 0; i < cap_group_num; i++)
                if (thread->cap_group == cap_group_buf[i])
                    break;
            if (i == cap_group_num && cap_group_num < MAX_CAP_GROUP_BUF) {
                cap_group_buf[cap_group_num] = thread->cap_group;
                cap_group_num++;
            }
            printk("Current ");
            print_thread(thread);
        }
        if (!list_empty(&(rr_ready_queue_meta[cpuid].queue_head))) {
            for_each_in_list (thread,
                              struct thread,
                              ready_queue_node,
                              &(rr_ready_queue_meta[cpuid].queue_head)) {
                for (i = 0; i < cap_group_num; i++)
                    if (thread->cap_group == cap_group_buf[i])
                        break;
                if (i == cap_group_num && cap_group_num < MAX_CAP_GROUP_BUF) {
                    cap_group_buf[cap_group_num] = thread->cap_group;
                    cap_group_num++;
                }
                print_thread(thread);
            }
        }
        printk("\n");
    }

    printk("\n*****CAP GROUP Info*****\n");
    for (i = 0; i < cap_group_num; i++) {
        printk("== CAP GROUP:%s ==\n", cap_group_buf[i]->cap_group_name);
        for_each_in_list (
            thread, struct thread, node, &(cap_group_buf[i]->thread_list)) {
            print_thread(thread);
        }
        printk("\n");
    }
    for (cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
        unlock(&(rr_ready_queue_meta[cpuid].queue_lock));
    }
}

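/* The round-robin policy exported via struct sched_ops; the rest of the
 * kernel invokes the RR implementation through these hooks. */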
struct sched_ops rr = {.sched_init = rr_sched_init,
                       .sched = rr_sched,
                       .sched_enqueue = rr_sched_enqueue,
                       .sched_dequeue = rr_sched_dequeue,
                       .sched_top = rr_top};