/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <sched/sched.h>
#include <common/util.h>
#include <common/bitops.h>
#include <common/kprint.h>
#include <object/thread.h>

/*
 * All the variables (except struct sched_ops pbrr) and functions are static.
 * We omit the static modifier on some of them because they are used in
 * kernel tests.
 */

/*
 * Priority bitmap.
 * Record the priorities of the ready threads in a bitmap
 * so that we can find the ready thread with the highest priority in O(1) time.
 * A single bitmap suffices for small PRIO_NUM; a multi-level bitmap is used
 * for large PRIO_NUM.
 */
#define PRIOS_PER_LEVEL  32
#define PRIO_LEVEL_SHIFT 5
#define PRIO_LEVEL_MASK  0x1f
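
/*
 * Worked example of the index arithmetic (two-level case): for prio = 77,
 *   index_lvl0 = 77 >> PRIO_LEVEL_SHIFT = 2,
 *   index_lvl1 = 77 & PRIO_LEVEL_MASK = 13,
 * so bit 2 of bitmap_lvl0 and bit 13 of bitmap_lvl1[2] are set, and
 * (2 << PRIO_LEVEL_SHIFT) + 13 recovers 77. In the single-level case,
 * prio maps directly to bit prio of the one bitmap word.
 */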

#if PRIO_NUM <= PRIOS_PER_LEVEL
struct prio_bitmap {
        unsigned int bitmap;
};

static inline void prio_bitmap_init(struct prio_bitmap *bitmap)
{
        bitmap->bitmap = 0;
}

static inline void prio_bitmap_set(struct prio_bitmap *bitmap,
                                   unsigned int prio)
{
        bitmap->bitmap |= BIT(prio);
}

static inline void prio_bitmap_clear(struct prio_bitmap *bitmap,
                                     unsigned int prio)
{
        bitmap->bitmap &= ~BIT(prio);
}

static inline bool prio_bitmap_is_empty(struct prio_bitmap *bitmap)
{
        return bitmap->bitmap == 0;
}

static inline int get_highest_prio(struct prio_bitmap *bitmap)
{
        /* bsr returns the index of the most-significant set bit. */
        return bsr(bitmap->bitmap);
}
#elif PRIO_NUM <= PRIOS_PER_LEVEL * PRIOS_PER_LEVEL

struct prio_bitmap {
        unsigned int bitmap_lvl0;
        unsigned int bitmap_lvl1[PRIOS_PER_LEVEL];
};

static inline void prio_bitmap_init(struct prio_bitmap *bitmap)
{
        memset(bitmap, 0, sizeof(*bitmap));
}

static inline void prio_bitmap_set(struct prio_bitmap *bitmap,
                                   unsigned int prio)
{
        unsigned int index_lvl0 = prio >> PRIO_LEVEL_SHIFT;
        unsigned int index_lvl1 = prio & PRIO_LEVEL_MASK;

        BUG_ON(index_lvl0 >= PRIOS_PER_LEVEL);

        /* Set both the level-0 summary bit and the level-1 bit. */
        bitmap->bitmap_lvl0 |= BIT(index_lvl0);
        bitmap->bitmap_lvl1[index_lvl0] |= BIT(index_lvl1);
}

static inline void prio_bitmap_clear(struct prio_bitmap *bitmap,
                                     unsigned int prio)
{
        unsigned int index_lvl0 = prio >> PRIO_LEVEL_SHIFT;
        unsigned int index_lvl1 = prio & PRIO_LEVEL_MASK;

        BUG_ON(index_lvl0 >= PRIOS_PER_LEVEL);

        /*
         * Clear the level-1 bit; clear the level-0 summary bit only
         * when the whole level-1 word becomes empty.
         */
        bitmap->bitmap_lvl1[index_lvl0] &= ~BIT(index_lvl1);
        if (bitmap->bitmap_lvl1[index_lvl0] == 0) {
                bitmap->bitmap_lvl0 &= ~BIT(index_lvl0);
        }
}

static inline bool prio_bitmap_is_empty(struct prio_bitmap *bitmap)
{
        return bitmap->bitmap_lvl0 == 0;
}

static inline unsigned int get_highest_prio(struct prio_bitmap *bitmap)
{
        unsigned int index_lvl0;
        unsigned int index_lvl1;

        /*
         * Two bsr lookups: first the highest non-empty level-1 word,
         * then the highest set bit within that word.
         */
        index_lvl0 = bsr(bitmap->bitmap_lvl0);
        index_lvl1 = bsr(bitmap->bitmap_lvl1[index_lvl0]);

        return (index_lvl0 << PRIO_LEVEL_SHIFT) + index_lvl1;
}
#else
#error PRIO_NUM should not be larger than 1024
#endif
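
/*
 * Illustrative (not compiled) sketch of the prio_bitmap API:
 *
 *     struct prio_bitmap bm;
 *     prio_bitmap_init(&bm);
 *     prio_bitmap_set(&bm, 3);
 *     prio_bitmap_set(&bm, 10);
 *     // get_highest_prio(&bm) == 10
 *     prio_bitmap_clear(&bm, 10);
 *     // get_highest_prio(&bm) == 3; after clearing 3,
 *     // prio_bitmap_is_empty(&bm) holds.
 */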

/*
 * Per-CPU ready queue: one FIFO list per priority plus a bitmap recording
 * which lists are non-empty (a priority's bit is set iff queues[prio]
 * contains at least one thread). The lock protects both.
 */
struct pbrr_ready_queue {
        struct list_head queues[PRIO_NUM];
        struct prio_bitmap bitmap;
        struct lock lock;
};

static struct pbrr_ready_queue pbrr_ready_queues[PLAT_CPU_NUM];

int pbrr_sched_enqueue(struct thread *thread)
{
        int cpubind;
        unsigned int cpuid, prio;
        struct pbrr_ready_queue *ready_queue;

        BUG_ON(thread == NULL);
        BUG_ON(thread->thread_ctx == NULL);

        /* Already in a ready queue */
        if (thread->thread_ctx->state == TS_READY) {
                return -EINVAL;
        }

        prio = thread->thread_ctx->sc->prio;
        BUG_ON(prio >= PRIO_NUM);

        /* Respect the thread's CPU binding; otherwise stay on this CPU. */
        cpubind = get_cpubind(thread);
        cpuid = (cpubind == NO_AFF ? smp_get_cpu_id() : cpubind);

        thread->thread_ctx->cpuid = cpuid;
        thread->thread_ctx->state = TS_READY;

        ready_queue = &pbrr_ready_queues[cpuid];
        lock(&ready_queue->lock);
        /* The ready queue holds a reference to every non-idle thread. */
        if (thread->thread_ctx->type != TYPE_IDLE)
                obj_ref(thread);
        list_append(&thread->ready_queue_node, &ready_queue->queues[prio]);
        prio_bitmap_set(&ready_queue->bitmap, prio);
        unlock(&ready_queue->lock);

        /*
         * In the RT kernel, ask the target CPU to re-schedule so that a
         * newly ready thread can preempt promptly.
         */
#ifdef CHCORE_KERNEL_RT
#ifdef CHCORE_KERNEL_TEST
        if (thread->thread_ctx->type != TYPE_TESTS) {
                add_pending_resched(cpuid);
        }
#else
        add_pending_resched(cpuid);
#endif
#endif /* CHCORE_KERNEL_RT */

        return 0;
}

/*
 * Remove the thread from its ready queue.
 * Called with the owning ready_queue->lock held; the obj_put() here
 * releases the reference taken by obj_ref() in pbrr_sched_enqueue().
 */
static void __pbrr_sched_dequeue(struct thread *thread)
{
        unsigned int cpuid, prio;
        struct pbrr_ready_queue *ready_queue;

        cpuid = thread->thread_ctx->cpuid;
        prio = thread->thread_ctx->sc->prio;
        ready_queue = &pbrr_ready_queues[cpuid];

        thread->thread_ctx->state = TS_INTER;
        list_del(&thread->ready_queue_node);
        /* Keep the bitmap consistent with the (possibly now empty) list. */
        if (list_empty(&ready_queue->queues[prio])) {
                prio_bitmap_clear(&ready_queue->bitmap, prio);
        }
        if (thread->thread_ctx->type != TYPE_IDLE)
                obj_put(thread);
}

static int pbrr_sched_dequeue(struct thread *thread)
{
        /* Unused; threads are dequeued internally via __pbrr_sched_dequeue. */
        return -1;
}

static struct thread *pbrr_sched_choose_thread(void)
{
        unsigned int cpuid, highest_prio;
        struct thread *thread;
        struct pbrr_ready_queue *ready_queue;
        bool current_thread_runnable;

        cpuid = smp_get_cpu_id();
        ready_queue = &pbrr_ready_queues[cpuid];

        thread = current_thread;
        current_thread_runnable = thread != NULL
                                  && thread->thread_ctx->state == TS_RUNNING
                                  && (thread->thread_ctx->affinity == NO_AFF
                                      || thread->thread_ctx->affinity == cpuid);

        lock(&ready_queue->lock);

retry:
        thread = current_thread;

        /* Choose current_thread if there is no other ready thread. */
        if (prio_bitmap_is_empty(&ready_queue->bitmap)) {
                BUG_ON(thread->thread_ctx->type != TYPE_IDLE);
                BUG_ON(!current_thread_runnable);
                goto out_unlock_ready_queue;
        }

        highest_prio = get_highest_prio(&ready_queue->bitmap);

        /*
         * Keep running current_thread if it has a strictly higher
         * priority, or the same priority with budget remaining.
         */
        if (current_thread_runnable
            && (thread->thread_ctx->sc->prio > highest_prio
                || (thread->thread_ctx->sc->prio == highest_prio
                    && thread->thread_ctx->sc->budget > 0))) {
                goto out_unlock_ready_queue;
        }

        /*
         * If the thread has just been moved from another CPU and its
         * kernel stack is still in use by the original CPU, choose
         * the idle thread instead.
         *
         * We assume that thread migration between CPUs is rare in a
         * real-time system, because users usually set the CPU affinity
         * of their threads.
         */
        thread = find_runnable_thread(&ready_queue->queues[highest_prio]);
        if (thread == NULL) {
                thread = &idle_threads[cpuid];
                if (thread == current_thread) {
                        BUG_ON(!current_thread_runnable);
                        goto out_unlock_ready_queue;
                }
        }

        __pbrr_sched_dequeue(thread);

        /* If the chosen thread is going to exit, choose another thread. */
        if (thread->thread_ctx->thread_exit_state == TE_EXITING
            || thread->thread_ctx->thread_exit_state == TE_EXITED) {
                thread->thread_ctx->thread_exit_state = TE_EXITED;
                thread->thread_ctx->state = TS_EXIT;
                goto retry;
        }

out_unlock_ready_queue:
        unlock(&ready_queue->lock);
        return thread;
}

static void pbrr_top(void)
{
        printk("pbrr_top unimplemented\n");
}

/*
 * The PBRR scheduling entry: retire an exiting current thread, pick the
 * next thread, re-enqueue the preempted one, refill an exhausted budget,
 * and switch.
 */
int pbrr_sched(void)
{
        struct thread *old, *new;

        old = current_thread;

        /* Check whether the old thread is going to exit. */
        if (old != NULL && old->thread_ctx->thread_exit_state == TE_EXITING) {
                old->thread_ctx->thread_exit_state = TE_EXITED;
                old->thread_ctx->state = TS_EXIT;
        }

        new = pbrr_sched_choose_thread();
        BUG_ON(new == NULL);

        /* A still-running old thread goes back to the ready queue. */
        if (old != NULL && old->thread_ctx->state == TS_RUNNING && new != old) {
                BUG_ON(pbrr_sched_enqueue(old));
        }

        /* Refill the budget so the chosen thread gets a full round. */
        if (new->thread_ctx->sc->budget == 0) {
                new->thread_ctx->sc->budget = DEFAULT_BUDGET;
        }

        switch_to_thread(new);

        return 0;
}
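
/*
 * Round-robin illustration (a hypothetical timeline; it assumes the timer
 * path decrements sc->budget, which happens outside this file): with
 * threads A and B both at priority 10, pbrr_sched_choose_thread() keeps
 * choosing A while A's budget is positive. Once the budget reaches 0,
 * B is chosen instead, A is re-enqueued by pbrr_sched(), and B's budget,
 * if exhausted, is refilled to DEFAULT_BUDGET for a full round.
 */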

static int pbrr_sched_init(void)
{
        unsigned int i, j;

        /* Initialize the ready queues */
        for (i = 0; i < PLAT_CPU_NUM; i++) {
                prio_bitmap_init(&pbrr_ready_queues[i].bitmap);
                lock_init(&pbrr_ready_queues[i].lock);
                for (j = 0; j < PRIO_NUM; j++) {
                        init_list_head(&pbrr_ready_queues[i].queues[j]);
                }
        }

        /* Insert the idle threads into the ready queues */
        for (i = 0; i < PLAT_CPU_NUM; i++) {
                pbrr_sched_enqueue(&idle_threads[i]);
        }

        return 0;
}

/*
 * Deliberately non-static: this is the interface the scheduler core uses
 * to drive PBRR (see the note at the top of this file).
 */
struct sched_ops pbrr = {.sched_init = pbrr_sched_init,
                         .sched = pbrr_sched,
                         .sched_enqueue = pbrr_sched_enqueue,
                         .sched_dequeue = pbrr_sched_dequeue,
                         .sched_top = pbrr_top};
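
/*
 * A minimal registration sketch (an assumption, not code from this tree;
 * the cur_sched_ops name and the extern declaration are hypothetical):
 *
 *     extern struct sched_ops pbrr;
 *     cur_sched_ops = &pbrr;
 *     cur_sched_ops->sched_init();
 */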