• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
12 #ifndef SCHED_SCHED_H
13 #define SCHED_SCHED_H
14 
15 #include <sched/context.h>
16 #include <common/list.h>
17 #include <machine.h>
18 
/* A scheduling BUDGET is measured in TICKs */
#define DEFAULT_BUDGET 1
/* Length of one tick in milliseconds (by default, one tick every 10 ms) */
#define TICK_MS 10

/* Priority range: [MIN_PRIO, MAX_PRIO], PRIO_NUM distinct levels */
#define MAX_PRIO     255
#define MIN_PRIO     0
#define PRIO_NUM     (MAX_PRIO + 1)
#define IDLE_PRIO    MIN_PRIO
#define DEFAULT_PRIO 10
/* Sentinel meaning "no CPU affinity" */
#define NO_AFF (-1)
32 
/* Life-cycle state of a thread. */
enum thread_state {
    TS_INIT = 0, /* Initial state */
    TS_READY,    /* Ready to be scheduled */
    TS_INTER,    /* Intermediate state used by the scheduler (debug only) */
    TS_RUNNING,  /* Currently running */
    TS_EXIT,     /* Exited (kept for debug use only) */
    TS_WAITING,  /* Blocked, e.g. waiting for IPC */
};
41 
/* Whether a thread's kernel stack is currently claimed. */
enum kernel_stack_state {
    KS_FREE = 0,
    KS_LOCKED
};
43 
/* Progress of a thread through its exit path. */
enum thread_exit_state {
    TE_RUNNING = 0,
    TE_EXITING,
    TE_EXITED
};
45 
/* Kind of a thread; decides how the kernel creates and treats it. */
enum thread_type {
    /*
     * Kernel-level threads
     * 1. Without FPU states
     * 2. Won't swap TLS
     */
    TYPE_IDLE = 0,   /* IDLE thread does not have a stack; pauses the CPU */
    TYPE_KERNEL = 1, /* KERNEL thread has a stack */

    /*
     * User-level threads.
     * Must be larger than TYPE_KERNEL!
     */
    TYPE_USER = 2,
    TYPE_SHADOW = 3,   /* SHADOW thread is used to achieve migrating IPC */
    TYPE_REGISTER = 4, /* Used as the IPC register callback thread */
    TYPE_TESTS = 5     /* TESTS thread is used by kernel tests */
};
64 
/* Forward declaration of struct thread (defined elsewhere). */
struct thread;

/* Operations every scheduling policy must implement. */
struct sched_ops {
    int (*sched_init)(void);                     /* One-time policy initialization */
    int (*sched)(void);                          /* Choose the next thread to run */
    int (*sched_enqueue)(struct thread *thread); /* Insert thread into ready structures */
    int (*sched_dequeue)(struct thread *thread); /* Remove thread from ready structures */
    /* Debug tools */
    void (*sched_top)(void);
};
76 
77 /* Provided Scheduling Policies */
78 extern struct sched_ops pbrr; /* Priority Based Round Robin */
79 extern struct sched_ops rr; /* Simple Round Robin */
80 
81 /* Chosen Scheduling Policies */
82 extern struct sched_ops *cur_sched_ops;
83 
84 /* Scheduler module local interfaces */
85 int switch_to_thread(struct thread *target);
86 int get_cpubind(struct thread *thread);
87 struct thread *find_runnable_thread(struct list_head *thread_list);
88 
89 /* Global interfaces */
90 /* Print the thread information */
91 void print_thread(struct thread *thread);
92 
93 /*
94  * A common usage pattern:
95  *   sched(); // Choose one thread to run (as current_thread)
96  *   eret_to_thread(switch_context()); // Switch context between current_thread and previous thread
97  */
98 vaddr_t switch_context(void);
99 void eret_to_thread(vaddr_t sp);
100 
101 /* Arch-dependent func declaraions, which are defined in arch/.../sched.c */
102 void arch_idle_ctx_init(struct thread_ctx *idle_ctx, void (*func)(void));
103 void arch_switch_context(struct thread *target);
104 
105 /* Arch-dependent func declaraions, which are defined in assembly files */
106 extern void idle_thread_routine(void);
107 extern void __eret_to_thread(unsigned long sp);
108 
109 /* Direct switch to the target thread (fast path) or put it into the ready queue (slow path) */
110 void sched_to_thread(struct thread *target);
111 /* Add a mark indicating re-sched is needed on cpuid */
112 void add_pending_resched(unsigned int cpuid);
113 /* Wait until the kernel stack of target thread is free */
114 void wait_for_kernel_stack(struct thread *thread);
115 
116 int sched_init(struct sched_ops *sched_ops);
117 
sched(void)118 static inline int sched(void)
119 {
120     return cur_sched_ops->sched();
121 }
122 
sched_enqueue(struct thread * thread)123 static inline int sched_enqueue(struct thread *thread)
124 {
125     return cur_sched_ops->sched_enqueue(thread);
126 }
127 
sched_dequeue(struct thread * thread)128 static inline int sched_dequeue(struct thread *thread)
129 {
130     return cur_sched_ops->sched_dequeue(thread);
131 }
132 
/* Syscalls */
void sys_yield(void); /* Voluntarily yield the CPU */
void sys_top(void);   /* Debug: dump scheduler information */
136 
137 #endif /* SCHED_SCHED_H */