1 // Copyright 2025 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14
15 use core::cell::UnsafeCell;
16 use core::mem::offset_of;
17
18 use foreign_box::ForeignBox;
19 use list::*;
20 use pw_log::info;
21
22 use crate::arch::{Arch, ArchInterface, ArchThreadState, ThreadState};
23 use crate::sync::spinlock::{SpinLock, SpinLockGuard};
24
25 mod locks;
26
27 pub use locks::{SchedLock, SchedLockGuard, WaitQueueLock};
28
/// A thread's stack, described by its bounding addresses.
///
/// `start` is the lowest address of the stack memory and `end` is the
/// *last valid* byte (inclusive), not one-past-the-end.
#[derive(Clone, Copy)]
pub struct Stack {
    // Lowest address of the stack region.
    start: *const u8,
    // Highest (inclusive) address of the stack region.
    end: *const u8,
}

#[allow(dead_code)]
impl Stack {
    /// Creates a `Stack` covering the memory of `slice`.
    ///
    /// # Panics
    /// Panics if `slice` is empty. An empty slice has no last byte for
    /// `end` to point at; the previous unchecked `slice.len() - 1` would
    /// wrap in release builds and make the subsequent `start.add(...)`
    /// undefined behavior.
    pub const fn from_slice(slice: &[u8]) -> Self {
        assert!(!slice.is_empty());
        let start: *const u8 = slice.as_ptr();
        // Safety: offset stays within the slice (len >= 1 checked above).
        let end = unsafe { start.add(slice.len() - 1) };
        Self { start, end }
    }

    /// Creates an empty placeholder stack with null bounds.
    const fn new() -> Self {
        Self {
            start: core::ptr::null(),
            end: core::ptr::null(),
        }
    }

    /// Returns the lowest address of the stack region.
    pub fn start(self) -> *const u8 {
        self.start
    }

    /// Returns the highest (inclusive) address of the stack region.
    pub fn end(self) -> *const u8 {
        self.end
    }
}
58
// TODO: want to name this ThreadState, but collides with ArchThreadstate
/// Lifecycle state of a [`Thread`].
#[derive(Copy, Clone, PartialEq)]
enum State {
    /// Just constructed; `Thread::initialize` has not been called yet.
    New,
    /// Initialized (stack and entry frame set up) but never started.
    Initial,
    /// Runnable; eligible to sit on the run queue.
    Ready,
    /// Currently executing.
    Running,
    /// Exited via `exit_thread`; never requeued.
    Stopped,
    /// Blocked on a wait queue.
    Waiting,
}
69
70 // TODO: use From or Into trait (unclear how to do it with 'static str)
to_string(s: State) -> &'static str71 fn to_string(s: State) -> &'static str {
72 match s {
73 State::New => "New",
74 State::Initial => "Initial",
75 State::Ready => "Ready",
76 State::Running => "Running",
77 State::Stopped => "Stopped",
78 State::Waiting => "Waiting",
79 }
80 }
81
/// A kernel thread control block.
pub struct Thread {
    // Link in the list of all threads in the system
    // (`SchedulerState::thread_list`, via `GlobalThreadListAdapter`).
    pub global_link: Link,

    // Active state link (run queue, wait queue, etc), via
    // `ThreadListAdapter`.
    pub active_link: Link,

    // Current lifecycle state (see `State`).
    state: State,
    // The memory this thread's stack lives in.
    stack: Stack,

    // Architecturally specific thread state, saved on context switch.
    // Accessed through a raw pointer by the context switch code, hence
    // the UnsafeCell.
    pub arch_thread_state: UnsafeCell<ArchThreadState>,
}
95
/// Intrusive-list adapter locating a `Thread` from its `active_link`
/// (run queue / wait queue membership).
pub struct ThreadListAdapter {}

impl list::Adapter for ThreadListAdapter {
    const LINK_OFFSET: usize = offset_of!(Thread, active_link);
}

/// Intrusive-list adapter locating a `Thread` from its `global_link`
/// (membership in the global all-threads list).
pub struct GlobalThreadListAdapter {}

impl list::Adapter for GlobalThreadListAdapter {
    const LINK_OFFSET: usize = offset_of!(Thread, global_link);
}
107
impl Thread {
    /// Creates an empty, uninitialized thread in the `New` state.
    ///
    /// The thread is not usable until [`Thread::initialize`] is called.
    pub fn new() -> Self {
        Thread {
            global_link: Link::new(),
            active_link: Link::new(),
            state: State::New,
            arch_thread_state: UnsafeCell::new(ThreadState::new()),
            stack: Stack::new(),
        }
    }

    /// Initializes the mutable parts of the thread; must be called once per
    /// thread prior to starting it.
    ///
    /// Sets up the architecture-specific frame so the thread will begin
    /// execution at `entry_point(arg)` on `stack`, registers the thread on
    /// the global thread list, and moves it to the `Initial` state.
    /// Returns `self` for chaining.
    ///
    /// # Panics
    /// Panics if the thread is not in the `New` state (i.e. if called more
    /// than once).
    #[allow(dead_code)]
    pub fn initialize(&mut self, stack: Stack, entry_point: fn(usize), arg: usize) -> &mut Thread {
        assert!(self.state == State::New);
        self.stack = stack;

        // Call the arch to arrange for the thread to start directly
        // at `entry_point` with `arg` on the first switch to it.
        unsafe {
            (*self.arch_thread_state.get()).initialize_frame(stack, entry_point, arg);
        }
        self.state = State::Initial;

        // Add ourselves to the global thread list.
        SCHEDULER_STATE.lock().add_thread_to_list(self);

        self
    }

    /// Makes `thread` runnable and triggers a reschedule.
    ///
    /// If a thread is currently running it is placed back at the *head* of
    /// the run queue; the new thread is queued at the tail.
    ///
    /// # Panics
    /// Panics if `thread` is not in the `Initial` state.
    #[allow(dead_code)]
    pub fn start(mut thread: ForeignBox<Self>) {
        info!("starting thread {:#x}", thread.id());

        assert!(thread.state == State::Initial);
        thread.state = State::Ready;

        let mut sched_state = SCHEDULER_STATE.lock();

        // If there is a current thread, put it back on the top of the run queue.
        let id = if let Some(mut current_thread) = sched_state.current_thread.take() {
            let id = current_thread.id();
            current_thread.state = State::Ready;
            sched_state.insert_in_run_queue_head(current_thread);
            id
        } else {
            // No current thread (e.g. very early in bootstrap).
            Self::null_id()
        };

        sched_state.insert_in_run_queue_tail(thread);

        // Hand the lock to the scheduler and trigger a reschedule event.
        reschedule(sched_state, id);
    }

    /// Dumps useful information about this thread to the console.
    #[allow(dead_code)]
    pub fn dump(&self) {
        info!("thread {:#x} state {}", self.id(), to_string(self.state));
    }

    /// A simple id for debugging purposes; currently the address of the
    /// thread structure itself.
    pub fn id(&self) -> usize {
        core::ptr::from_ref(self) as usize
    }

    /// An id that can not be assigned to any thread in the system.
    pub const fn null_id() -> usize {
        // `core::ptr::null::<Self>() as usize` can not be evaluated at const time
        // and a null pointer is defined to be at address 0 (see
        // https://doc.rust-lang.org/beta/core/ptr/fn.null.html).
        0usize
    }
}
183
/// Performs the one-time switch from the boot context into `thread`.
///
/// Never returns: the scheduler context switches into `thread` and the
/// boot context is abandoned.
///
/// # Panics
/// Panics if `thread` is not in the `Initial` state.
pub fn bootstrap_scheduler(mut thread: ForeignBox<Thread>) -> ! {
    let mut sched_state = SCHEDULER_STATE.lock();

    // TODO: assert that this is called exactly once at bootup to switch
    // to this particular thread.
    assert!(thread.state == State::Initial);
    thread.state = State::Ready;

    sched_state.run_queue.push_back(thread);

    info!("context switching to first thread");

    // Special case where we're switching from a non-thread to something real:
    // give the context switch somewhere to save the (discarded) boot state.
    //
    // NOTE(review): `temp_arch_thread_state` is a stack local whose raw
    // pointer is stored into global scheduler state. This looks sound only
    // because this function never returns and the boot context is never
    // switched back to — confirm.
    let mut temp_arch_thread_state = ArchThreadState::new();
    sched_state.current_arch_thread_state = &raw mut temp_arch_thread_state;

    reschedule(sched_state, Thread::null_id());
    panic!("should not reach here");
}
203
// Global scheduler state (single processor for now)
#[allow(dead_code)]
pub struct SchedulerState {
    // The thread currently executing, if any. `None` transiently while a
    // reschedule is in flight (reschedule asserts this).
    current_thread: Option<ForeignBox<Thread>>,
    // Raw pointer to the current thread's arch state; handed to the arch
    // context switch code. Presumably where outgoing register state is
    // saved — TODO confirm against the arch implementation.
    current_arch_thread_state: *mut ArchThreadState,
    // Every thread in the system, linked through `Thread::global_link`.
    thread_list: UnsafeList<Thread, GlobalThreadListAdapter>,
    // For now just have a single round robin list, expand to multiple queues.
    run_queue: ForeignList<Thread, ThreadListAdapter>,
}

/// The singleton scheduler, guarded by a spinlock.
pub static SCHEDULER_STATE: SpinLock<SchedulerState> = SpinLock::new(SchedulerState::new());

// SAFETY(review): raw pointers and intrusive lists make `SchedulerState`
// neither `Send` nor `Sync` by default. These impls assert that all access
// goes through the `SpinLock` above — confirm no unsynchronized path exists.
unsafe impl Sync for SchedulerState {}
unsafe impl Send for SchedulerState {}
218 impl SchedulerState {
219 #[allow(dead_code)]
new() -> Self220 const fn new() -> Self {
221 Self {
222 current_thread: None,
223 current_arch_thread_state: core::ptr::null_mut(),
224 thread_list: UnsafeList::new(),
225 run_queue: ForeignList::new(),
226 }
227 }
228
229 #[allow(dead_code)]
get_current_arch_thread_state(&mut self) -> *mut ArchThreadState230 pub(super) unsafe fn get_current_arch_thread_state(&mut self) -> *mut ArchThreadState {
231 self.current_arch_thread_state
232 }
233
move_current_thread_to_back(&mut self) -> usize234 fn move_current_thread_to_back(&mut self) -> usize {
235 let Some(mut current_thread) = self.current_thread.take() else {
236 panic!("no current thread");
237 };
238 let current_thread_id = current_thread.id();
239 current_thread.state = State::Ready;
240 self.insert_in_run_queue_tail(current_thread);
241 current_thread_id
242 }
243
move_current_thread_to_front(&mut self) -> usize244 fn move_current_thread_to_front(&mut self) -> usize {
245 let Some(mut current_thread) = self.current_thread.take() else {
246 panic!("no current thread");
247 };
248 let current_thread_id = current_thread.id();
249 current_thread.state = State::Ready;
250 self.insert_in_run_queue_head(current_thread);
251 current_thread_id
252 }
253
set_current_thread(&mut self, thread: ForeignBox<Thread>)254 fn set_current_thread(&mut self, thread: ForeignBox<Thread>) {
255 self.current_arch_thread_state = thread.arch_thread_state.get();
256 self.current_thread = Some(thread);
257 }
258
current_thread_id(&self) -> usize259 pub fn current_thread_id(&self) -> usize {
260 match &self.current_thread {
261 Some(thread) => thread.id(),
262 None => Thread::null_id(),
263 }
264 }
265
266 #[allow(dead_code)]
267 #[inline(never)]
add_thread_to_list(&mut self, thread: &mut Thread)268 pub fn add_thread_to_list(&mut self, thread: &mut Thread) {
269 unsafe {
270 self.thread_list.push_front_unchecked(thread);
271 }
272 }
273
274 #[allow(dead_code)]
dump_all_threads(&self)275 pub fn dump_all_threads(&self) {
276 info!("list of all threads:");
277 unsafe {
278 let _ = self.thread_list.for_each(|thread| -> Result<(), ()> {
279 // info!("ptr {:#x}", thread.id());
280 thread.dump();
281 Ok(())
282 });
283 }
284 }
285
286 #[allow(dead_code)]
insert_in_run_queue_head(&mut self, thread: ForeignBox<Thread>)287 fn insert_in_run_queue_head(&mut self, thread: ForeignBox<Thread>) {
288 assert!(thread.state == State::Ready);
289 // info!("pushing thread {:#x} on run queue head", thread.id());
290
291 self.run_queue.push_front(thread);
292 }
293
294 #[allow(dead_code)]
insert_in_run_queue_tail(&mut self, thread: ForeignBox<Thread>)295 fn insert_in_run_queue_tail(&mut self, thread: ForeignBox<Thread>) {
296 assert!(thread.state == State::Ready);
297 // info!("pushing thread {:#x} on run queue tail", thread.id());
298
299 self.run_queue.push_back(thread);
300 }
301 }
302
/// Picks the next thread off the run queue and context switches to it.
///
/// The caller is responsible for removing the current thread and putting
/// it in the correct run/wait queue beforehand; `current_thread_id` is the
/// id recorded for that thread (or [`Thread::null_id`] if there was none).
///
/// Consumes the scheduler lock guard; returns it to the caller when this
/// thread runs again (or immediately if no switch is needed).
///
/// # Panics
/// Panics if a current thread is still installed or the run queue is empty.
#[allow(dead_code)]
fn reschedule(
    mut sched_state: SpinLockGuard<SchedulerState>,
    current_thread_id: usize,
) -> SpinLockGuard<SchedulerState> {
    // Caller to reschedule is responsible for removing current thread and
    // put it in the correct run/wait queue.
    assert!(sched_state.current_thread.is_none());

    // Pop a new thread off the head of the run queue.
    // At the moment cannot handle an empty queue, so will panic in that case.
    // TODO: Implement either an idle thread or a special idle routine for that case.
    let Some(mut new_thread) = sched_state.run_queue.pop_head() else {
        panic!("run_queue empty");
    };

    assert!(new_thread.state == State::Ready);
    new_thread.state = State::Running;

    // If the chosen thread is the one that was just running, skip the
    // context switch entirely and reinstall it.
    if current_thread_id == new_thread.id() {
        sched_state.current_thread = Some(new_thread);
        return sched_state;
    }

    // Capture the old arch state pointer before `set_current_thread`
    // overwrites it, then perform the arch-specific switch.
    // NOTE(review): the guard is handed into `context_switch`; presumably
    // it is released/reacquired around the actual switch — confirm against
    // the arch implementation.
    unsafe {
        let old_thread_state = sched_state.current_arch_thread_state;
        let new_thread_state = new_thread.arch_thread_state.get();
        sched_state.set_current_thread(new_thread);
        <Arch as ArchInterface>::ThreadState::context_switch(
            sched_state,
            old_thread_state,
            new_thread_state,
        )
    }
}
343
344 #[allow(dead_code)]
yield_timeslice()345 pub fn yield_timeslice() {
346 // info!("yielding thread {:#x}", current_thread.id());
347 let mut sched_state = SCHEDULER_STATE.lock();
348
349 // Yielding always moves the current task to the back of the run queue
350 let current_thread_id = sched_state.move_current_thread_to_back();
351
352 reschedule(sched_state, current_thread_id);
353 }
354
355 #[allow(dead_code)]
preempt()356 pub fn preempt() {
357 // info!("preempt thread {:#x}", current_thread.id());
358 let mut sched_state = SCHEDULER_STATE.lock();
359
360 // For now, always move the current thread to the back of the run queue.
361 // When the scheduler gets more complex, it should evaluate if it has used
362 // up it's time allocation.
363 let current_thread_id = sched_state.move_current_thread_to_back();
364
365 reschedule(sched_state, current_thread_id);
366 }
367
368 // Tick that is called from a timer handler. The scheduler will evaluate if the current thread
369 // should be preempted or not
370 #[allow(dead_code)]
tick(_time_ms: u32)371 pub fn tick(_time_ms: u32) {
372 // info!("tick {} ms", _time_ms);
373
374 // TODO: dynamically deal with time slice for this thread and put it
375 // at the head or tail depending.
376 TICK_WAIT_QUEUE.lock().wake_one();
377
378 preempt();
379 }
380
// Exit the current thread.
// For now, simply remove ourselves from the run queue. No cleanup of thread resources
// is performed.
//
// NOTE(review): the exiting thread's `ForeignBox` is still held by this
// stack frame when `reschedule` switches away, and since control never
// returns here it is neither consumed nor dropped — confirm this
// leak-on-exit is intended.
#[allow(dead_code)]
pub fn exit_thread() -> ! {
    let mut sched_state = SCHEDULER_STATE.lock();

    // Take ourselves out of the scheduler; panics if nothing is running.
    let Some(mut current_thread) = sched_state.current_thread.take() else {
        panic!("no current thread");
    };
    let current_thread_id = current_thread.id();

    info!("thread {:#x} exiting", current_thread.id());
    current_thread.state = State::Stopped;

    reschedule(sched_state, current_thread_id);

    // Should not get here: the `Stopped` thread was never requeued, so the
    // scheduler will never switch back to it.
    #[allow(clippy::empty_loop)]
    loop {}
}
/// A FIFO queue of threads blocked waiting for an event.
pub struct WaitQueue {
    // Blocked threads, in the order they called `wait`, linked through
    // `Thread::active_link`.
    queue: ForeignList<Thread, ThreadListAdapter>,
}

// SAFETY(review): `ForeignList` holds intrusive pointers and is not
// `Send`/`Sync` by default. These impls assert that a `WaitQueue` is only
// accessed under its `SchedLock` (see `TICK_WAIT_QUEUE`) — confirm no
// unsynchronized access path exists.
unsafe impl Sync for WaitQueue {}
unsafe impl Send for WaitQueue {}

impl WaitQueue {
    /// Creates an empty wait queue.
    #[allow(dead_code)]
    pub const fn new() -> Self {
        Self {
            queue: ForeignList::new(),
        }
    }
}
417
impl SchedLockGuard<'_, WaitQueue> {
    /// Wakes the oldest waiter, if any, making it `Ready` and triggering a
    /// reschedule. Consumes the guard. No-op if the queue is empty.
    pub fn wake_one(mut self) {
        if let Some(mut thread) = self.queue.pop_head() {
            // Move the current thread to the head of the run queue so as
            // not to steal its time allocation.
            let current_thread_id = self.sched_mut().move_current_thread_to_front();

            // The woken thread goes to the tail; it will run after the
            // current thread's slice.
            thread.state = State::Ready;
            self.sched_mut().run_queue.push_back(thread);
            reschedule(self.into_sched(), current_thread_id);
        }
    }

    /// Blocks the current thread on this wait queue until a later
    /// `wake_one` releases it. Consumes the guard.
    ///
    /// # Panics
    /// Panics if there is no current thread.
    pub fn wait(mut self) {
        let Some(mut thread) = self.sched_mut().current_thread.take() else {
            panic!("no active thread");
        };
        let current_thread_id = thread.id();
        thread.state = State::Waiting;
        self.queue.push_back(thread);
        reschedule(self.into_sched(), current_thread_id);
    }
}
441
/// Wait queue from which [`tick`] wakes one waiter on every timer tick.
pub static TICK_WAIT_QUEUE: SchedLock<WaitQueue> = SchedLock::new(WaitQueue::new());
443