// Copyright 2023 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include "pw_thread/thread.h"

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>

#include "pw_assert/check.h"
#include "pw_preprocessor/compiler.h"
#include "pw_thread/id.h"
#include "pw_thread_zephyr/config.h"
#include "pw_thread_zephyr/context.h"
#include "pw_thread_zephyr/options.h"

using pw::thread::zephyr::Context;

namespace pw::thread {
namespace {

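// Serializes updates to a Context's detached/thread_done/task_handle state
// between an exiting thread (in ThreadEntryPoint) and Thread::detach().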
k_spinlock global_thread_done_lock;

}  // namespace

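// Entry point handed to k_thread_create(): runs the user's thread function,
// then records completion (or releases the Context for reuse if the thread was
// already detached) under global_thread_done_lock.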
void Context::ThreadEntryPoint(void* void_context_ptr, void*, void*) {
  Context& context = *static_cast<Context*>(void_context_ptr);

  // Invoke the user's thread function. This may never return.
  context.fn_();
  context.fn_ = nullptr;

  k_spinlock_key_t key = k_spin_lock(&global_thread_done_lock);
  if (context.detached()) {
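    // Nothing will ever join this thread; clear the task handle so the
    // Context can be reused.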
    context.set_task_handle(nullptr);
  } else {
    // Defer cleanup to Thread's join() or detach().
    context.set_thread_done();
  }
  k_spin_unlock(&global_thread_done_lock, key);
}

void Context::CreateThread(const zephyr::Options& options,
                           DeprecatedOrNewThreadFn&& thread_fn,
                           Context*& native_type_out) {
  PW_CHECK(options.static_context() != nullptr);

  // Use the statically allocated context.
  native_type_out = options.static_context();
  // Can't use a context more than once.
  PW_DCHECK_PTR_EQ(native_type_out->task_handle(), nullptr);
  // Reset the state of the static context in case it was re-used.
  native_type_out->set_detached(false);
  native_type_out->set_thread_done(false);

  native_type_out->set_thread_routine(std::move(thread_fn));
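  // Hand the context over to Zephyr. With K_NO_WAIT the new thread may begin
  // running before k_thread_create() returns, so the thread routine and the
  // detached/thread_done flags are initialized above.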
  const k_tid_t task_handle =
      k_thread_create(&native_type_out->thread_info(),
                      options.static_context()->stack(),
                      options.static_context()->available_stack_size(),
                      Context::ThreadEntryPoint,
                      options.static_context(),
                      nullptr,
                      nullptr,
                      options.priority(),
                      options.native_options(),
                      K_NO_WAIT);
  PW_CHECK_NOTNULL(task_handle);  // Ensure it succeeded.
  native_type_out->set_task_handle(task_handle);
}

Thread::Thread(const thread::Options& facade_options, Function<void()>&& entry)
    : native_type_(nullptr) {
  // Cast the generic facade options to the backend specific option of which
  // only one type can exist at compile time.
  auto options = static_cast<const zephyr::Options&>(facade_options);
  Context::CreateThread(options, std::move(entry), native_type_);
}

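// Deprecated constructor: adapts a raw ThreadRoutine function pointer and
// argument by wrapping them in DeprecatedFnPtrAndArg before handing them to
// Context::CreateThread().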
Thread::Thread(const thread::Options& facade_options,
               ThreadRoutine entry,
               void* arg)
    : native_type_(nullptr) {
  auto options = static_cast<const zephyr::Options&>(facade_options);
  Context::CreateThread(
      options, DeprecatedFnPtrAndArg{entry, arg}, native_type_);
}

void Thread::detach() {
  PW_CHECK(joinable());

  k_spinlock_key_t key = k_spin_lock(&global_thread_done_lock);
  native_type_->set_detached();
  const bool thread_done = native_type_->thread_done();

  if (thread_done) {
    // The task finished (hit the end of Context::ThreadEntryPoint) before we
    // invoked detach; clean up the task handle so the Context can be reused.
    native_type_->set_task_handle(nullptr);
  } else {
    // We're detaching before the task finished; defer cleanup to the task at
    // the end of Context::ThreadEntryPoint.
  }

  k_spin_unlock(&global_thread_done_lock, key);

  // Update to no longer represent a thread of execution.
  native_type_ = nullptr;
}

void Thread::join() {
  PW_CHECK(joinable());
  PW_CHECK(this_thread::get_id() != get_id());

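  // Block until the thread has exited, then release the Context for reuse.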
  PW_CHECK_INT_EQ(0, k_thread_join(native_type_->task_handle(), K_FOREVER));

  native_type_->set_task_handle(nullptr);

  // Update to no longer represent a thread of execution.
  native_type_ = nullptr;
}

}  // namespace pw::thread
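
// Illustrative usage sketch (not part of this translation unit): how a caller
// might create a detached thread with this backend. The StaticContextWithStack
// helper and the Options setters shown here (set_static_context, set_priority)
// are assumptions drawn from other pw_thread backends; check
// pw_thread_zephyr/context.h and pw_thread_zephyr/options.h for the actual
// names before relying on this.
//
//   #include "pw_thread/detached_thread.h"
//   #include "pw_thread_zephyr/context.h"
//   #include "pw_thread_zephyr/options.h"
//
//   // Statically allocated thread control block plus a 1 KiB stack.
//   pw::thread::zephyr::StaticContextWithStack<1024> example_thread_context;
//
//   void StartExampleThread() {
//     pw::thread::DetachedThread(
//         pw::thread::zephyr::Options()
//             .set_static_context(example_thread_context)  // assumed setter
//             .set_priority(5),                            // assumed setter
//         [] { /* thread body */ });
//   }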