use crate::loom::sync::Arc;
use crate::runtime::context;
use crate::runtime::scheduler::{self, current_thread, Inject};

use backtrace::BacktraceFrame;
use std::cell::Cell;
use std::collections::VecDeque;
use std::ffi::c_void;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::ptr::{self, NonNull};
use std::task::{self, Poll};

mod symbol;
mod tree;

use symbol::Symbol;
use tree::Tree;

use super::{Notified, OwnedTasks};

type Backtrace = Vec<BacktraceFrame>;
type SymbolTrace = Vec<Symbol>;

/// The ambient backtracing context.
pub(crate) struct Context {
    /// The address of [`Trace::root`] establishes an upper unwinding bound on
    /// the backtraces in `Trace`.
    active_frame: Cell<Option<NonNull<Frame>>>,
    /// The place to stash backtraces.
    collector: Cell<Option<Trace>>,
}

/// A [`Frame`] in an intrusive, doubly-linked tree of [`Frame`]s.
struct Frame {
    /// The location associated with this frame.
    inner_addr: *const c_void,

    /// The parent frame, if any.
    parent: Option<NonNull<Frame>>,
}

/// A tree execution trace.
///
/// Traces are captured with [`Trace::capture`], rooted with [`Trace::root`]
/// and leaved with [`trace_leaf`].
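///
/// A minimal sketch of how these pieces fit together (illustrative only; the
/// `poll_fn` leaf and the `cx` supplied by the caller are assumptions, not how
/// Tokio's own leaf futures are written):
///
/// ```ignore
/// // Wrap the future so backtrace unwinding stops at `Root::poll`.
/// let mut root = std::pin::pin!(Trace::root(async {
///     // A leaf calls `trace_leaf`. During `Trace::capture` it records a
///     // backtrace and yields; otherwise it is a no-op.
///     std::future::poll_fn(trace_leaf).await;
/// }));
///
/// // Capture every backtrace recorded while polling the root once.
/// let (_poll, trace) = Trace::capture(|| root.as_mut().poll(cx));
/// println!("{trace}");
/// ```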
#[derive(Clone, Debug)]
pub(crate) struct Trace {
    // The linear backtraces that comprise this trace. These linear traces can
    // be re-knitted into a tree.
    backtraces: Vec<Backtrace>,
}

pin_project_lite::pin_project! {
    #[derive(Debug, Clone)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub(crate) struct Root<T> {
        #[pin]
        future: T,
    }
}

const FAIL_NO_THREAD_LOCAL: &str = "The Tokio thread-local has been destroyed \
                                    as part of shutting down the current \
                                    thread, so collecting a taskdump is not \
                                    possible.";

impl Context {
    pub(crate) const fn new() -> Self {
        Context {
            active_frame: Cell::new(None),
            collector: Cell::new(None),
        }
    }

    /// SAFETY: Callers of this function must ensure that trace frames always
    /// form a valid linked list.
    unsafe fn try_with_current<F, R>(f: F) -> Option<R>
    where
        F: FnOnce(&Self) -> R,
    {
        crate::runtime::context::with_trace(f)
    }

    unsafe fn with_current_frame<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<NonNull<Frame>>>) -> R,
    {
        Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL)
    }

    fn with_current_collector<F, R>(f: F) -> R
    where
        F: FnOnce(&Cell<Option<Trace>>) -> R,
    {
        // SAFETY: This call can only access the collector field, so it cannot
        // break the trace frame linked list.
        unsafe {
            Self::try_with_current(|context| f(&context.collector)).expect(FAIL_NO_THREAD_LOCAL)
        }
    }
}

impl Trace {
    /// Invokes `f`, returning both its result and the collection of backtraces
    /// captured at each sub-invocation of [`trace_leaf`].
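    ///
    /// A hedged usage sketch (the `task` here is a stand-in for any callable
    /// that reaches [`trace_leaf`] while it runs):
    ///
    /// ```ignore
    /// let ((), trace) = Trace::capture(|| task.poll());
    /// println!("{trace}");
    /// ```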
    #[inline(never)]
    pub(crate) fn capture<F, R>(f: F) -> (R, Trace)
    where
        F: FnOnce() -> R,
    {
        let collector = Trace { backtraces: vec![] };

        let previous = Context::with_current_collector(|current| current.replace(Some(collector)));

        let result = f();

        let collector =
            Context::with_current_collector(|current| current.replace(previous)).unwrap();

        (result, collector)
    }

    /// The root of a trace.
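    ///
    /// Wrapping a future in [`Root`] is what gives [`trace_leaf`] an upper
    /// bound to stop unwinding at. For example (illustrative only):
    ///
    /// ```ignore
    /// let traced = Trace::root(async { /* task body */ });
    /// ```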
    #[inline(never)]
    pub(crate) fn root<F>(future: F) -> Root<F> {
        Root { future }
    }
}

/// If this is a sub-invocation of [`Trace::capture`], capture a backtrace.
///
/// The captured backtrace will be returned by [`Trace::capture`].
///
/// Invoking this function does nothing when it is not a sub-invocation of
/// [`Trace::capture`].
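///
/// A hedged sketch of how a leaf future might cooperate with tracing (the
/// `poll_fn` wrapper is illustrative, not Tokio's actual leaf implementation):
///
/// ```ignore
/// std::future::poll_fn(|cx| {
///     // During `Trace::capture` this records a backtrace and returns
///     // `Pending` so the traced poll unwinds; otherwise it is a no-op.
///     std::task::ready!(trace_leaf(cx));
///     // ... the leaf's real polling logic would go here ...
///     Poll::Ready(())
/// })
/// .await;
/// ```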
// This function is marked `#[inline(never)]` to ensure that it gets a distinct `Frame` in the
// backtrace, below which frames should not be included in the backtrace (since they reflect the
// internal implementation details of this crate).
#[inline(never)]
pub(crate) fn trace_leaf(cx: &mut task::Context<'_>) -> Poll<()> {
    // Safety: We don't manipulate the current context's active frame.
    let did_trace = unsafe {
        Context::try_with_current(|context_cell| {
            if let Some(mut collector) = context_cell.collector.take() {
                let mut frames = vec![];
                let mut above_leaf = false;

                if let Some(active_frame) = context_cell.active_frame.get() {
                    let active_frame = active_frame.as_ref();

                    backtrace::trace(|frame| {
                        let below_root = !ptr::eq(frame.symbol_address(), active_frame.inner_addr);

                        // only capture frames above `trace_leaf` and below
                        // `Trace::root`.
                        if above_leaf && below_root {
                            frames.push(frame.to_owned().into());
                        }

                        if ptr::eq(frame.symbol_address(), trace_leaf as *const _) {
                            above_leaf = true;
                        }

                        // only continue unwinding if we're below `Trace::root`
                        below_root
                    });
                }
                collector.backtraces.push(frames);
                context_cell.collector.set(Some(collector));
                true
            } else {
                false
            }
        })
        .unwrap_or(false)
    };

    if did_trace {
        // Use the same logic that `yield_now` uses to send out wakeups after
        // the task yields.
        context::with_scheduler(|scheduler| {
            if let Some(scheduler) = scheduler {
                match scheduler {
                    scheduler::Context::CurrentThread(s) => s.defer.defer(cx.waker()),
                    #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
                    scheduler::Context::MultiThread(s) => s.defer.defer(cx.waker()),
                    #[cfg(all(
                        tokio_unstable,
                        feature = "rt-multi-thread",
                        not(target_os = "wasi")
                    ))]
                    scheduler::Context::MultiThreadAlt(_) => unimplemented!(),
                }
            }
        });

        Poll::Pending
    } else {
        Poll::Ready(())
    }
}

impl fmt::Display for Trace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Tree::from_trace(self.clone()).fmt(f)
    }
}
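
/// Runs `f` when the returned guard is dropped (including during unwinding);
/// used below to restore the previous active frame once a poll completes.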
fn defer<F: FnOnce() -> R, R>(f: F) -> impl Drop {
    use std::mem::ManuallyDrop;

    struct Defer<F: FnOnce() -> R, R>(ManuallyDrop<F>);

    impl<F: FnOnce() -> R, R> Drop for Defer<F, R> {
        #[inline(always)]
        fn drop(&mut self) {
            unsafe {
                ManuallyDrop::take(&mut self.0)();
            }
        }
    }

    Defer(ManuallyDrop::new(f))
}

impl<T: Future> Future for Root<T> {
    type Output = T::Output;

    #[inline(never)]
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // SAFETY: The context's current frame is restored to its original state
        // before `frame` is dropped.
        unsafe {
            let mut frame = Frame {
                inner_addr: Self::poll as *const c_void,
                parent: None,
            };

            Context::with_current_frame(|current| {
                frame.parent = current.take();
                current.set(Some(NonNull::from(&frame)));
            });

            let _restore = defer(|| {
                Context::with_current_frame(|current| {
                    current.set(frame.parent);
                });
            });

            let this = self.project();
            this.future.poll(cx)
        }
    }
}

/// Trace and poll all tasks of the current_thread runtime.
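///
/// This clears the local and injection queues, marks every owned task as
/// notified, and then polls each task once inside [`Trace::capture`] so that
/// its backtrace can be collected.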
pub(in crate::runtime) fn trace_current_thread(
    owned: &OwnedTasks<Arc<current_thread::Handle>>,
    local: &mut VecDeque<Notified<Arc<current_thread::Handle>>>,
    injection: &Inject<Arc<current_thread::Handle>>,
) -> Vec<Trace> {
    // clear the local and injection queues
    local.clear();

    while let Some(task) = injection.pop() {
        drop(task);
    }

    // notify each task
    let mut tasks = vec![];
    owned.for_each(|task| {
        // set the notified bit
        task.as_raw().state().transition_to_notified_for_tracing();
        // store the raw tasks into a vec
        tasks.push(task.as_raw());
    });

    tasks
        .into_iter()
        .map(|task| {
            let ((), trace) = Trace::capture(|| task.poll());
            trace
        })
        .collect()
}

cfg_rt_multi_thread! {
    use crate::loom::sync::Mutex;
    use crate::runtime::scheduler::multi_thread;
    use crate::runtime::scheduler::multi_thread::Synced;
    use crate::runtime::scheduler::inject::Shared;

    /// Trace and poll all tasks of the multi_thread runtime.
    ///
    /// ## Safety
    ///
    /// Must be called with the same `synced` that `injection` was created with.
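    ///
    /// Like [`trace_current_thread`], this clears the local and injection
    /// queues, then traces each owned task with [`Trace::capture`] and
    /// reschedules it afterwards.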
    pub(in crate::runtime) unsafe fn trace_multi_thread(
        owned: &OwnedTasks<Arc<multi_thread::Handle>>,
        local: &mut multi_thread::queue::Local<Arc<multi_thread::Handle>>,
        synced: &Mutex<Synced>,
        injection: &Shared<Arc<multi_thread::Handle>>,
    ) -> Vec<Trace> {
        // clear the local queue
        while let Some(notified) = local.pop() {
            drop(notified);
        }

        // clear the injection queue
        let mut synced = synced.lock();
        while let Some(notified) = injection.pop(&mut synced.inject) {
            drop(notified);
        }

        drop(synced);

        // notify each task
        let mut traces = vec![];
        owned.for_each(|task| {
            // set the notified bit
            task.as_raw().state().transition_to_notified_for_tracing();

            // trace the task
            let ((), trace) = Trace::capture(|| task.as_raw().poll());
            traces.push(trace);

            // reschedule the task
            let _ = task.as_raw().state().transition_to_notified_by_ref();
            task.as_raw().schedule();
        });

        traces
    }
}