/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/perf/unwinding.h"

#include <mutex>

#include <inttypes.h>

#include <unwindstack/Unwinder.h>

#include "perfetto/ext/base/metatrace.h"
#include "perfetto/ext/base/thread_utils.h"
#include "perfetto/ext/base/utils.h"

namespace {
constexpr size_t kUnwindingMaxFrames = 1000;
constexpr uint32_t kDataSourceShutdownRetryDelayMs = 400;
}  // namespace

namespace perfetto {
namespace profiling {

Unwinder::Delegate::~Delegate() = default;

Unwinder::Unwinder(Delegate* delegate, base::UnixTaskRunner* task_runner)
    : task_runner_(task_runner), delegate_(delegate) {
  ResetAndEnableUnwindstackCache();
  base::MaybeSetThreadName("stack-unwinding");
}

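// All Post* entry points are safe to call from any thread: each one just
// trampolines the matching non-Post method onto |task_runner_|, the dedicated
// unwinder thread, which is the only thread that touches this class's state
// (as enforced by |thread_checker_|).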
void Unwinder::PostStartDataSource(DataSourceInstanceID ds_id,
                                   bool kernel_frames) {
  // No need for a weak pointer as the associated task runner quits (stops
  // running tasks) strictly before the Unwinder's destruction.
  task_runner_->PostTask(
      [this, ds_id, kernel_frames] { StartDataSource(ds_id, kernel_frames); });
}

void Unwinder::StartDataSource(DataSourceInstanceID ds_id, bool kernel_frames) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::StartDataSource(%zu)", static_cast<size_t>(ds_id));

  auto it_and_inserted = data_sources_.emplace(ds_id, DataSourceState{});
  PERFETTO_DCHECK(it_and_inserted.second);

  if (kernel_frames) {
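    // Build the kernel symbol map eagerly, at data source start, rather than
    // on the first sampled kernel callchain.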
    kernel_symbolizer_.GetOrCreateKernelSymbolMap();
  }
}

// C++11: use shared_ptr to transfer resource handles, so that the resources
// get released even if the task runner is destroyed with pending tasks.
// "Cleverness" warning:
// the task will be executed on a different thread, and will mutate the
// pointed-to memory. It may be the case that this posting thread will not
// decrement its shared_ptr refcount until *after* the task has executed. In
// that scenario, the destruction of the pointed-to memory will be happening on
// the posting thread. This implies a data race between the mutation on the
// task thread, and the destruction on the posting thread. *However*, we assume
// that there is no race in practice due to refcount decrements having
// release-acquire semantics. The refcount decrements pair with each other, and
// therefore also serve as a memory barrier between the destructor, and any
// previous modifications of the pointed-to memory.
// TODO(rsavitski): present a more convincing argument, or reimplement
// without relying on shared_ptr implementation details.
void Unwinder::PostAdoptProcDescriptors(DataSourceInstanceID ds_id,
                                        pid_t pid,
                                        base::ScopedFile maps_fd,
                                        base::ScopedFile mem_fd) {
  auto shared_maps = std::make_shared<base::ScopedFile>(std::move(maps_fd));
  auto shared_mem = std::make_shared<base::ScopedFile>(std::move(mem_fd));
  task_runner_->PostTask([this, ds_id, pid, shared_maps, shared_mem] {
    base::ScopedFile maps = std::move(*shared_maps.get());
    base::ScopedFile mem = std::move(*shared_mem.get());
    AdoptProcDescriptors(ds_id, pid, std::move(maps), std::move(mem));
  });
}

void Unwinder::AdoptProcDescriptors(DataSourceInstanceID ds_id,
                                    pid_t pid,
                                    base::ScopedFile maps_fd,
                                    base::ScopedFile mem_fd) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::AdoptProcDescriptors(%zu, %d, %d, %d)",
                static_cast<size_t>(ds_id), static_cast<int>(pid),
                maps_fd.get(), mem_fd.get());

  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;
  DataSourceState& ds = it->second;

  ProcessState& proc_state = ds.process_states[pid];  // insert if new
  PERFETTO_DCHECK(proc_state.status != ProcessState::Status::kResolved);
  PERFETTO_DCHECK(!proc_state.unwind_state.has_value());

  PERFETTO_METATRACE_SCOPED(TAG_PRODUCER, PROFILER_MAPS_PARSE);

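  // Constructing the UnwindingMetadata parses the maps file, which is the
  // work that the PROFILER_MAPS_PARSE scope above is measuring.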
  proc_state.status = ProcessState::Status::kResolved;
  proc_state.unwind_state =
      UnwindingMetadata{std::move(maps_fd), std::move(mem_fd)};
}

void Unwinder::PostRecordTimedOutProcDescriptors(DataSourceInstanceID ds_id,
                                                 pid_t pid) {
  task_runner_->PostTask(
      [this, ds_id, pid] { RecordTimedOutProcDescriptors(ds_id, pid); });
}

void Unwinder::RecordTimedOutProcDescriptors(DataSourceInstanceID ds_id,
                                             pid_t pid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::RecordTimedOutProcDescriptors(%zu, %d)",
                static_cast<size_t>(ds_id), static_cast<int>(pid));

  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;
  DataSourceState& ds = it->second;

  ProcessState& proc_state = ds.process_states[pid];  // insert if new
  PERFETTO_DCHECK(proc_state.status == ProcessState::Status::kResolving);
  PERFETTO_DCHECK(!proc_state.unwind_state.has_value());

  proc_state.status = ProcessState::Status::kExpired;
}

void Unwinder::PostProcessQueue() {
  task_runner_->PostTask([this] { ProcessQueue(); });
}

// Note: we always walk the queue in order. So if there are multiple data
// sources, one of which is shutting down, its shutdown can be delayed by
// unwinding of other sources' samples. Instead, we could scan the queue
// multiple times, prioritizing the samples for shutting-down sources. At the
// time of writing, the former approach is considered fair enough.
void Unwinder::ProcessQueue() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_METATRACE_SCOPED(TAG_PRODUCER, PROFILER_UNWIND_TICK);
  PERFETTO_DLOG("Unwinder::ProcessQueue");

  base::FlatSet<DataSourceInstanceID> pending_sample_sources =
      ConsumeAndUnwindReadySamples();

  // Deal with the possibility of data sources that are shutting down.
  bool post_delayed_reprocess = false;
  base::FlatSet<DataSourceInstanceID> sources_to_stop;
  for (auto& id_and_ds : data_sources_) {
    DataSourceInstanceID ds_id = id_and_ds.first;
    const DataSourceState& ds = id_and_ds.second;

    if (ds.status == DataSourceState::Status::kActive)
      continue;

    // Data source that is shutting down. If we're still waiting on proc-fds
    // (or on the lookup timing out) for samples in the queue, repost a later
    // attempt, as there is no guarantee that any readers are still waking up
    // the unwinder.
    if (pending_sample_sources.count(ds_id)) {
      PERFETTO_DLOG(
          "Unwinder delaying DS(%zu) stop: waiting on a pending sample",
          static_cast<size_t>(ds_id));
      post_delayed_reprocess = true;
    } else {
      // Otherwise, proceed with tearing down data source state (after
      // completing the loop, to avoid invalidating the iterator).
      sources_to_stop.insert(ds_id);
    }
  }

  for (auto ds_id : sources_to_stop)
    FinishDataSourceStop(ds_id);

  if (post_delayed_reprocess)
    task_runner_->PostDelayedTask([this] { ProcessQueue(); },
                                  kDataSourceShutdownRetryDelayMs);
}

base::FlatSet<DataSourceInstanceID> Unwinder::ConsumeAndUnwindReadySamples() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  base::FlatSet<DataSourceInstanceID> pending_sample_sources;

  // Use a single snapshot of the ring buffer pointers.
  ReadView read_view = unwind_queue_.BeginRead();

  PERFETTO_METATRACE_COUNTER(
      TAG_PRODUCER, PROFILER_UNWIND_QUEUE_SZ,
      static_cast<int32_t>(read_view.write_pos - read_view.read_pos));

  if (read_view.read_pos == read_view.write_pos)
    return pending_sample_sources;

  // Walk the queue.
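  // Entries can complete out of order: a processed entry is overwritten with
  // UnwindEntry::Invalid() in place, while entries still waiting on proc-fds
  // are left intact. The shared read position is advanced only past the
  // leading run of invalidated entries (see the second loop below).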
  for (auto read_pos = read_view.read_pos; read_pos < read_view.write_pos;
       read_pos++) {
    UnwindEntry& entry = unwind_queue_.at(read_pos);

    if (!entry.valid)
      continue;  // already processed

    uint64_t sampled_stack_bytes = entry.sample.stack.size();

    // Data source might be gone due to an abrupt stop.
    auto it = data_sources_.find(entry.data_source_id);
    if (it == data_sources_.end()) {
      entry = UnwindEntry::Invalid();
      DecrementEnqueuedFootprint(sampled_stack_bytes);
      continue;
    }
    DataSourceState& ds = it->second;

    pid_t pid = entry.sample.common.pid;
    ProcessState& proc_state = ds.process_states[pid];  // insert if new

    // Giving up on the sample (proc-fd lookup timed out).
    if (proc_state.status == ProcessState::Status::kExpired) {
      PERFETTO_DLOG("Unwinder skipping sample for pid [%d]",
                    static_cast<int>(pid));

      // free up the sampled stack as the main thread has no use for it
      entry.sample.stack.clear();
      entry.sample.stack.shrink_to_fit();

      delegate_->PostEmitUnwinderSkippedSample(entry.data_source_id,
                                               std::move(entry.sample));
      entry = UnwindEntry::Invalid();
      DecrementEnqueuedFootprint(sampled_stack_bytes);
      continue;
    }

    // Still waiting on the proc-fds.
    if (proc_state.status == ProcessState::Status::kResolving) {
      PERFETTO_DLOG("Unwinder deferring sample for pid [%d]",
                    static_cast<int>(pid));

      pending_sample_sources.insert(entry.data_source_id);
      continue;
    }

    // Sample ready - process it.
    if (proc_state.status == ProcessState::Status::kResolved) {
      // Metatrace: emit both a scoped slice, as well as a "counter"
      // representing the pid being unwound.
      PERFETTO_METATRACE_SCOPED(TAG_PRODUCER, PROFILER_UNWIND_SAMPLE);
      PERFETTO_METATRACE_COUNTER(TAG_PRODUCER, PROFILER_UNWIND_CURRENT_PID,
                                 static_cast<int32_t>(pid));

      PERFETTO_CHECK(proc_state.unwind_state.has_value());
      CompletedSample unwound_sample =
          UnwindSample(entry.sample, &proc_state.unwind_state.value(),
                       proc_state.attempted_unwinding);
      proc_state.attempted_unwinding = true;

      PERFETTO_METATRACE_COUNTER(TAG_PRODUCER, PROFILER_UNWIND_CURRENT_PID, 0);

      delegate_->PostEmitSample(entry.data_source_id,
                                std::move(unwound_sample));
      entry = UnwindEntry::Invalid();
      DecrementEnqueuedFootprint(sampled_stack_bytes);
      continue;
    }
  }

  // Consume all leading processed entries in the queue.
  auto new_read_pos = read_view.read_pos;
  for (; new_read_pos < read_view.write_pos; new_read_pos++) {
    UnwindEntry& entry = unwind_queue_.at(new_read_pos);
    if (entry.valid)
      break;
  }
  if (new_read_pos != read_view.read_pos)
    unwind_queue_.CommitNewReadPosition(new_read_pos);

  PERFETTO_METATRACE_COUNTER(
      TAG_PRODUCER, PROFILER_UNWIND_QUEUE_SZ,
      static_cast<int32_t>(read_view.write_pos - new_read_pos));

  PERFETTO_DLOG("Unwind queue drain: [%" PRIu64 "]->[%" PRIu64 "]",
                read_view.write_pos - read_view.read_pos,
                read_view.write_pos - new_read_pos);

  return pending_sample_sources;
}

CompletedSample Unwinder::UnwindSample(const ParsedSample& sample,
                                       UnwindingMetadata* unwind_state,
                                       bool pid_unwound_before) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(unwind_state);

  CompletedSample ret;
  ret.common = sample.common;

  // Overlay the sampled stack bytes on top of /proc/<pid>/mem: reads that fall
  // within the sampled range are served from the local copy, while the rest
  // fall through to the target's memory.
  std::shared_ptr<unwindstack::Memory> overlay_memory =
      std::make_shared<StackOverlayMemory>(
          unwind_state->fd_mem, sample.regs->sp(),
          reinterpret_cast<const uint8_t*>(sample.stack.data()),
          sample.stack.size());

  struct UnwindResult {
    unwindstack::ErrorCode error_code;
    uint64_t warnings;
    std::vector<unwindstack::FrameData> frames;

    UnwindResult(unwindstack::ErrorCode e,
                 uint64_t w,
                 std::vector<unwindstack::FrameData> f)
        : error_code(e), warnings(w), frames(std::move(f)) {}
    UnwindResult(const UnwindResult&) = delete;
    UnwindResult& operator=(const UnwindResult&) = delete;
    UnwindResult(UnwindResult&&) __attribute__((unused)) = default;
    UnwindResult& operator=(UnwindResult&&) = default;
  };
  auto attempt_unwind = [&sample, unwind_state, pid_unwound_before,
                         &overlay_memory]() -> UnwindResult {
    metatrace::ScopedEvent m(metatrace::TAG_PRODUCER,
                             pid_unwound_before
                                 ? metatrace::PROFILER_UNWIND_ATTEMPT
                                 : metatrace::PROFILER_UNWIND_INITIAL_ATTEMPT);

    // Unwindstack clobbers registers, so make a copy in case of retries.
    auto regs_copy = std::unique_ptr<unwindstack::Regs>{sample.regs->Clone()};

    unwindstack::Unwinder unwinder(kUnwindingMaxFrames, &unwind_state->fd_maps,
                                   regs_copy.get(), overlay_memory);
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
    unwinder.SetJitDebug(unwind_state->GetJitDebug(regs_copy->Arch()));
    unwinder.SetDexFiles(unwind_state->GetDexFiles(regs_copy->Arch()));
#endif
    unwinder.Unwind(/*initial_map_names_to_skip=*/nullptr,
                    /*map_suffixes_to_ignore=*/nullptr);
    return {unwinder.LastErrorCode(), unwinder.warnings(),
            unwinder.ConsumeFrames()};
  };

  // first unwind attempt
  UnwindResult unwind = attempt_unwind();

  bool should_retry = unwind.error_code == unwindstack::ERROR_INVALID_MAP ||
                      unwind.warnings & unwindstack::WARNING_DEX_PC_NOT_IN_MAP;

  // ERROR_INVALID_MAP means that unwinding reached a point in memory without a
  // corresponding mapping. This is possible if the parsed /proc/pid/maps is
  // outdated. Similarly, WARNING_DEX_PC_NOT_IN_MAP means that a sampled dex pc
  // did not fall within any known mapping, which also points at stale maps.
  // Reparse and try again.
  //
  // Special case: skip reparsing if the stack sample was (most likely)
  // truncated. We perform the best-effort unwind of the sampled part, but an
  // error around the truncated part is not unexpected.
  //
  // TODO(rsavitski): consider rate-limiting unwind retries.
  if (should_retry && sample.stack_maxed) {
    PERFETTO_DLOG("Skipping reparse/reunwind due to maxed stack for tid [%d]",
                  static_cast<int>(sample.common.tid));
  } else if (should_retry) {
    {
      PERFETTO_METATRACE_SCOPED(TAG_PRODUCER, PROFILER_MAPS_REPARSE);
      PERFETTO_DLOG("Reparsing maps for pid [%d]",
                    static_cast<int>(sample.common.pid));
      unwind_state->ReparseMaps();
    }
    // reunwind attempt
    unwind = attempt_unwind();
  }

  // Symbolize kernel-unwound kernel frames (if any).
  std::vector<unwindstack::FrameData> kernel_frames =
      SymbolizeKernelCallchain(sample);

  // Concatenate the kernel and userspace frames.
  auto kernel_frames_size = kernel_frames.size();

  ret.frames = std::move(kernel_frames);

  ret.build_ids.reserve(kernel_frames_size + unwind.frames.size());
  ret.frames.reserve(kernel_frames_size + unwind.frames.size());

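  // Kernel frames carry no build ids; pad with empty strings so the two
  // vectors stay index-aligned (checked below).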
  ret.build_ids.resize(kernel_frames_size, "");

  for (unwindstack::FrameData& frame : unwind.frames) {
    ret.build_ids.emplace_back(unwind_state->GetBuildId(frame));
    ret.frames.emplace_back(std::move(frame));
  }

  PERFETTO_CHECK(ret.build_ids.size() == ret.frames.size());

  // In case of an unwinding error, add a synthetic error frame (which will
  // appear as a caller of the partially-unwound fragment), for easier
  // visualization of errors.
  if (unwind.error_code != unwindstack::ERROR_NONE) {
    PERFETTO_DLOG("Unwinding error %" PRIu8, unwind.error_code);
    unwindstack::FrameData frame_data{};
    frame_data.function_name =
        "ERROR " + StringifyLibUnwindstackError(unwind.error_code);
    frame_data.map_name = "ERROR";
    ret.frames.emplace_back(std::move(frame_data));
    ret.build_ids.emplace_back("");
    ret.unwind_error = unwind.error_code;
  }

  return ret;
}

std::vector<unwindstack::FrameData> Unwinder::SymbolizeKernelCallchain(
    const ParsedSample& sample) {
  std::vector<unwindstack::FrameData> ret;
  if (sample.kernel_ips.empty())
    return ret;

  // The list of addresses contains special context marker values (inserted by
  // the kernel's unwinding) to indicate which section of the callchain belongs
  // to the kernel/user mode (if the kernel can successfully unwind user
  // stacks). In our case, we request only the kernel frames.
  if (sample.kernel_ips[0] != PERF_CONTEXT_KERNEL) {
    PERFETTO_DFATAL_OR_ELOG(
        "Unexpected: 0th frame of callchain is not PERF_CONTEXT_KERNEL.");
    return ret;
  }

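  // Symbolize against the kernel symbol map, built from /proc/kallsyms by the
  // KernelSymbolizer.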
  auto* kernel_map = kernel_symbolizer_.GetOrCreateKernelSymbolMap();
  PERFETTO_DCHECK(kernel_map);
  ret.reserve(sample.kernel_ips.size());
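  // Skip the 0th entry, which is the PERF_CONTEXT_KERNEL marker validated
  // above, not a real frame.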
  for (size_t i = 1; i < sample.kernel_ips.size(); i++) {
    std::string function_name = kernel_map->Lookup(sample.kernel_ips[i]);

    // Synthesise a partially-valid libunwindstack frame struct for the kernel
    // frame. We reuse the type for convenience. The kernel frames are marked
    // by a magical "kernel" string as their containing mapping.
    unwindstack::FrameData frame{};
    frame.function_name = std::move(function_name);
    frame.map_name = "kernel";
    ret.emplace_back(std::move(frame));
  }
  return ret;
}

void Unwinder::PostInitiateDataSourceStop(DataSourceInstanceID ds_id) {
  task_runner_->PostTask([this, ds_id] { InitiateDataSourceStop(ds_id); });
}

void Unwinder::InitiateDataSourceStop(DataSourceInstanceID ds_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::InitiateDataSourceStop(%zu)",
                static_cast<size_t>(ds_id));

  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;
  DataSourceState& ds = it->second;

  PERFETTO_CHECK(ds.status == DataSourceState::Status::kActive);
  ds.status = DataSourceState::Status::kShuttingDown;

  // Make sure that there's an outstanding task to process the unwinding queue,
  // as it is the point that evaluates the stop condition.
  PostProcessQueue();
}

void Unwinder::FinishDataSourceStop(DataSourceInstanceID ds_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::FinishDataSourceStop(%zu)",
                static_cast<size_t>(ds_id));

  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;
  DataSourceState& ds = it->second;

  // Drop unwinder's state tied to the source.
  PERFETTO_CHECK(ds.status == DataSourceState::Status::kShuttingDown);
  data_sources_.erase(it);

  // Clean up state if there are no more active sources.
  if (data_sources_.empty()) {
    kernel_symbolizer_.Destroy();
    ResetAndEnableUnwindstackCache();
  }

  // Inform service thread that the unwinder is done with the source.
  delegate_->PostFinishDataSourceStop(ds_id);
}

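// Unlike the InitiateDataSourceStop/FinishDataSourceStop flow above, purging
// tears the state down unconditionally (no kShuttingDown requirement) and
// does not notify the delegate.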
void Unwinder::PostPurgeDataSource(DataSourceInstanceID ds_id) {
  task_runner_->PostTask([this, ds_id] { PurgeDataSource(ds_id); });
}

void Unwinder::PurgeDataSource(DataSourceInstanceID ds_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Unwinder::PurgeDataSource(%zu)", static_cast<size_t>(ds_id));

  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;

  data_sources_.erase(it);

  // Clean up state if there are no more active sources.
  if (data_sources_.empty()) {
    kernel_symbolizer_.Destroy();
    ResetAndEnableUnwindstackCache();
    // Also purge scudo on Android, which would normally be done by the service
    // thread in |FinishDataSourceStop|. This is important as most of the scudo
    // overhead comes from libunwindstack.
    base::MaybeReleaseAllocatorMemToOS();
  }
}

void Unwinder::PostClearCachedStatePeriodic(DataSourceInstanceID ds_id,
                                            uint32_t period_ms) {
  task_runner_->PostDelayedTask(
      [this, ds_id, period_ms] { ClearCachedStatePeriodic(ds_id, period_ms); },
      period_ms);
}

// See header for rationale.
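// The periodic clearing is implemented as a self-reposting delayed task: each
// run schedules the next one (see the repost at the end), and the chain stops
// once the data source is gone or no longer active.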
void Unwinder::ClearCachedStatePeriodic(DataSourceInstanceID ds_id,
                                        uint32_t period_ms) {
  auto it = data_sources_.find(ds_id);
  if (it == data_sources_.end())
    return;  // stop the periodic task

  DataSourceState& ds = it->second;
  if (ds.status != DataSourceState::Status::kActive)
    return;

  PERFETTO_METATRACE_SCOPED(TAG_PRODUCER, PROFILER_UNWIND_CACHE_CLEAR);
  PERFETTO_DLOG("Clearing unwinder's cached state.");

  for (auto& pid_and_process : ds.process_states) {
    pid_and_process.second.unwind_state->fd_maps.Reset();
  }
  ResetAndEnableUnwindstackCache();
  base::MaybeReleaseAllocatorMemToOS();

  PostClearCachedStatePeriodic(ds_id, period_ms);  // repost
}

void Unwinder::ResetAndEnableUnwindstackCache() {
  PERFETTO_DLOG("Resetting unwindstack cache");
  // Libunwindstack uses an unsynchronized variable for setting/checking
  // whether the cache is enabled. Unwinding and cache toggling should
  // therefore stay on the same thread, but we might be moving unwinding across
  // threads if we're recreating |Unwinder| instances (during a reconnect to
  // traced). So use our own static lock to synchronize the cache toggling.
  // TODO(rsavitski): consider fixing this in libunwindstack itself.
  static std::mutex* lock = new std::mutex{};
  std::lock_guard<std::mutex> guard{*lock};
  unwindstack::Elf::SetCachingEnabled(false);  // free any existing state
  unwindstack::Elf::SetCachingEnabled(true);   // reallocate a fresh cache
}

}  // namespace profiling
}  // namespace perfetto