/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/traced/probes/ftrace/ftrace_config_muxer.h"

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <iterator>

#include "perfetto/base/compiler.h"
#include "perfetto/ext/base/utils.h"
#include "src/traced/probes/ftrace/atrace_wrapper.h"
#include "src/traced/probes/ftrace/compact_sched.h"
#include "src/traced/probes/ftrace/ftrace_stats.h"

#include "protos/perfetto/trace/ftrace/ftrace_event.pbzero.h"

namespace perfetto {
namespace {

constexpr int kDefaultPerCpuBufferSizeKb = 2 * 1024;  // 2mb
constexpr int kMaxPerCpuBufferSizeKb = 64 * 1024;     // 64mb

// A fake "syscall id" that indicates all syscalls should be recorded. This
// allows us to distinguish between the case where `syscall_events` is empty
// because raw_syscalls tracing isn't enabled, and the case where it is empty
// because we want to record all syscalls.
constexpr size_t kAllSyscallsId = kMaxSyscalls + 1;

// trace_clocks in preference order.
// If this list is changed, the FtraceClocks enum in ftrace_event_bundle.proto
// and FtraceConfigMuxer::SetupClock() should also be changed accordingly.
constexpr const char* kClocks[] = {"boot", "global", "local"};

// Optional monotonic raw clock.
// Enabled by the "use_monotonic_raw_clock" option in the ftrace config.
constexpr const char* kClockMonoRaw = "mono_raw";

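// Adds every event known to |table| under |group| to |to|.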
void AddEventGroup(const ProtoTranslationTable* table,
                   const std::string& group,
                   std::set<GroupAndName>* to) {
  const std::vector<const Event*>* events = table->GetEventsByGroup(group);
  if (!events)
    return;
  for (const Event* event : *events)
    to->insert(GroupAndName(group, event->name));
}

std::set<GroupAndName> ReadEventsInGroupFromFs(
    const FtraceProcfs& ftrace_procfs,
    const std::string& group) {
  std::set<std::string> names =
      ftrace_procfs.GetEventNamesForGroup("events/" + group);
  std::set<GroupAndName> events;
  for (const auto& name : names)
    events.insert(GroupAndName(group, name));
  return events;
}

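// Splits a config entry of the form "group/name" into its two parts. If there
// is no '/', the returned group is empty.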
std::pair<std::string, std::string> EventToStringGroupAndName(
    const std::string& event) {
  auto slash_pos = event.find('/');
  if (slash_pos == std::string::npos)
    return std::make_pair("", event);
  return std::make_pair(event.substr(0, slash_pos),
                        event.substr(slash_pos + 1));
}

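// Replaces |out| with the sorted union of |unsorted_a| and |out|.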
void UnionInPlace(const std::vector<std::string>& unsorted_a,
                  std::vector<std::string>* out) {
  std::vector<std::string> a = unsorted_a;
  std::sort(a.begin(), a.end());
  std::sort(out->begin(), out->end());
  std::vector<std::string> v;
  std::set_union(a.begin(), a.end(), out->begin(), out->end(),
                 std::back_inserter(v));
  *out = std::move(v);
}

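// Replaces |out| with the sorted intersection of |unsorted_a| and |out|.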
void IntersectInPlace(const std::vector<std::string>& unsorted_a,
                      std::vector<std::string>* out) {
  std::vector<std::string> a = unsorted_a;
  std::sort(a.begin(), a.end());
  std::sort(out->begin(), out->end());
  std::vector<std::string> v;
  std::set_intersection(a.begin(), a.end(), out->begin(), out->end(),
                        std::back_inserter(v));
  *out = std::move(v);
}

// This is just to reduce binary size and stack frame size of the insertions.
// It effectively undoes STL's set::insert inlining.
void PERFETTO_NO_INLINE InsertEvent(const char* group,
                                    const char* name,
                                    std::set<GroupAndName>* dst) {
  dst->insert(GroupAndName(group, name));
}

}  // namespace

std::set<GroupAndName> FtraceConfigMuxer::GetFtraceEvents(
    const FtraceConfig& request,
    const ProtoTranslationTable* table) {
  std::set<GroupAndName> events;
  for (const auto& config_value : request.ftrace_events()) {
    std::string group;
    std::string name;
    std::tie(group, name) = EventToStringGroupAndName(config_value);
    if (name == "*") {
      for (const auto& event : ReadEventsInGroupFromFs(*ftrace_, group))
        events.insert(event);
    } else if (group.empty()) {
      // If there is no group specified, find an event with that name and
      // use its group.
      const Event* e = table->GetEventByName(name);
      if (!e) {
        PERFETTO_DLOG(
            "Event doesn't exist: %s. Include the group in the config to allow "
            "the event to be output as a generic event.",
            name.c_str());
        continue;
      }
      events.insert(GroupAndName(e->group, e->name));
    } else {
      events.insert(GroupAndName(group, name));
    }
  }
  if (RequiresAtrace(request)) {
    InsertEvent("ftrace", "print", &events);

    // Ideally we should keep this code in sync with:
    // platform/frameworks/native/cmds/atrace/atrace.cpp
    // It's not a disaster if they go out of sync; we can always add the ftrace
    // categories manually server-side, but this is user-friendly and reduces
    // the size of the configs.
    for (const std::string& category : request.atrace_categories()) {
      if (category == "gfx") {
        AddEventGroup(table, "mdss", &events);
        InsertEvent("mdss", "rotator_bw_ao_as_context", &events);
        InsertEvent("mdss", "mdp_trace_counter", &events);
        InsertEvent("mdss", "tracing_mark_write", &events);
        InsertEvent("mdss", "mdp_cmd_wait_pingpong", &events);
        InsertEvent("mdss", "mdp_cmd_kickoff", &events);
        InsertEvent("mdss", "mdp_cmd_release_bw", &events);
        InsertEvent("mdss", "mdp_cmd_readptr_done", &events);
        InsertEvent("mdss", "mdp_cmd_pingpong_done", &events);
        InsertEvent("mdss", "mdp_misr_crc", &events);
        InsertEvent("mdss", "mdp_compare_bw", &events);
        InsertEvent("mdss", "mdp_perf_update_bus", &events);
        InsertEvent("mdss", "mdp_video_underrun_done", &events);
        InsertEvent("mdss", "mdp_commit", &events);
        InsertEvent("mdss", "mdp_mixer_update", &events);
        InsertEvent("mdss", "mdp_perf_prefill_calc", &events);
        InsertEvent("mdss", "mdp_perf_set_ot", &events);
        InsertEvent("mdss", "mdp_perf_set_wm_levels", &events);
        InsertEvent("mdss", "mdp_perf_set_panic_luts", &events);
        InsertEvent("mdss", "mdp_perf_set_qos_luts", &events);
        InsertEvent("mdss", "mdp_sspp_change", &events);
        InsertEvent("mdss", "mdp_sspp_set", &events);
        AddEventGroup(table, "mali", &events);
        InsertEvent("mali", "tracing_mark_write", &events);

        AddEventGroup(table, "sde", &events);
        InsertEvent("sde", "tracing_mark_write", &events);
        InsertEvent("sde", "sde_perf_update_bus", &events);
        InsertEvent("sde", "sde_perf_set_qos_luts", &events);
        InsertEvent("sde", "sde_perf_set_ot", &events);
        InsertEvent("sde", "sde_perf_set_danger_luts", &events);
        InsertEvent("sde", "sde_perf_crtc_update", &events);
        InsertEvent("sde", "sde_perf_calc_crtc", &events);
        InsertEvent("sde", "sde_evtlog", &events);
        InsertEvent("sde", "sde_encoder_underrun", &events);
        InsertEvent("sde", "sde_cmd_release_bw", &events);

        AddEventGroup(table, "dpu", &events);
        InsertEvent("dpu", "tracing_mark_write", &events);

        AddEventGroup(table, "g2d", &events);
        InsertEvent("g2d", "tracing_mark_write", &events);
        InsertEvent("g2d", "g2d_perf_update_qos", &events);
        continue;
      }

      if (category == "ion") {
        InsertEvent("kmem", "ion_alloc_buffer_start", &events);
        continue;
      }

      // Note: sched_wakeup intentionally removed (diverging from atrace), as it
      // is high-volume, but mostly redundant when sched_waking is also enabled.
      // The event can still be enabled explicitly when necessary.
      if (category == "sched") {
        InsertEvent("sched", "sched_switch", &events);
        InsertEvent("sched", "sched_waking", &events);
        InsertEvent("sched", "sched_blocked_reason", &events);
        InsertEvent("sched", "sched_cpu_hotplug", &events);
        InsertEvent("sched", "sched_pi_setprio", &events);
        InsertEvent("sched", "sched_process_exit", &events);
        AddEventGroup(table, "cgroup", &events);
        InsertEvent("cgroup", "cgroup_transfer_tasks", &events);
        InsertEvent("cgroup", "cgroup_setup_root", &events);
        InsertEvent("cgroup", "cgroup_rmdir", &events);
        InsertEvent("cgroup", "cgroup_rename", &events);
        InsertEvent("cgroup", "cgroup_remount", &events);
        InsertEvent("cgroup", "cgroup_release", &events);
        InsertEvent("cgroup", "cgroup_mkdir", &events);
        InsertEvent("cgroup", "cgroup_destroy_root", &events);
        InsertEvent("cgroup", "cgroup_attach_task", &events);
        InsertEvent("oom", "oom_score_adj_update", &events);
        InsertEvent("task", "task_rename", &events);
        InsertEvent("task", "task_newtask", &events);

        AddEventGroup(table, "systrace", &events);
        InsertEvent("systrace", "0", &events);

        AddEventGroup(table, "scm", &events);
        InsertEvent("scm", "scm_call_start", &events);
        InsertEvent("scm", "scm_call_end", &events);
        continue;
      }

      if (category == "irq") {
        AddEventGroup(table, "irq", &events);
        InsertEvent("irq", "tasklet_hi_exit", &events);
        InsertEvent("irq", "tasklet_hi_entry", &events);
        InsertEvent("irq", "tasklet_exit", &events);
        InsertEvent("irq", "tasklet_entry", &events);
        InsertEvent("irq", "softirq_raise", &events);
        InsertEvent("irq", "softirq_exit", &events);
        InsertEvent("irq", "softirq_entry", &events);
        InsertEvent("irq", "irq_handler_exit", &events);
        InsertEvent("irq", "irq_handler_entry", &events);
        AddEventGroup(table, "ipi", &events);
        InsertEvent("ipi", "ipi_raise", &events);
        InsertEvent("ipi", "ipi_exit", &events);
        InsertEvent("ipi", "ipi_entry", &events);
        continue;
      }

      if (category == "irqoff") {
        InsertEvent("preemptirq", "irq_enable", &events);
        InsertEvent("preemptirq", "irq_disable", &events);
        continue;
      }

      if (category == "preemptoff") {
        InsertEvent("preemptirq", "preempt_enable", &events);
        InsertEvent("preemptirq", "preempt_disable", &events);
        continue;
      }

      if (category == "i2c") {
        AddEventGroup(table, "i2c", &events);
        InsertEvent("i2c", "i2c_read", &events);
        InsertEvent("i2c", "i2c_write", &events);
        InsertEvent("i2c", "i2c_result", &events);
        InsertEvent("i2c", "i2c_reply", &events);
        InsertEvent("i2c", "smbus_read", &events);
        InsertEvent("i2c", "smbus_write", &events);
        InsertEvent("i2c", "smbus_result", &events);
        InsertEvent("i2c", "smbus_reply", &events);
        continue;
      }

      if (category == "freq") {
        InsertEvent("power", "cpu_frequency", &events);
        InsertEvent("power", "gpu_frequency", &events);
        InsertEvent("power", "clock_set_rate", &events);
        InsertEvent("power", "clock_disable", &events);
        InsertEvent("power", "clock_enable", &events);
        InsertEvent("clk", "clk_set_rate", &events);
        InsertEvent("clk", "clk_disable", &events);
        InsertEvent("clk", "clk_enable", &events);
        InsertEvent("power", "cpu_frequency_limits", &events);
        InsertEvent("power", "suspend_resume", &events);
        InsertEvent("cpuhp", "cpuhp_enter", &events);
        InsertEvent("cpuhp", "cpuhp_exit", &events);
        InsertEvent("cpuhp", "cpuhp_pause", &events);
        AddEventGroup(table, "msm_bus", &events);
        InsertEvent("msm_bus", "bus_update_request_end", &events);
        InsertEvent("msm_bus", "bus_update_request", &events);
        InsertEvent("msm_bus", "bus_rules_matches", &events);
        InsertEvent("msm_bus", "bus_max_votes", &events);
        InsertEvent("msm_bus", "bus_client_status", &events);
        InsertEvent("msm_bus", "bus_bke_params", &events);
        InsertEvent("msm_bus", "bus_bimc_config_limiter", &events);
        InsertEvent("msm_bus", "bus_avail_bw", &events);
        InsertEvent("msm_bus", "bus_agg_bw", &events);
        continue;
      }

      if (category == "membus") {
        AddEventGroup(table, "memory_bus", &events);
        continue;
      }

      if (category == "idle") {
        InsertEvent("power", "cpu_idle", &events);
        continue;
      }

      if (category == "disk") {
        InsertEvent("f2fs", "f2fs_sync_file_enter", &events);
        InsertEvent("f2fs", "f2fs_sync_file_exit", &events);
        InsertEvent("f2fs", "f2fs_write_begin", &events);
        InsertEvent("f2fs", "f2fs_write_end", &events);
        InsertEvent("f2fs", "f2fs_iostat", &events);
        InsertEvent("f2fs", "f2fs_iostat_latency", &events);
        InsertEvent("ext4", "ext4_da_write_begin", &events);
        InsertEvent("ext4", "ext4_da_write_end", &events);
        InsertEvent("ext4", "ext4_sync_file_enter", &events);
        InsertEvent("ext4", "ext4_sync_file_exit", &events);
        InsertEvent("block", "block_bio_queue", &events);
        InsertEvent("block", "block_bio_complete", &events);
        InsertEvent("ufs", "ufshcd_command", &events);
        continue;
      }

      if (category == "mmc") {
        AddEventGroup(table, "mmc", &events);
        continue;
      }

      if (category == "load") {
        AddEventGroup(table, "cpufreq_interactive", &events);
        continue;
      }

      if (category == "sync") {
        // linux kernel < 4.9
        AddEventGroup(table, "sync", &events);
        InsertEvent("sync", "sync_pt", &events);
        InsertEvent("sync", "sync_timeline", &events);
        InsertEvent("sync", "sync_wait", &events);
        // linux kernel == 4.9.x
        AddEventGroup(table, "fence", &events);
        InsertEvent("fence", "fence_annotate_wait_on", &events);
        InsertEvent("fence", "fence_destroy", &events);
        InsertEvent("fence", "fence_emit", &events);
        InsertEvent("fence", "fence_enable_signal", &events);
        InsertEvent("fence", "fence_init", &events);
        InsertEvent("fence", "fence_signaled", &events);
        InsertEvent("fence", "fence_wait_end", &events);
        InsertEvent("fence", "fence_wait_start", &events);
        // linux kernel > 4.9
        AddEventGroup(table, "dma_fence", &events);
        continue;
      }

      if (category == "workq") {
        AddEventGroup(table, "workqueue", &events);
        InsertEvent("workqueue", "workqueue_queue_work", &events);
        InsertEvent("workqueue", "workqueue_execute_start", &events);
        InsertEvent("workqueue", "workqueue_execute_end", &events);
        InsertEvent("workqueue", "workqueue_activate_work", &events);
        continue;
      }

      if (category == "memreclaim") {
        InsertEvent("vmscan", "mm_vmscan_direct_reclaim_begin", &events);
        InsertEvent("vmscan", "mm_vmscan_direct_reclaim_end", &events);
        InsertEvent("vmscan", "mm_vmscan_kswapd_wake", &events);
        InsertEvent("vmscan", "mm_vmscan_kswapd_sleep", &events);
        AddEventGroup(table, "lowmemorykiller", &events);
        InsertEvent("lowmemorykiller", "lowmemory_kill", &events);
        continue;
      }

      if (category == "regulators") {
        AddEventGroup(table, "regulator", &events);
        events.insert(
            GroupAndName("regulator", "regulator_set_voltage_complete"));
        InsertEvent("regulator", "regulator_set_voltage", &events);
        InsertEvent("regulator", "regulator_enable_delay", &events);
        InsertEvent("regulator", "regulator_enable_complete", &events);
        InsertEvent("regulator", "regulator_enable", &events);
        InsertEvent("regulator", "regulator_disable_complete", &events);
        InsertEvent("regulator", "regulator_disable", &events);
        continue;
      }

      if (category == "binder_driver") {
        InsertEvent("binder", "binder_transaction", &events);
        InsertEvent("binder", "binder_transaction_received", &events);
        InsertEvent("binder", "binder_transaction_alloc_buf", &events);
        InsertEvent("binder", "binder_set_priority", &events);
        continue;
      }

      if (category == "binder_lock") {
        InsertEvent("binder", "binder_lock", &events);
        InsertEvent("binder", "binder_locked", &events);
        InsertEvent("binder", "binder_unlock", &events);
        continue;
      }

      if (category == "pagecache") {
        AddEventGroup(table, "filemap", &events);
        events.insert(
            GroupAndName("filemap", "mm_filemap_delete_from_page_cache"));
        InsertEvent("filemap", "mm_filemap_add_to_page_cache", &events);
        InsertEvent("filemap", "filemap_set_wb_err", &events);
        InsertEvent("filemap", "file_check_and_advance_wb_err", &events);
        continue;
      }

      if (category == "memory") {
        // Use rss_stat_throttled if supported
        if (ftrace_->SupportsRssStatThrottled()) {
          InsertEvent("synthetic", "rss_stat_throttled", &events);
        } else {
          InsertEvent("kmem", "rss_stat", &events);
        }
        InsertEvent("kmem", "ion_heap_grow", &events);
        InsertEvent("kmem", "ion_heap_shrink", &events);
        // ion_stat supersedes ion_heap_grow / shrink for kernel 4.19+
        InsertEvent("ion", "ion_stat", &events);
        InsertEvent("mm_event", "mm_event_record", &events);
        InsertEvent("dmabuf_heap", "dma_heap_stat", &events);
        InsertEvent("gpu_mem", "gpu_mem_total", &events);
        continue;
      }

      if (category == "thermal") {
        InsertEvent("thermal", "thermal_temperature", &events);
        InsertEvent("thermal", "cdev_update", &events);
        continue;
      }

      if (category == "camera") {
        AddEventGroup(table, "lwis", &events);
        InsertEvent("lwis", "tracing_mark_write", &events);
        continue;
      }
    }
  }

  // function_graph tracer emits two builtin ftrace events
  if (request.enable_function_graph()) {
    InsertEvent("ftrace", "funcgraph_entry", &events);
    InsertEvent("ftrace", "funcgraph_exit", &events);
  }

  // If throttle_rss_stat: true, use the rss_stat_throttled event if supported
  if (request.throttle_rss_stat() && ftrace_->SupportsRssStatThrottled()) {
    auto it = std::find_if(
        events.begin(), events.end(), [](const GroupAndName& event) {
          return event.group() == "kmem" && event.name() == "rss_stat";
        });

    if (it != events.end()) {
      events.erase(it);
      InsertEvent("synthetic", "rss_stat_throttled", &events);
    }
  }

  return events;
}

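// Returns the ids (in this kernel's syscall table) of the syscalls that yield
// new file descriptors: open, openat, socket, dup, dup2 and dup3. Syscalls not
// present in the table are skipped.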
base::FlatSet<int64_t> FtraceConfigMuxer::GetSyscallsReturningFds(
    const SyscallTable& syscalls) {
  auto insertSyscallId = [&syscalls](base::FlatSet<int64_t>& set,
                                     const char* syscall) {
    auto syscall_id = syscalls.GetByName(syscall);
    if (syscall_id)
      set.insert(static_cast<int64_t>(*syscall_id));
  };

  base::FlatSet<int64_t> call_ids;
  insertSyscallId(call_ids, "sys_open");
  insertSyscallId(call_ids, "sys_openat");
  insertSyscallId(call_ids, "sys_socket");
  insertSyscallId(call_ids, "sys_dup");
  insertSyscallId(call_ids, "sys_dup2");
  insertSyscallId(call_ids, "sys_dup3");
  return call_ids;
}

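// Returns true if |filter| enables at least one event belonging to |group|.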
bool FtraceConfigMuxer::FilterHasGroup(const EventFilter& filter,
                                       const std::string& group) {
  const std::vector<const Event*>* events = table_->GetEventsByGroup(group);
  if (!events) {
    return false;
  }

  for (const Event* event : *events) {
    if (filter.IsEventEnabled(event->ftrace_event_id)) {
      return true;
    }
  }
  return false;
}

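// Builds the syscall filter for one data source: empty if raw_syscalls tracing
// isn't requested, kAllSyscallsId if it is requested without an explicit
// syscall_events list, otherwise the resolved ids of the listed syscalls.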
EventFilter FtraceConfigMuxer::BuildSyscallFilter(
    const EventFilter& ftrace_filter,
    const FtraceConfig& request) {
  EventFilter output;

  if (!FilterHasGroup(ftrace_filter, "raw_syscalls")) {
    return output;
  }

  if (request.syscall_events().empty()) {
    output.AddEnabledEvent(kAllSyscallsId);
    return output;
  }

  for (const std::string& syscall : request.syscall_events()) {
    std::optional<size_t> id = syscalls_.GetByName(syscall);
    if (!id.has_value()) {
      PERFETTO_ELOG("Can't enable %s, syscall not known", syscall.c_str());
      continue;
    }
    output.AddEnabledEvent(*id);
  }

  return output;
}

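// Applies to the kernel the union of |extra_syscalls| and the syscall filters
// of all existing data sources. If any of them asks for all syscalls, an empty
// kernel-side filter (meaning "trace every syscall") is installed.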
bool FtraceConfigMuxer::SetSyscallEventFilter(
    const EventFilter& extra_syscalls) {
  EventFilter syscall_filter;

  syscall_filter.EnableEventsFrom(extra_syscalls);
  for (const auto& id_config : ds_configs_) {
    const perfetto::FtraceDataSourceConfig& config = id_config.second;
    syscall_filter.EnableEventsFrom(config.syscall_filter);
  }

  std::set<size_t> filter_set = syscall_filter.GetEnabledEvents();
  if (syscall_filter.IsEventEnabled(kAllSyscallsId)) {
    filter_set.clear();
  }

  if (current_state_.syscall_filter != filter_set) {
    if (!ftrace_->SetSyscallFilter(filter_set)) {
      return false;
    }

    current_state_.syscall_filter = filter_set;
  }

  return true;
}

// Post-conditions:
// 1. result >= 1 (should have at least one page per CPU)
// 2. result * (base::kPageSize / 1024) <= kMaxPerCpuBufferSizeKb
// 3. If the input is 0, the output is a good default number.
size_t ComputeCpuBufferSizeInPages(size_t requested_buffer_size_kb) {
  if (requested_buffer_size_kb == 0)
    requested_buffer_size_kb = kDefaultPerCpuBufferSizeKb;
  if (requested_buffer_size_kb > kMaxPerCpuBufferSizeKb) {
    PERFETTO_ELOG(
        "The requested ftrace buf size (%zu KB) is too big, capping to %d KB",
        requested_buffer_size_kb, kMaxPerCpuBufferSizeKb);
    requested_buffer_size_kb = kMaxPerCpuBufferSizeKb;
  }

  size_t pages = requested_buffer_size_kb / (base::kPageSize / 1024);
  if (pages == 0)
    return 1;

  return pages;
}

FtraceConfigMuxer::FtraceConfigMuxer(
    FtraceProcfs* ftrace,
    ProtoTranslationTable* table,
    SyscallTable syscalls,
    std::map<std::string, std::vector<GroupAndName>> vendor_events,
    bool secondary_instance)
    : ftrace_(ftrace),
      table_(table),
      syscalls_(std::move(syscalls)),
      current_state_(),
      ds_configs_(),
      vendor_events_(vendor_events),
      secondary_instance_(secondary_instance) {}
FtraceConfigMuxer::~FtraceConfigMuxer() = default;

bool FtraceConfigMuxer::SetupConfig(FtraceConfigId id,
                                    const FtraceConfig& request,
                                    FtraceSetupErrors* errors) {
  EventFilter filter;
  if (ds_configs_.empty()) {
    PERFETTO_DCHECK(active_configs_.empty());

    // If someone outside of perfetto is using a non-nop tracer, yield. We can't
    // realistically figure out all notions of "in use" even if we look at
    // set_event or events/enable, so this is all we check for.
    if (!request.preserve_ftrace_buffer() && !ftrace_->IsTracingAvailable()) {
      PERFETTO_ELOG(
          "ftrace in use by non-Perfetto. Check that %s current_tracer is nop.",
          ftrace_->GetRootPath().c_str());
      return false;
    }

    // Clear tracefs state, remembering which value of "tracing_on" to restore
    // to after we're done, though we won't restore the rest of the tracefs
    // state.
    current_state_.saved_tracing_on = ftrace_->GetTracingOn();
    if (!request.preserve_ftrace_buffer()) {
      ftrace_->SetTracingOn(false);
      // This will fail on release ("user") builds due to ACLs, but that's
      // acceptable since the per-event enabling/disabling should still be
      // balanced.
      ftrace_->DisableAllEvents();
      ftrace_->ClearTrace();
    }

    // Set up the rest of the tracefs state, without starting it.
    // Notes:
    // * resizing buffers can be quite slow (up to hundreds of ms).
    // * resizing buffers doesn't clear their existing contents, which matters
    //   to the preserve_ftrace_buffer option.
    if (!request.preserve_ftrace_buffer()) {
      SetupClock(request);
    }
    SetupBufferSize(request);
  }

  std::set<GroupAndName> events = GetFtraceEvents(request, table_);

  // Vendors can provide a set of extra ftrace categories to be enabled when a
  // specific atrace category is used (e.g. "gfx" -> ["my_hw/my_custom_event",
  // "my_hw/my_special_gpu"]). Merge them with the hard-coded events for each
  // category.
  for (const std::string& category : request.atrace_categories()) {
    if (vendor_events_.count(category)) {
      for (const GroupAndName& event : vendor_events_[category]) {
        events.insert(event);
      }
    }
  }

  if (RequiresAtrace(request)) {
    if (secondary_instance_) {
      PERFETTO_ELOG(
          "Secondary ftrace instances do not support atrace_categories and "
          "atrace_apps options as they affect global state");
      return false;
    }
    if (IsOldAtrace() && !ds_configs_.empty()) {
      PERFETTO_ELOG(
          "Concurrent atrace sessions are not supported before Android P, "
          "bailing out.");
      return false;
    }
    UpdateAtrace(request, errors ? &errors->atrace_errors : nullptr);
  }

  for (const auto& group_and_name : events) {
    const Event* event = table_->GetOrCreateEvent(group_and_name);
    if (!event) {
      PERFETTO_DLOG("Can't enable %s, event not known",
                    group_and_name.ToString().c_str());
      if (errors)
        errors->unknown_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }
    // Niche option to skip events that are in the config, but don't have a
    // dedicated proto for the event in perfetto. Otherwise such events will be
    // encoded as GenericFtraceEvent.
    if (request.disable_generic_events() &&
        event->proto_field_id ==
            protos::pbzero::FtraceEvent::kGenericFieldNumber) {
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }
    // Note: ftrace events are always implicitly enabled (and don't have an
    // "enable" file). So they aren't tracked by the central event filter (but
    // still need to be added to the per data source event filter to retain
    // the events during parsing).
    if (current_state_.ftrace_events.IsEventEnabled(event->ftrace_event_id) ||
        std::string("ftrace") == event->group) {
      filter.AddEnabledEvent(event->ftrace_event_id);
      continue;
    }
    if (ftrace_->EnableEvent(event->group, event->name)) {
      current_state_.ftrace_events.AddEnabledEvent(event->ftrace_event_id);
      filter.AddEnabledEvent(event->ftrace_event_id);
    } else {
      PERFETTO_DPLOG("Failed to enable %s.", group_and_name.ToString().c_str());
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
    }
  }

  EventFilter syscall_filter = BuildSyscallFilter(filter, request);
  if (!SetSyscallEventFilter(syscall_filter)) {
    PERFETTO_ELOG("Failed to set raw_syscall ftrace filter in SetupConfig");
    return false;
  }

  // Kernel function tracing (function_graph).
  // Note 1: there is no cleanup in |RemoveConfig| because tracers cannot be
  // changed while tracing pipes are opened. So we'll keep the current_tracer
  // until all data sources are gone, at which point ftrace_controller will
  // make an explicit call to |ResetCurrentTracer|.
  // Note 2: we don't track the set of filters ourselves and instead let the
  // kernel statefully collate them, hence the use of |AppendFunctionFilters|.
  // This is because each concurrent data source that wants funcgraph will get
  // all of the enabled functions (we don't go as far as doing per-DS event
  // steering in the parser), and we don't want to remove functions midway
  // through a trace (but some might get added).
  if (request.enable_function_graph()) {
    if (!current_state_.funcgraph_on && !ftrace_->ClearFunctionFilters())
      return false;
    if (!current_state_.funcgraph_on && !ftrace_->ClearFunctionGraphFilters())
      return false;
    if (!ftrace_->AppendFunctionFilters(request.function_filters()))
      return false;
    if (!ftrace_->AppendFunctionGraphFilters(request.function_graph_roots()))
      return false;
    if (!current_state_.funcgraph_on &&
        !ftrace_->SetCurrentTracer("function_graph")) {
      PERFETTO_LOG(
          "Unable to enable function_graph tracing since a concurrent ftrace "
          "data source is using a different tracer");
      return false;
    }
    current_state_.funcgraph_on = true;
  }

  auto compact_sched =
      CreateCompactSchedConfig(request, table_->compact_sched_format());

  std::optional<FtracePrintFilterConfig> ftrace_print_filter;
  if (request.has_print_filter()) {
    ftrace_print_filter =
        FtracePrintFilterConfig::Create(request.print_filter(), table_);
    if (!ftrace_print_filter.has_value()) {
      if (errors) {
        errors->failed_ftrace_events.push_back(
            "ftrace/print (unexpected format for filtering)");
      }
    }
  }

  std::vector<std::string> apps(request.atrace_apps());
  std::vector<std::string> categories(request.atrace_categories());
  ds_configs_.emplace(
      std::piecewise_construct, std::forward_as_tuple(id),
      std::forward_as_tuple(std::move(filter), std::move(syscall_filter),
                            compact_sched, std::move(ftrace_print_filter),
                            std::move(apps), std::move(categories),
                            request.symbolize_ksyms(),
                            request.preserve_ftrace_buffer(),
                            GetSyscallsReturningFds(syscalls_)));
  return true;
}

bool FtraceConfigMuxer::ActivateConfig(FtraceConfigId id) {
  if (!id || ds_configs_.count(id) == 0) {
    PERFETTO_DFATAL("Config not found");
    return false;
  }

  // Enable tracing_on to activate the ftrace ring buffer before activating the
  // first config.
  if (active_configs_.empty()) {
    if (!ftrace_->SetTracingOn(true)) {
      PERFETTO_ELOG("Failed to enable ftrace.");
      return false;
    }
  }

  active_configs_.insert(id);
  return true;
}

bool FtraceConfigMuxer::RemoveConfig(FtraceConfigId config_id) {
  if (!config_id || !ds_configs_.erase(config_id))
    return false;
  EventFilter expected_ftrace_events;
  std::vector<std::string> expected_apps;
  std::vector<std::string> expected_categories;
  for (const auto& id_config : ds_configs_) {
    const perfetto::FtraceDataSourceConfig& config = id_config.second;
    expected_ftrace_events.EnableEventsFrom(config.event_filter);
    UnionInPlace(config.atrace_apps, &expected_apps);
    UnionInPlace(config.atrace_categories, &expected_categories);
  }
  // At this point expected_{apps,categories} contains the union of the
  // leftover configs (if any) that should still be on. However we did not
  // necessarily succeed in turning on atrace for each of those configs
  // previously, so we now intersect the {apps,categories} that we *did* manage
  // to turn on with those we want on, to determine the new state we should aim
  // for:
  IntersectInPlace(current_state_.atrace_apps, &expected_apps);
  IntersectInPlace(current_state_.atrace_categories, &expected_categories);
  // Work out if there is any difference between the current state and the
  // desired state: it's sufficient to compare sizes here (since we know from
  // above that expected_{apps,categories} is now a subset of
  // atrace_{apps,categories}):
  bool atrace_changed =
      (current_state_.atrace_apps.size() != expected_apps.size()) ||
      (current_state_.atrace_categories.size() != expected_categories.size());

  if (!SetSyscallEventFilter(/*extra_syscalls=*/{})) {
    PERFETTO_ELOG("Failed to set raw_syscall ftrace filter in RemoveConfig");
  }

  // Disable any events that are currently enabled, but are not in any configs
  // anymore.
  std::set<size_t> event_ids = current_state_.ftrace_events.GetEnabledEvents();
  for (size_t id : event_ids) {
    if (expected_ftrace_events.IsEventEnabled(id))
      continue;
    const Event* event = table_->GetEventById(id);
    // Any event that was enabled must exist.
    PERFETTO_DCHECK(event);
    if (ftrace_->DisableEvent(event->group, event->name))
      current_state_.ftrace_events.DisableEvent(event->ftrace_event_id);
  }

  auto active_it = active_configs_.find(config_id);
  if (active_it != active_configs_.end()) {
    active_configs_.erase(active_it);
    if (active_configs_.empty()) {
      // This was the last active config for now, but potentially more dormant
      // configs need to be activated. We are not interested in reading while
      // there are no active configs, so disable tracing_on here.
      ftrace_->SetTracingOn(false);
    }
  }

  // Even if we don't have any other active configs, we might still have idle
  // configs around. Tear down the rest of the ftrace config only if all
  // configs are removed.
  if (ds_configs_.empty()) {
    if (ftrace_->SetCpuBufferSizeInPages(1))
      current_state_.cpu_buffer_size_pages = 1;
    ftrace_->DisableAllEvents();
    ftrace_->ClearTrace();
    ftrace_->SetTracingOn(current_state_.saved_tracing_on);
  }

  if (current_state_.atrace_on) {
    if (expected_apps.empty() && expected_categories.empty()) {
      DisableAtrace();
    } else if (atrace_changed) {
      // Update atrace to remove the no-longer-wanted categories/apps. For
      // some categories this won't disable them (e.g. categories that just
      // enable ftrace events); for those there is nothing we can do until the
      // last ftrace config is removed.
      if (StartAtrace(expected_apps, expected_categories,
                      /*atrace_errors=*/nullptr)) {
        // Update current_state_ to reflect this change.
        current_state_.atrace_apps = expected_apps;
        current_state_.atrace_categories = expected_categories;
      }
    }
  }

  return true;
}

bool FtraceConfigMuxer::ResetCurrentTracer() {
  if (!current_state_.funcgraph_on)
    return true;
  if (!ftrace_->ResetCurrentTracer()) {
    PERFETTO_PLOG("Failed to reset current_tracer to nop");
    return false;
  }
  current_state_.funcgraph_on = false;
  if (!ftrace_->ClearFunctionFilters()) {
    PERFETTO_PLOG("Failed to reset set_ftrace_filter.");
    return false;
  }
  if (!ftrace_->ClearFunctionGraphFilters()) {
    PERFETTO_PLOG("Failed to reset set_function_graph.");
    return false;
  }
  return true;
}

const FtraceDataSourceConfig* FtraceConfigMuxer::GetDataSourceConfig(
    FtraceConfigId id) {
  if (!ds_configs_.count(id))
    return nullptr;
  return &ds_configs_.at(id);
}

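// Picks the trace clock: mono_raw if explicitly requested and available,
// otherwise the first available entry of kClocks in preference order. The
// resulting choice is recorded in current_state_.ftrace_clock as an
// FtraceClock enum value.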
void FtraceConfigMuxer::SetupClock(const FtraceConfig& config) {
  std::string current_clock = ftrace_->GetClock();
  std::set<std::string> clocks = ftrace_->AvailableClocks();

  if (config.has_use_monotonic_raw_clock() &&
      config.use_monotonic_raw_clock() && clocks.count(kClockMonoRaw)) {
    ftrace_->SetClock(kClockMonoRaw);
    current_clock = kClockMonoRaw;
  } else {
    for (size_t i = 0; i < base::ArraySize(kClocks); i++) {
      std::string clock = std::string(kClocks[i]);
      if (!clocks.count(clock))
        continue;
      if (current_clock == clock)
        break;
      ftrace_->SetClock(clock);
      current_clock = clock;
      break;
    }
  }

  namespace pb0 = protos::pbzero;
  if (current_clock == "boot") {
    // "boot" is the default expectation on modern kernels, which is why we
    // don't have an explicit FTRACE_CLOCK_BOOT enum and leave it unset.
    // See comments in ftrace_event_bundle.proto.
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_UNSPECIFIED;
  } else if (current_clock == "global") {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_GLOBAL;
  } else if (current_clock == "local") {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_LOCAL;
  } else if (current_clock == kClockMonoRaw) {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_MONO_RAW;
  } else {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_UNKNOWN;
  }
}

void FtraceConfigMuxer::SetupBufferSize(const FtraceConfig& request) {
  size_t pages = ComputeCpuBufferSizeInPages(request.buffer_size_kb());
  ftrace_->SetCpuBufferSizeInPages(pages);
  current_state_.cpu_buffer_size_pages = pages;
}

size_t FtraceConfigMuxer::GetPerCpuBufferSizePages() {
  return current_state_.cpu_buffer_size_pages;
}

void FtraceConfigMuxer::UpdateAtrace(const FtraceConfig& request,
                                     std::string* atrace_errors) {
  // We want to avoid poisoning current_state_.atrace_{categories, apps}
  // if for some reason these args make atrace unhappy, so we stash the
  // union into temps and only update current_state_ if we successfully
  // run atrace.

  std::vector<std::string> combined_categories = request.atrace_categories();
  UnionInPlace(current_state_.atrace_categories, &combined_categories);

  std::vector<std::string> combined_apps = request.atrace_apps();
  UnionInPlace(current_state_.atrace_apps, &combined_apps);

  if (current_state_.atrace_on &&
      combined_apps.size() == current_state_.atrace_apps.size() &&
      combined_categories.size() == current_state_.atrace_categories.size()) {
    return;
  }

  if (StartAtrace(combined_apps, combined_categories, atrace_errors)) {
    current_state_.atrace_categories = combined_categories;
    current_state_.atrace_apps = combined_apps;
    current_state_.atrace_on = true;
  }
}

// static
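// Builds the argv "atrace --async_start [--only_userspace] <categories...>
// [-a <comma-separated apps>]" and executes it via RunAtrace().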
bool FtraceConfigMuxer::StartAtrace(const std::vector<std::string>& apps,
                                    const std::vector<std::string>& categories,
                                    std::string* atrace_errors) {
  PERFETTO_DLOG("Update atrace config...");

  std::vector<std::string> args;
  args.push_back("atrace");  // argv0 for exec()
  args.push_back("--async_start");
  if (!IsOldAtrace())
    args.push_back("--only_userspace");

  for (const auto& category : categories)
    args.push_back(category);

  if (!apps.empty()) {
    args.push_back("-a");
    std::string arg = "";
    for (const auto& app : apps) {
      arg += app;
      arg += ",";
    }
    arg.resize(arg.size() - 1);
    args.push_back(arg);
  }

  bool result = RunAtrace(args, atrace_errors);
  PERFETTO_DLOG("...done (%s)", result ? "success" : "fail");
  return result;
}

void FtraceConfigMuxer::DisableAtrace() {
  PERFETTO_DCHECK(current_state_.atrace_on);

  PERFETTO_DLOG("Stop atrace...");

  std::vector<std::string> args{"atrace", "--async_stop"};
  if (!IsOldAtrace())
    args.push_back("--only_userspace");
  if (RunAtrace(args, /*atrace_errors=*/nullptr)) {
    current_state_.atrace_categories.clear();
    current_state_.atrace_apps.clear();
    current_state_.atrace_on = false;
  }

  PERFETTO_DLOG("...done");
}

}  // namespace perfetto