/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/traced/probes/ftrace/ftrace_config_muxer.h"

#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdint>

#include <algorithm>
#include <iterator>
#include <limits>

#include "perfetto/base/compiler.h"
#include "perfetto/ext/base/utils.h"
#include "protos/perfetto/trace/ftrace/generic.pbzero.h"
#include "src/traced/probes/ftrace/atrace_wrapper.h"
#include "src/traced/probes/ftrace/compact_sched.h"
#include "src/traced/probes/ftrace/ftrace_config_utils.h"
#include "src/traced/probes/ftrace/ftrace_stats.h"

#include "protos/perfetto/trace/ftrace/ftrace_event.pbzero.h"

namespace perfetto {
namespace {

using protos::pbzero::KprobeEvent;

constexpr uint64_t kDefaultLowRamPerCpuBufferSizeKb = 2 * (1ULL << 10);   // 2mb
constexpr uint64_t kDefaultHighRamPerCpuBufferSizeKb = 8 * (1ULL << 10);  // 8mb

// Threshold for physical ram size used when deciding on default kernel buffer
// sizes. We want to detect 8 GB, but the size reported through sysconf is
// usually lower.
constexpr uint64_t kHighMemBytes = 7 * (1ULL << 30);  // 7gb

// A fake "syscall id" that indicates all syscalls should be recorded. This
// allows us to distinguish the case where `syscall_events` is empty because
// raw_syscalls aren't enabled from the case where it is empty but we want to
// record all syscalls.
constexpr size_t kAllSyscallsId = kMaxSyscalls + 1;

// trace_clocks in preference order.
// If this list is changed, the FtraceClocks enum in ftrace_event_bundle.proto
// and FtraceConfigMuxer::SetupClock() should also be changed accordingly.
constexpr const char* kClocks[] = {"boot", "global", "local"};

// Optional monotonic raw clock.
// Enabled by the "use_monotonic_raw_clock" option in the ftrace config.
constexpr const char* kClockMonoRaw = "mono_raw";

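// Inserts into |to| every event that the proto translation table knows about
// for |group|.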
void AddEventGroup(const ProtoTranslationTable* table,
                   const std::string& group,
                   std::set<GroupAndName>* to) {
  const std::vector<const Event*>* events = table->GetEventsByGroup(group);
  if (!events)
    return;
  for (const Event* event : *events)
    to->insert(GroupAndName(group, event->name));
}

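// Lists the event names under events/<group>/ in tracefs and returns them as
// GroupAndName entries.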
std::set<GroupAndName> ReadEventsInGroupFromFs(
    const FtraceProcfs& ftrace_procfs,
    const std::string& group) {
  std::set<std::string> names =
      ftrace_procfs.GetEventNamesForGroup("events/" + group);
  std::set<GroupAndName> events;
  for (const auto& name : names)
    events.insert(GroupAndName(group, name));
  return events;
}

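// Splits a "group/name" ftrace_events config entry into its group and name
// parts. If no '/' is present, the group is left empty.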
std::pair<std::string, std::string> EventToStringGroupAndName(
    const std::string& event) {
  auto slash_pos = event.find('/');
  if (slash_pos == std::string::npos)
    return std::make_pair("", event);
  return std::make_pair(event.substr(0, slash_pos),
                        event.substr(slash_pos + 1));
}

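// Replaces |*out| with the sorted union of |unsorted_a| and |*out|.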
void UnionInPlace(const std::vector<std::string>& unsorted_a,
                  std::vector<std::string>* out) {
  std::vector<std::string> a = unsorted_a;
  std::sort(a.begin(), a.end());
  std::sort(out->begin(), out->end());
  std::vector<std::string> v;
  std::set_union(a.begin(), a.end(), out->begin(), out->end(),
                 std::back_inserter(v));
  *out = std::move(v);
}

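// Replaces |*out| with the sorted intersection of |unsorted_a| and |*out|.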
void IntersectInPlace(const std::vector<std::string>& unsorted_a,
                      std::vector<std::string>* out) {
  std::vector<std::string> a = unsorted_a;
  std::sort(a.begin(), a.end());
  std::sort(out->begin(), out->end());
  std::vector<std::string> v;
  std::set_intersection(a.begin(), a.end(), out->begin(), out->end(),
                        std::back_inserter(v));
  *out = std::move(v);
}

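// Returns the sorted elements of |unsorted_a| that are not in |unsorted_b|.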
std::vector<std::string> Subtract(const std::vector<std::string>& unsorted_a,
                                  const std::vector<std::string>& unsorted_b) {
  std::vector<std::string> a = unsorted_a;
  std::sort(a.begin(), a.end());
  std::vector<std::string> b = unsorted_b;
  std::sort(b.begin(), b.end());
  std::vector<std::string> v;
  std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                      std::back_inserter(v));
  return v;
}

// This is just to reduce binary size and stack frame size of the insertions.
// It effectively undoes STL's set::insert inlining.
void PERFETTO_NO_INLINE InsertEvent(const char* group,
                                    const char* name,
                                    std::set<GroupAndName>* dst) {
  dst->insert(GroupAndName(group, name));
}

std::map<GroupAndName, KprobeEvent::KprobeType> GetFtraceKprobeEvents(
    const FtraceConfig& request) {
  std::map<GroupAndName, KprobeEvent::KprobeType> events;
  for (const auto& config_value : request.kprobe_events()) {
    switch (config_value.type()) {
      case protos::gen::FtraceConfig::KprobeEvent::KPROBE_TYPE_KPROBE:
        events[GroupAndName(kKprobeGroup, config_value.probe().c_str())] =
            KprobeEvent::KprobeType::KPROBE_TYPE_INSTANT;
        break;
      case protos::gen::FtraceConfig::KprobeEvent::KPROBE_TYPE_KRETPROBE:
        events[GroupAndName(kKretprobeGroup, config_value.probe().c_str())] =
            KprobeEvent::KprobeType::KPROBE_TYPE_INSTANT;
        break;
      case protos::gen::FtraceConfig::KprobeEvent::KPROBE_TYPE_BOTH:
        events[GroupAndName(kKprobeGroup, config_value.probe().c_str())] =
            KprobeEvent::KprobeType::KPROBE_TYPE_BEGIN;
        events[GroupAndName(kKretprobeGroup, config_value.probe().c_str())] =
            KprobeEvent::KprobeType::KPROBE_TYPE_END;
        break;
      case protos::gen::FtraceConfig::KprobeEvent::KPROBE_TYPE_UNKNOWN:
        PERFETTO_DLOG("Unknown kprobe event");
        break;
    }
    PERFETTO_DLOG("Added kprobe event: %s", config_value.probe().c_str());
  }
  return events;
}

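// Kprobe and kretprobe names may only contain alphanumeric characters and
// underscores.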
bool ValidateKprobeName(const std::string& name) {
  for (const char& c : name) {
    if (!std::isalnum(c) && c != '_') {
      return false;
    }
  }
  return true;
}

}  // namespace

std::set<GroupAndName> FtraceConfigMuxer::GetFtraceEvents(
    const FtraceConfig& request,
    const ProtoTranslationTable* table) {
  std::set<GroupAndName> events;
  for (const auto& config_value : request.ftrace_events()) {
    std::string group;
    std::string name;
    std::tie(group, name) = EventToStringGroupAndName(config_value);
    if (name == "*") {
      for (const auto& event : ReadEventsInGroupFromFs(*ftrace_, group))
        events.insert(event);
    } else if (group.empty()) {
      // If there is no group specified, find an event with that name and
      // use its group.
      const Event* e = table->GetEventByName(name);
      if (!e) {
        PERFETTO_DLOG(
            "Event doesn't exist: %s. Include the group in the config to allow "
            "the event to be output as a generic event.",
            name.c_str());
        continue;
      }
      events.insert(GroupAndName(e->group, e->name));
    } else {
      events.insert(GroupAndName(group, name));
    }
  }
  if (RequiresAtrace(request)) {
    InsertEvent("ftrace", "print", &events);

    // Ideally we should keep this code in sync with:
    // platform/frameworks/native/cmds/atrace/atrace.cpp
    // It's not a disaster if they go out of sync: we can always add the ftrace
    // categories manually server-side, but this is user-friendly and reduces
    // the size of the configs.
    for (const std::string& category : request.atrace_categories()) {
      if (category == "gfx") {
        AddEventGroup(table, "mdss", &events);
        InsertEvent("mdss", "rotator_bw_ao_as_context", &events);
        InsertEvent("mdss", "mdp_trace_counter", &events);
        InsertEvent("mdss", "tracing_mark_write", &events);
        InsertEvent("mdss", "mdp_cmd_wait_pingpong", &events);
        InsertEvent("mdss", "mdp_cmd_kickoff", &events);
        InsertEvent("mdss", "mdp_cmd_release_bw", &events);
        InsertEvent("mdss", "mdp_cmd_readptr_done", &events);
        InsertEvent("mdss", "mdp_cmd_pingpong_done", &events);
        InsertEvent("mdss", "mdp_misr_crc", &events);
        InsertEvent("mdss", "mdp_compare_bw", &events);
        InsertEvent("mdss", "mdp_perf_update_bus", &events);
        InsertEvent("mdss", "mdp_video_underrun_done", &events);
        InsertEvent("mdss", "mdp_commit", &events);
        InsertEvent("mdss", "mdp_mixer_update", &events);
        InsertEvent("mdss", "mdp_perf_prefill_calc", &events);
        InsertEvent("mdss", "mdp_perf_set_ot", &events);
        InsertEvent("mdss", "mdp_perf_set_wm_levels", &events);
        InsertEvent("mdss", "mdp_perf_set_panic_luts", &events);
        InsertEvent("mdss", "mdp_perf_set_qos_luts", &events);
        InsertEvent("mdss", "mdp_sspp_change", &events);
        InsertEvent("mdss", "mdp_sspp_set", &events);
        AddEventGroup(table, "mali", &events);
        InsertEvent("mali", "tracing_mark_write", &events);

        AddEventGroup(table, "sde", &events);
        InsertEvent("sde", "tracing_mark_write", &events);
        InsertEvent("sde", "sde_perf_update_bus", &events);
        InsertEvent("sde", "sde_perf_set_qos_luts", &events);
        InsertEvent("sde", "sde_perf_set_ot", &events);
        InsertEvent("sde", "sde_perf_set_danger_luts", &events);
        InsertEvent("sde", "sde_perf_crtc_update", &events);
        InsertEvent("sde", "sde_perf_calc_crtc", &events);
        InsertEvent("sde", "sde_evtlog", &events);
        InsertEvent("sde", "sde_encoder_underrun", &events);
        InsertEvent("sde", "sde_cmd_release_bw", &events);

        AddEventGroup(table, "dpu", &events);
        InsertEvent("dpu", "tracing_mark_write", &events);
        InsertEvent("dpu", "disp_dpu_underrun", &events);

        AddEventGroup(table, "g2d", &events);
        InsertEvent("g2d", "tracing_mark_write", &events);
        InsertEvent("g2d", "g2d_perf_update_qos", &events);

        AddEventGroup(table, "panel", &events);
        InsertEvent("panel", "panel_write_generic", &events);
        continue;
      }

      if (category == "ion") {
        InsertEvent("kmem", "ion_alloc_buffer_start", &events);
        continue;
      }

      // Note: sched_wakeup intentionally removed (diverging from atrace), as it
      // is high-volume, but mostly redundant when sched_waking is also enabled.
      // The event can still be enabled explicitly when necessary.
      if (category == "sched") {
        InsertEvent("sched", "sched_switch", &events);
        InsertEvent("sched", "sched_waking", &events);
        InsertEvent("sched", "sched_blocked_reason", &events);
        InsertEvent("sched", "sched_cpu_hotplug", &events);
        InsertEvent("sched", "sched_pi_setprio", &events);
        InsertEvent("sched", "sched_process_exit", &events);
        AddEventGroup(table, "cgroup", &events);
        InsertEvent("cgroup", "cgroup_transfer_tasks", &events);
        InsertEvent("cgroup", "cgroup_setup_root", &events);
        InsertEvent("cgroup", "cgroup_rmdir", &events);
        InsertEvent("cgroup", "cgroup_rename", &events);
        InsertEvent("cgroup", "cgroup_remount", &events);
        InsertEvent("cgroup", "cgroup_release", &events);
        InsertEvent("cgroup", "cgroup_mkdir", &events);
        InsertEvent("cgroup", "cgroup_destroy_root", &events);
        InsertEvent("cgroup", "cgroup_attach_task", &events);
        InsertEvent("oom", "oom_score_adj_update", &events);
        InsertEvent("task", "task_rename", &events);
        InsertEvent("task", "task_newtask", &events);

        AddEventGroup(table, "systrace", &events);
        InsertEvent("systrace", "0", &events);

        AddEventGroup(table, "scm", &events);
        InsertEvent("scm", "scm_call_start", &events);
        InsertEvent("scm", "scm_call_end", &events);
        continue;
      }

      if (category == "irq") {
        AddEventGroup(table, "irq", &events);
        InsertEvent("irq", "tasklet_hi_exit", &events);
        InsertEvent("irq", "tasklet_hi_entry", &events);
        InsertEvent("irq", "tasklet_exit", &events);
        InsertEvent("irq", "tasklet_entry", &events);
        InsertEvent("irq", "softirq_raise", &events);
        InsertEvent("irq", "softirq_exit", &events);
        InsertEvent("irq", "softirq_entry", &events);
        InsertEvent("irq", "irq_handler_exit", &events);
        InsertEvent("irq", "irq_handler_entry", &events);
        AddEventGroup(table, "ipi", &events);
        InsertEvent("ipi", "ipi_raise", &events);
        InsertEvent("ipi", "ipi_exit", &events);
        InsertEvent("ipi", "ipi_entry", &events);
        continue;
      }

      if (category == "irqoff") {
        InsertEvent("preemptirq", "irq_enable", &events);
        InsertEvent("preemptirq", "irq_disable", &events);
        continue;
      }

      if (category == "preemptoff") {
        InsertEvent("preemptirq", "preempt_enable", &events);
        InsertEvent("preemptirq", "preempt_disable", &events);
        continue;
      }

      if (category == "i2c") {
        AddEventGroup(table, "i2c", &events);
        InsertEvent("i2c", "i2c_read", &events);
        InsertEvent("i2c", "i2c_write", &events);
        InsertEvent("i2c", "i2c_result", &events);
        InsertEvent("i2c", "i2c_reply", &events);
        InsertEvent("i2c", "smbus_read", &events);
        InsertEvent("i2c", "smbus_write", &events);
        InsertEvent("i2c", "smbus_result", &events);
        InsertEvent("i2c", "smbus_reply", &events);
        continue;
      }

      if (category == "freq") {
        InsertEvent("power", "cpu_frequency", &events);
        InsertEvent("power", "gpu_frequency", &events);
        InsertEvent("power", "clock_set_rate", &events);
        InsertEvent("power", "clock_disable", &events);
        InsertEvent("power", "clock_enable", &events);
        InsertEvent("clk", "clk_set_rate", &events);
        InsertEvent("clk", "clk_disable", &events);
        InsertEvent("clk", "clk_enable", &events);
        InsertEvent("power", "cpu_frequency_limits", &events);
        InsertEvent("power", "suspend_resume", &events);
        InsertEvent("cpuhp", "cpuhp_enter", &events);
        InsertEvent("cpuhp", "cpuhp_exit", &events);
        InsertEvent("cpuhp", "cpuhp_pause", &events);
        AddEventGroup(table, "msm_bus", &events);
        InsertEvent("msm_bus", "bus_update_request_end", &events);
        InsertEvent("msm_bus", "bus_update_request", &events);
        InsertEvent("msm_bus", "bus_rules_matches", &events);
        InsertEvent("msm_bus", "bus_max_votes", &events);
        InsertEvent("msm_bus", "bus_client_status", &events);
        InsertEvent("msm_bus", "bus_bke_params", &events);
        InsertEvent("msm_bus", "bus_bimc_config_limiter", &events);
        InsertEvent("msm_bus", "bus_avail_bw", &events);
        InsertEvent("msm_bus", "bus_agg_bw", &events);
        continue;
      }

      if (category == "membus") {
        AddEventGroup(table, "memory_bus", &events);
        continue;
      }

      if (category == "idle") {
        InsertEvent("power", "cpu_idle", &events);
        continue;
      }

      if (category == "disk") {
        InsertEvent("f2fs", "f2fs_sync_file_enter", &events);
        InsertEvent("f2fs", "f2fs_sync_file_exit", &events);
        InsertEvent("f2fs", "f2fs_write_begin", &events);
        InsertEvent("f2fs", "f2fs_write_end", &events);
        InsertEvent("f2fs", "f2fs_iostat", &events);
        InsertEvent("f2fs", "f2fs_iostat_latency", &events);
        InsertEvent("ext4", "ext4_da_write_begin", &events);
        InsertEvent("ext4", "ext4_da_write_end", &events);
        InsertEvent("ext4", "ext4_sync_file_enter", &events);
        InsertEvent("ext4", "ext4_sync_file_exit", &events);
        InsertEvent("block", "block_bio_queue", &events);
        InsertEvent("block", "block_bio_complete", &events);
        InsertEvent("ufs", "ufshcd_command", &events);
        continue;
      }

      if (category == "mmc") {
        AddEventGroup(table, "mmc", &events);
        continue;
      }

      if (category == "load") {
        AddEventGroup(table, "cpufreq_interactive", &events);
        continue;
      }

      if (category == "sync") {
        // linux kernel < 4.9
        AddEventGroup(table, "sync", &events);
        InsertEvent("sync", "sync_pt", &events);
        InsertEvent("sync", "sync_timeline", &events);
        InsertEvent("sync", "sync_wait", &events);
        // linux kernel == 4.9.x
        AddEventGroup(table, "fence", &events);
        InsertEvent("fence", "fence_annotate_wait_on", &events);
        InsertEvent("fence", "fence_destroy", &events);
        InsertEvent("fence", "fence_emit", &events);
        InsertEvent("fence", "fence_enable_signal", &events);
        InsertEvent("fence", "fence_init", &events);
        InsertEvent("fence", "fence_signaled", &events);
        InsertEvent("fence", "fence_wait_end", &events);
        InsertEvent("fence", "fence_wait_start", &events);
        // linux kernel > 4.9
        AddEventGroup(table, "dma_fence", &events);
        continue;
      }

      if (category == "workq") {
        AddEventGroup(table, "workqueue", &events);
        InsertEvent("workqueue", "workqueue_queue_work", &events);
        InsertEvent("workqueue", "workqueue_execute_start", &events);
        InsertEvent("workqueue", "workqueue_execute_end", &events);
        InsertEvent("workqueue", "workqueue_activate_work", &events);
        continue;
      }

      if (category == "memreclaim") {
        InsertEvent("vmscan", "mm_vmscan_direct_reclaim_begin", &events);
        InsertEvent("vmscan", "mm_vmscan_direct_reclaim_end", &events);
        InsertEvent("vmscan", "mm_vmscan_kswapd_wake", &events);
        InsertEvent("vmscan", "mm_vmscan_kswapd_sleep", &events);
        AddEventGroup(table, "lowmemorykiller", &events);
        InsertEvent("lowmemorykiller", "lowmemory_kill", &events);
        continue;
      }

      if (category == "regulators") {
        AddEventGroup(table, "regulator", &events);
        events.insert(
            GroupAndName("regulator", "regulator_set_voltage_complete"));
        InsertEvent("regulator", "regulator_set_voltage", &events);
        InsertEvent("regulator", "regulator_enable_delay", &events);
        InsertEvent("regulator", "regulator_enable_complete", &events);
        InsertEvent("regulator", "regulator_enable", &events);
        InsertEvent("regulator", "regulator_disable_complete", &events);
        InsertEvent("regulator", "regulator_disable", &events);
        continue;
      }

      if (category == "binder_driver") {
        InsertEvent("binder", "binder_transaction", &events);
        InsertEvent("binder", "binder_transaction_received", &events);
        InsertEvent("binder", "binder_transaction_alloc_buf", &events);
        InsertEvent("binder", "binder_set_priority", &events);
        continue;
      }

      if (category == "binder_lock") {
        InsertEvent("binder", "binder_lock", &events);
        InsertEvent("binder", "binder_locked", &events);
        InsertEvent("binder", "binder_unlock", &events);
        continue;
      }

      if (category == "pagecache") {
        AddEventGroup(table, "filemap", &events);
        events.insert(
            GroupAndName("filemap", "mm_filemap_delete_from_page_cache"));
        InsertEvent("filemap", "mm_filemap_add_to_page_cache", &events);
        InsertEvent("filemap", "filemap_set_wb_err", &events);
        InsertEvent("filemap", "file_check_and_advance_wb_err", &events);
        continue;
      }

      if (category == "memory") {
        // Use rss_stat_throttled if supported
        if (ftrace_->SupportsRssStatThrottled()) {
          InsertEvent("synthetic", "rss_stat_throttled", &events);
        } else {
          InsertEvent("kmem", "rss_stat", &events);
        }
        InsertEvent("kmem", "ion_heap_grow", &events);
        InsertEvent("kmem", "ion_heap_shrink", &events);
        // ion_stat supersedes ion_heap_grow / shrink for kernel 4.19+
        InsertEvent("ion", "ion_stat", &events);
        InsertEvent("mm_event", "mm_event_record", &events);
        InsertEvent("dmabuf_heap", "dma_heap_stat", &events);
        InsertEvent("gpu_mem", "gpu_mem_total", &events);
        continue;
      }

      if (category == "thermal") {
        InsertEvent("thermal", "thermal_temperature", &events);
        InsertEvent("thermal", "cdev_update", &events);
        continue;
      }

      if (category == "camera") {
        AddEventGroup(table, "lwis", &events);
        InsertEvent("lwis", "tracing_mark_write", &events);
        continue;
      }
    }
  }

  // recording a subset of syscalls -> enable the backing events
  if (request.syscall_events_size() > 0) {
    InsertEvent("raw_syscalls", "sys_enter", &events);
    InsertEvent("raw_syscalls", "sys_exit", &events);
  }

  // function_graph tracer emits two builtin ftrace events
  if (request.enable_function_graph()) {
    InsertEvent("ftrace", "funcgraph_entry", &events);
    InsertEvent("ftrace", "funcgraph_exit", &events);
  }

  // If throttle_rss_stat: true, use the rss_stat_throttled event if supported
  if (request.throttle_rss_stat() && ftrace_->SupportsRssStatThrottled()) {
    auto it = std::find_if(
        events.begin(), events.end(), [](const GroupAndName& event) {
          return event.group() == "kmem" && event.name() == "rss_stat";
        });

    if (it != events.end()) {
      events.erase(it);
      InsertEvent("synthetic", "rss_stat_throttled", &events);
    }
  }

  return events;
}

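// Looks up the ids of the syscalls that create or return file descriptors
// (open, openat, socket, dup, dup2, dup3) in the given syscall table.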
base::FlatSet<int64_t> FtraceConfigMuxer::GetSyscallsReturningFds(
    const SyscallTable& syscalls) {
  auto insertSyscallId = [&syscalls](base::FlatSet<int64_t>& set,
                                     const char* syscall) {
    auto syscall_id = syscalls.GetByName(syscall);
    if (syscall_id)
      set.insert(static_cast<int64_t>(*syscall_id));
  };

  base::FlatSet<int64_t> call_ids;
  insertSyscallId(call_ids, "sys_open");
  insertSyscallId(call_ids, "sys_openat");
  insertSyscallId(call_ids, "sys_socket");
  insertSyscallId(call_ids, "sys_dup");
  insertSyscallId(call_ids, "sys_dup2");
  insertSyscallId(call_ids, "sys_dup3");
  return call_ids;
}

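// Returns true if |filter| enables at least one event belonging to |group|.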
bool FtraceConfigMuxer::FilterHasGroup(const EventFilter& filter,
                                       const std::string& group) {
  const std::vector<const Event*>* events = table_->GetEventsByGroup(group);
  if (!events) {
    return false;
  }

  for (const Event* event : *events) {
    if (filter.IsEventEnabled(event->ftrace_event_id)) {
      return true;
    }
  }
  return false;
}

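// Builds the per-data-source syscall filter: empty if raw_syscalls events
// aren't enabled, the sentinel kAllSyscallsId if no specific syscalls were
// requested, otherwise the ids of the requested syscalls.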
EventFilter FtraceConfigMuxer::BuildSyscallFilter(
    const EventFilter& ftrace_filter,
    const FtraceConfig& request) {
  EventFilter output;

  if (!FilterHasGroup(ftrace_filter, "raw_syscalls")) {
    return output;
  }

  if (request.syscall_events().empty()) {
    output.AddEnabledEvent(kAllSyscallsId);
    return output;
  }

  for (const std::string& syscall : request.syscall_events()) {
    std::optional<size_t> id = syscalls_.GetByName(syscall);
    if (!id.has_value()) {
      PERFETTO_ELOG("Can't enable %s, syscall not known", syscall.c_str());
      continue;
    }
    output.AddEnabledEvent(*id);
  }

  return output;
}

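// Pushes the union of |extra_syscalls| and all data sources' syscall filters
// to the kernel. If any source wants all syscalls, the kernel-side filter set
// is left empty (i.e. no filtering).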
bool FtraceConfigMuxer::SetSyscallEventFilter(
    const EventFilter& extra_syscalls) {
  EventFilter syscall_filter;

  syscall_filter.EnableEventsFrom(extra_syscalls);
  for (const auto& id_config : ds_configs_) {
    const perfetto::FtraceDataSourceConfig& config = id_config.second;
    syscall_filter.EnableEventsFrom(config.syscall_filter);
  }

  std::set<size_t> filter_set = syscall_filter.GetEnabledEvents();
  if (syscall_filter.IsEventEnabled(kAllSyscallsId)) {
    filter_set.clear();
  }

  if (current_state_.syscall_filter != filter_set) {
    if (!ftrace_->SetSyscallFilter(filter_set)) {
      return false;
    }

    current_state_.syscall_filter = filter_set;
  }

  return true;
}

void FtraceConfigMuxer::EnableFtraceEvent(const Event* event,
                                          const GroupAndName& group_and_name,
                                          EventFilter* filter,
                                          FtraceSetupErrors* errors) {
  // Note: ftrace events are always implicitly enabled (and don't have an
  // "enable" file). So they aren't tracked by the central event filter (but
  // still need to be added to the per data source event filter to retain
  // the events during parsing).
  if (current_state_.ftrace_events.IsEventEnabled(event->ftrace_event_id) ||
      std::string("ftrace") == event->group) {
    filter->AddEnabledEvent(event->ftrace_event_id);
    return;
  }
  if (ftrace_->EnableEvent(event->group, event->name)) {
    current_state_.ftrace_events.AddEnabledEvent(event->ftrace_event_id);
    filter->AddEnabledEvent(event->ftrace_event_id);
  } else {
    PERFETTO_DPLOG("Failed to enable %s.", group_and_name.ToString().c_str());
    if (errors)
      errors->failed_ftrace_events.push_back(group_and_name.ToString());
  }
}

FtraceConfigMuxer::FtraceConfigMuxer(
    FtraceProcfs* ftrace,
    AtraceWrapper* atrace_wrapper,
    ProtoTranslationTable* table,
    SyscallTable syscalls,
    std::map<std::string, std::vector<GroupAndName>> vendor_events,
    bool secondary_instance)
    : ftrace_(ftrace),
      atrace_wrapper_(atrace_wrapper),
      table_(table),
      syscalls_(syscalls),
      current_state_(),
      vendor_events_(std::move(vendor_events)),
      secondary_instance_(secondary_instance) {}
FtraceConfigMuxer::~FtraceConfigMuxer() = default;

bool FtraceConfigMuxer::SetupConfig(FtraceConfigId id,
                                    const FtraceConfig& request,
                                    FtraceSetupErrors* errors) {
  EventFilter filter;
  if (ds_configs_.empty()) {
    PERFETTO_DCHECK(active_configs_.empty());

    // If someone outside of perfetto is using a non-nop tracer, yield. We can't
    // realistically figure out all notions of "in use" even if we look at
    // set_event or events/enable, so this is all we check for.
    if (!request.preserve_ftrace_buffer() && !ftrace_->IsTracingAvailable()) {
      PERFETTO_ELOG(
          "ftrace in use by non-Perfetto. Check that %s current_tracer is nop.",
          ftrace_->GetRootPath().c_str());
      return false;
    }

    // Clear tracefs state, remembering which value of "tracing_on" to restore
    // to after we're done, though we won't restore the rest of the tracefs
    // state.
    current_state_.saved_tracing_on = ftrace_->GetTracingOn();
    if (!request.preserve_ftrace_buffer()) {
      ftrace_->SetTracingOn(false);
      // This will fail on release ("user") builds due to ACLs, but that's
      // acceptable since the per-event enabling/disabling should still be
      // balanced.
      ftrace_->DisableAllEvents();
      ftrace_->ClearTrace();
    }

    // Set up the rest of the tracefs state, without starting it.
    // Notes:
    // * resizing buffers can be quite slow (up to hundreds of ms).
    // * resizing buffers may truncate existing contents if the new size is
    //   smaller, which matters to the preserve_ftrace_buffer option.
    if (!request.preserve_ftrace_buffer()) {
      SetupClock(request);
      SetupBufferSize(request);
    }
  }

  std::set<GroupAndName> events = GetFtraceEvents(request, table_);
  std::map<GroupAndName, KprobeEvent::KprobeType> events_kprobes =
      GetFtraceKprobeEvents(request);

  // Vendors can provide a set of extra ftrace categories to be enabled when a
  // specific atrace category is used (e.g. "gfx" -> ["my_hw/my_custom_event",
  // "my_hw/my_special_gpu"]). Merge them with the hard-coded events for each
  // category.
  for (const std::string& category : request.atrace_categories()) {
    if (vendor_events_.count(category)) {
      for (const GroupAndName& event : vendor_events_[category]) {
        events.insert(event);
      }
    }
  }

  if (RequiresAtrace(request)) {
    if (secondary_instance_) {
      PERFETTO_ELOG(
          "Secondary ftrace instances do not support atrace_categories and "
          "atrace_apps options as they affect global state");
      return false;
    }
    if (!atrace_wrapper_->SupportsUserspaceOnly() && !ds_configs_.empty()) {
      PERFETTO_ELOG(
          "Concurrent atrace sessions are not supported before Android P, "
          "bailing out.");
      return false;
    }
    UpdateAtrace(request, errors ? &errors->atrace_errors : nullptr);
  }

  base::FlatHashMap<uint32_t, KprobeEvent::KprobeType> kprobes;
  for (const auto& [group_and_name, type] : events_kprobes) {
    if (!ValidateKprobeName(group_and_name.name())) {
      PERFETTO_ELOG("Invalid kprobes event %s", group_and_name.name().c_str());
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }
    // Kprobe events are created by writing their definition to the
    // kprobe_events file.
    if (!ftrace_->CreateKprobeEvent(
            group_and_name.group(), group_and_name.name(),
            group_and_name.group() == kKretprobeGroup)) {
      PERFETTO_ELOG("Failed creation of kprobes event %s",
                    group_and_name.name().c_str());
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }

    const Event* event = table_->GetOrCreateKprobeEvent(group_and_name);
    if (!event) {
      ftrace_->RemoveKprobeEvent(group_and_name.group(), group_and_name.name());

      PERFETTO_ELOG("Can't enable kprobe %s",
                    group_and_name.ToString().c_str());
      if (errors)
        errors->unknown_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }
    current_state_.installed_kprobes.insert(group_and_name);
    EnableFtraceEvent(event, group_and_name, &filter, errors);
    kprobes[event->ftrace_event_id] = type;
  }

  for (const auto& group_and_name : events) {
    if (group_and_name.group() == kKprobeGroup ||
        group_and_name.group() == kKretprobeGroup) {
      PERFETTO_DLOG("Can't enable %s, group reserved for kprobes",
                    group_and_name.ToString().c_str());
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }
    const Event* event = table_->GetOrCreateEvent(group_and_name);
    if (!event) {
      PERFETTO_DLOG("Can't enable %s, event not known",
                    group_and_name.ToString().c_str());
      if (errors)
        errors->unknown_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }

    // Niche option to skip events that are in the config, but don't have a
    // dedicated proto for the event in perfetto. Otherwise such events will be
    // encoded as GenericFtraceEvent.
    if (request.disable_generic_events() &&
        event->proto_field_id ==
            protos::pbzero::FtraceEvent::kGenericFieldNumber) {
      if (errors)
        errors->failed_ftrace_events.push_back(group_and_name.ToString());
      continue;
    }

    EnableFtraceEvent(event, group_and_name, &filter, errors);
  }

  EventFilter syscall_filter = BuildSyscallFilter(filter, request);
  if (!SetSyscallEventFilter(syscall_filter)) {
    PERFETTO_ELOG("Failed to set raw_syscall ftrace filter in SetupConfig");
    return false;
  }

  // Kernel function tracing (function_graph).
  // Note 1: there is no cleanup in |RemoveConfig| because tracers cannot be
  // changed while tracing pipes are opened. So we'll keep the current_tracer
  // until all data sources are gone, at which point ftrace_controller will
  // make an explicit call to |ResetCurrentTracer|.
  // Note 2: we don't track the set of filters ourselves and instead let the
  // kernel statefully collate them, hence the use of |AppendFunctionFilters|.
  // This is because each concurrent data source that wants funcgraph will get
  // all of the enabled functions (we don't go as far as doing per-DS event
  // steering in the parser), and we don't want to remove functions midway
  // through a trace (but some might get added).
  if (request.enable_function_graph()) {
    if (!current_state_.funcgraph_on && !ftrace_->ClearFunctionFilters())
      return false;
    if (!current_state_.funcgraph_on && !ftrace_->ClearFunctionGraphFilters())
      return false;
    if (!ftrace_->AppendFunctionFilters(request.function_filters()))
      return false;
    if (!ftrace_->AppendFunctionGraphFilters(request.function_graph_roots()))
      return false;
    if (!current_state_.funcgraph_on &&
        !ftrace_->SetCurrentTracer("function_graph")) {
      PERFETTO_LOG(
          "Unable to enable function_graph tracing since a concurrent ftrace "
          "data source is using a different tracer");
      return false;
    }
    current_state_.funcgraph_on = true;
  }
  const auto& compact_format = table_->compact_sched_format();
  auto compact_sched = CreateCompactSchedConfig(
      request, filter.IsEventEnabled(compact_format.sched_switch.event_id),
      compact_format);
  if (errors && !compact_format.format_valid) {
    errors->failed_ftrace_events.emplace_back(
        "perfetto/compact_sched (unexpected sched event format)");
  }

  std::optional<FtracePrintFilterConfig> ftrace_print_filter;
  if (request.has_print_filter()) {
    ftrace_print_filter =
        FtracePrintFilterConfig::Create(request.print_filter(), table_);
    if (!ftrace_print_filter.has_value()) {
      if (errors) {
        errors->failed_ftrace_events.emplace_back(
            "ftrace/print (unexpected format for filtering)");
      }
    }
  }

  std::vector<std::string> apps(request.atrace_apps());
  std::vector<std::string> categories(request.atrace_categories());
  std::vector<std::string> categories_sdk_optout = Subtract(
      request.atrace_categories(), request.atrace_categories_prefer_sdk());
  auto [it, inserted] = ds_configs_.emplace(
      std::piecewise_construct, std::forward_as_tuple(id),
      std::forward_as_tuple(
          std::move(filter), std::move(syscall_filter), compact_sched,
          std::move(ftrace_print_filter), std::move(apps),
          std::move(categories), std::move(categories_sdk_optout),
          request.symbolize_ksyms(), request.drain_buffer_percent(),
          GetSyscallsReturningFds(syscalls_)));
  if (inserted) {
    it->second.kprobes = std::move(kprobes);
  }
  return true;
}

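// Marks a previously set up config as active, adjusting buffer_percent and
// turning the kernel event writer (tracing_on) on for the first active config.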
bool FtraceConfigMuxer::ActivateConfig(FtraceConfigId id) {
  if (!id || ds_configs_.count(id) == 0) {
    PERFETTO_DFATAL("Config not found");
    return false;
  }

  bool first_config = active_configs_.empty();
  active_configs_.insert(id);

  // Pick the lowest buffer_percent across the new set of active configs.
  if (!UpdateBufferPercent()) {
    PERFETTO_ELOG(
        "Invalid FtraceConfig.drain_buffer_percent or "
        "/sys/kernel/tracing/buffer_percent file permissions.");
    // carry on, non-critical error
  }

  // Enable kernel event writer.
  if (first_config) {
    if (!ftrace_->SetTracingOn(true)) {
      PERFETTO_ELOG("Failed to enable ftrace.");
      active_configs_.erase(id);
      return false;
    }
  }
  return true;
}

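// Drops a config and recomputes the ftrace/atrace state still required by the
// remaining configs, tearing everything down once the last config is gone.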
bool FtraceConfigMuxer::RemoveConfig(FtraceConfigId config_id) {
  if (!config_id || !ds_configs_.erase(config_id))
    return false;
  EventFilter expected_ftrace_events;
  std::vector<std::string> expected_apps;
  std::vector<std::string> expected_categories;
  std::vector<std::string> expected_categories_sdk_optout;
  for (const auto& id_config : ds_configs_) {
    const perfetto::FtraceDataSourceConfig& config = id_config.second;
    expected_ftrace_events.EnableEventsFrom(config.event_filter);
    UnionInPlace(config.atrace_apps, &expected_apps);
    UnionInPlace(config.atrace_categories, &expected_categories);
    UnionInPlace(config.atrace_categories_sdk_optout,
                 &expected_categories_sdk_optout);
  }
  std::vector<std::string> expected_categories_prefer_sdk =
      Subtract(expected_categories, expected_categories_sdk_optout);

  // At this point expected_{apps,categories} contains the union of the
  // leftover configs (if any) that should still be on. However we did not
  // necessarily succeed in turning on atrace for each of those configs
  // previously, so we now intersect the {apps,categories} that we *did* manage
  // to turn on with those we want on to determine the new state we should aim
  // for:
  IntersectInPlace(current_state_.atrace_apps, &expected_apps);
  IntersectInPlace(current_state_.atrace_categories, &expected_categories);

  // Work out if there is any difference between the current state and the
  // desired state: it's sufficient to compare sizes here (since we know from
  // above that expected_{apps,categories} is now a subset of
  // atrace_{apps,categories}):
  bool atrace_changed =
      (current_state_.atrace_apps.size() != expected_apps.size()) ||
      (current_state_.atrace_categories.size() != expected_categories.size());

  bool atrace_prefer_sdk_changed =
      current_state_.atrace_categories_prefer_sdk !=
      expected_categories_prefer_sdk;

  if (!SetSyscallEventFilter(/*extra_syscalls=*/{})) {
    PERFETTO_ELOG("Failed to set raw_syscall ftrace filter in RemoveConfig");
  }

  // Disable any events that are currently enabled, but are not in any configs
  // anymore.
  std::set<size_t> event_ids = current_state_.ftrace_events.GetEnabledEvents();
  for (size_t id : event_ids) {
    if (expected_ftrace_events.IsEventEnabled(id))
      continue;
    const Event* event = table_->GetEventById(id);
    // Any event that was enabled must exist.
    PERFETTO_DCHECK(event);
    if (ftrace_->DisableEvent(event->group, event->name))
      current_state_.ftrace_events.DisableEvent(event->ftrace_event_id);
  }

  auto active_it = active_configs_.find(config_id);
  if (active_it != active_configs_.end()) {
    active_configs_.erase(active_it);
    if (active_configs_.empty()) {
      // This was the last active config for now, but potentially more dormant
      // configs need to be activated. We are not interested in reading while
      // there are no active configs, so disable tracing_on here.
      ftrace_->SetTracingOn(false);
    }
  }

  // Update buffer_percent to the minimum of the remaining configs.
  UpdateBufferPercent();

  // Even if we don't have any other active configs, we might still have idle
  // configs around. Tear down the rest of the ftrace config only if all
  // configs are removed.
  if (ds_configs_.empty()) {
    if (ftrace_->SetCpuBufferSizeInPages(1))
      current_state_.cpu_buffer_size_pages = 1;
    ftrace_->SetBufferPercent(50);
    ftrace_->DisableAllEvents();
    ftrace_->ClearTrace();
    ftrace_->SetTracingOn(current_state_.saved_tracing_on);

    // Kprobe cleanup cannot happen while we're still tracing as uninstalling
    // kprobes clears all tracing buffers in the kernel.
    for (const GroupAndName& probe : current_state_.installed_kprobes) {
      ftrace_->RemoveKprobeEvent(probe.group(), probe.name());
      table_->RemoveEvent(probe);
    }
    current_state_.installed_kprobes.clear();
  }

  if (current_state_.atrace_on) {
    if (expected_apps.empty() && expected_categories.empty()) {
      DisableAtrace();
    } else if (atrace_changed) {
      // Update atrace to remove the no longer wanted categories/apps. For
      // some categories this won't disable them (e.g. categories that just
      // enable ftrace events); for those there is nothing we can do until the
      // last ftrace config is removed.
      if (StartAtrace(expected_apps, expected_categories,
                      /*atrace_errors=*/nullptr)) {
        // Update current_state_ to reflect this change.
        current_state_.atrace_apps = expected_apps;
        current_state_.atrace_categories = expected_categories;
      }
    }
  }

  if (atrace_prefer_sdk_changed) {
    if (SetAtracePreferSdk(expected_categories_prefer_sdk,
                           /*atrace_errors=*/nullptr)) {
      current_state_.atrace_categories_prefer_sdk =
          expected_categories_prefer_sdk;
    }
  }

  return true;
}

bool FtraceConfigMuxer::ResetCurrentTracer() {
  if (!current_state_.funcgraph_on)
    return true;
  if (!ftrace_->ResetCurrentTracer()) {
    PERFETTO_PLOG("Failed to reset current_tracer to nop");
    return false;
  }
  current_state_.funcgraph_on = false;
  if (!ftrace_->ClearFunctionFilters()) {
    PERFETTO_PLOG("Failed to reset set_ftrace_filter.");
    return false;
  }
  if (!ftrace_->ClearFunctionGraphFilters()) {
    PERFETTO_PLOG("Failed to reset set_function_graph.");
    return false;
  }
  return true;
}

const FtraceDataSourceConfig* FtraceConfigMuxer::GetDataSourceConfig(
    FtraceConfigId id) {
  if (!ds_configs_.count(id))
    return nullptr;
  return &ds_configs_.at(id);
}

void FtraceConfigMuxer::SetupClock(const FtraceConfig& config) {
  std::string current_clock = ftrace_->GetClock();
  std::set<std::string> clocks = ftrace_->AvailableClocks();

  if (config.has_use_monotonic_raw_clock() &&
      config.use_monotonic_raw_clock() && clocks.count(kClockMonoRaw)) {
    ftrace_->SetClock(kClockMonoRaw);
    current_clock = kClockMonoRaw;
  } else {
    for (size_t i = 0; i < base::ArraySize(kClocks); i++) {
      std::string clock = std::string(kClocks[i]);
      if (!clocks.count(clock))
        continue;
      if (current_clock == clock)
        break;
      ftrace_->SetClock(clock);
      current_clock = clock;
      break;
    }
  }

  namespace pb0 = protos::pbzero;
  if (current_clock == "boot") {
    // "boot" is the default expectation on modern kernels, which is why we
    // don't have an explicit FTRACE_CLOCK_BOOT enum and leave it unset.
    // See comments in ftrace_event_bundle.proto.
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_UNSPECIFIED;
  } else if (current_clock == "global") {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_GLOBAL;
  } else if (current_clock == "local") {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_LOCAL;
  } else if (current_clock == kClockMonoRaw) {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_MONO_RAW;
  } else {
    current_state_.ftrace_clock = pb0::FTRACE_CLOCK_UNKNOWN;
  }
}

void FtraceConfigMuxer::SetupBufferSize(const FtraceConfig& request) {
  int64_t phys_ram_pages = sysconf(_SC_PHYS_PAGES);
  size_t pages = ComputeCpuBufferSizeInPages(request.buffer_size_kb(),
                                             request.buffer_size_lower_bound(),
                                             phys_ram_pages);
  ftrace_->SetCpuBufferSizeInPages(pages);
  current_state_.cpu_buffer_size_pages = pages;
}

// Post-conditions:
// * result >= 1 (should have at least one page per CPU)
// * if the input is 0, the output is a good default number
size_t ComputeCpuBufferSizeInPages(size_t requested_buffer_size_kb,
                                   bool buffer_size_lower_bound,
                                   int64_t sysconf_phys_pages) {
  uint32_t page_sz = base::GetSysPageSize();
  uint64_t default_size_kb =
      (sysconf_phys_pages > 0 &&
       (static_cast<uint64_t>(sysconf_phys_pages) >= (kHighMemBytes / page_sz)))
          ? kDefaultHighRamPerCpuBufferSizeKb
          : kDefaultLowRamPerCpuBufferSizeKb;

  size_t actual_size_kb = requested_buffer_size_kb;
  if ((requested_buffer_size_kb == 0) ||
      (buffer_size_lower_bound && default_size_kb > requested_buffer_size_kb)) {
    actual_size_kb = default_size_kb;
  }

  size_t pages = actual_size_kb / (page_sz / 1024);
  return pages ? pages : 1;
}

// TODO(rsavitski): stop caching the "input" value, as the kernel can and will
// choose a slightly different buffer size (especially on 6.x kernels). And even
// then the value might not be exactly page accurate due to scratch pages (more
// of a concern for the |FtraceController::FlushForInstance| caller).
size_t FtraceConfigMuxer::GetPerCpuBufferSizePages() {
  return current_state_.cpu_buffer_size_pages;
}

// Applies the lowest buffer_percent across the currently active configs to
// the kernel's buffer_percent file.
bool FtraceConfigMuxer::UpdateBufferPercent() {
  uint32_t kUnsetPercent = std::numeric_limits<uint32_t>::max();
  uint32_t min_percent = kUnsetPercent;
  for (auto cfg_id : active_configs_) {
    auto ds_it = ds_configs_.find(cfg_id);
    if (ds_it != ds_configs_.end() && ds_it->second.buffer_percent > 0) {
      min_percent = std::min(min_percent, ds_it->second.buffer_percent);
    }
  }
  if (min_percent == kUnsetPercent)
    return true;
  // Let the kernel ignore values >100.
  return ftrace_->SetBufferPercent(min_percent);
}

void FtraceConfigMuxer::UpdateAtrace(const FtraceConfig& request,
                                     std::string* atrace_errors) {
  // We want to avoid poisoning current_state_.atrace_{categories, apps}
  // if for some reason these args make atrace unhappy, so we stash the
  // union into temps and only update current_state_ if we successfully
  // run atrace.

  std::vector<std::string> combined_categories = request.atrace_categories();
  UnionInPlace(current_state_.atrace_categories, &combined_categories);

  std::vector<std::string> combined_apps = request.atrace_apps();
  UnionInPlace(current_state_.atrace_apps, &combined_apps);

  // Each data source can list some atrace categories for which the SDK is
  // preferred (the rest of the categories are considered to opt out of the
  // SDK). When merging multiple data sources, opting out wins. Therefore this
  // code does a union of the opt-outs for all data sources.
  std::vector<std::string> combined_categories_sdk_optout = Subtract(
      request.atrace_categories(), request.atrace_categories_prefer_sdk());

  std::vector<std::string> current_categories_sdk_optout =
      Subtract(current_state_.atrace_categories,
               current_state_.atrace_categories_prefer_sdk);
  UnionInPlace(current_categories_sdk_optout, &combined_categories_sdk_optout);

  std::vector<std::string> combined_categories_prefer_sdk =
      Subtract(combined_categories, combined_categories_sdk_optout);

  if (combined_categories_prefer_sdk !=
      current_state_.atrace_categories_prefer_sdk) {
    if (SetAtracePreferSdk(combined_categories_prefer_sdk, atrace_errors)) {
      current_state_.atrace_categories_prefer_sdk =
          combined_categories_prefer_sdk;
    }
  }

  if (!current_state_.atrace_on ||
      combined_apps.size() != current_state_.atrace_apps.size() ||
      combined_categories.size() != current_state_.atrace_categories.size()) {
    if (StartAtrace(combined_apps, combined_categories, atrace_errors)) {
      current_state_.atrace_categories = combined_categories;
      current_state_.atrace_apps = combined_apps;
      current_state_.atrace_on = true;
    }
  }
}

bool FtraceConfigMuxer::StartAtrace(const std::vector<std::string>& apps,
                                    const std::vector<std::string>& categories,
                                    std::string* atrace_errors) {
  PERFETTO_DLOG("Update atrace config...");

  std::vector<std::string> args;
  args.push_back("atrace");  // argv0 for exec()
  args.push_back("--async_start");
  if (atrace_wrapper_->SupportsUserspaceOnly())
    args.push_back("--only_userspace");

  for (const auto& category : categories)
    args.push_back(category);

  if (!apps.empty()) {
    args.push_back("-a");
    std::string arg = "";
    for (const auto& app : apps) {
      arg += app;
      arg += ",";
    }
    arg.resize(arg.size() - 1);
    args.push_back(arg);
  }

  bool result = atrace_wrapper_->RunAtrace(args, atrace_errors);
  PERFETTO_DLOG("...done (%s)", result ? "success" : "fail");
  return result;
}

bool FtraceConfigMuxer::SetAtracePreferSdk(
    const std::vector<std::string>& prefer_sdk_categories,
    std::string* atrace_errors) {
  if (!atrace_wrapper_->SupportsPreferSdk()) {
    return false;
  }
  PERFETTO_DLOG("Update atrace prefer sdk categories...");

  std::vector<std::string> args;
  args.push_back("atrace");  // argv0 for exec()
  args.push_back("--prefer_sdk");

  for (const auto& category : prefer_sdk_categories)
    args.push_back(category);

  bool result = atrace_wrapper_->RunAtrace(args, atrace_errors);
  PERFETTO_DLOG("...done (%s)", result ? "success" : "fail");
  return result;
}

void FtraceConfigMuxer::DisableAtrace() {
  PERFETTO_DCHECK(current_state_.atrace_on);

  PERFETTO_DLOG("Stop atrace...");

  std::vector<std::string> args{"atrace", "--async_stop"};
  if (atrace_wrapper_->SupportsUserspaceOnly())
    args.push_back("--only_userspace");
  if (atrace_wrapper_->RunAtrace(args, /*atrace_errors=*/nullptr)) {
    current_state_.atrace_categories.clear();
    current_state_.atrace_apps.clear();
    current_state_.atrace_on = false;
  }

  PERFETTO_DLOG("...done");
}

}  // namespace perfetto