/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/perf/event_config.h"

#include <linux/perf_event.h>
#include <time.h>

#include <unwindstack/Regs.h>
#include <optional>
#include <vector>

#include "perfetto/base/flat_set.h"
#include "perfetto/ext/base/utils.h"
#include "src/profiling/perf/regs_parsing.h"

#include "protos/perfetto/common/perf_events.gen.h"
#include "protos/perfetto/config/profiling/perf_event_config.gen.h"

namespace perfetto {
namespace profiling {

namespace {
constexpr uint64_t kDefaultSamplingFrequencyHz = 10;
constexpr uint32_t kDefaultDataPagesPerRingBuffer = 256;  // 1 MB: 256x 4k pages
constexpr uint32_t kDefaultReadTickPeriodMs = 100;
constexpr uint32_t kDefaultRemoteDescriptorTimeoutMs = 100;

// Acceptable forms: "sched/sched_switch" or "sched:sched_switch".
std::pair<std::string, std::string> SplitTracepointString(
    const std::string& input) {
  auto slash_pos = input.find("/");
  if (slash_pos != std::string::npos)
    return std::make_pair(input.substr(0, slash_pos),
                          input.substr(slash_pos + 1));

  auto colon_pos = input.find(":");
  if (colon_pos != std::string::npos)
    return std::make_pair(input.substr(0, colon_pos),
                          input.substr(colon_pos + 1));

  return std::make_pair("", input);
}
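// For example, given the parsing above:
//   SplitTracepointString("sched/sched_switch") -> {"sched", "sched_switch"}
//   SplitTracepointString("sched:sched_switch") -> {"sched", "sched_switch"}
//   SplitTracepointString("sched_switch")       -> {"", "sched_switch"}
// The separator-less form is rejected by the caller below, which requires both
// a non-empty group and a non-empty name.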

// If set, the returned id is guaranteed to be non-zero.
std::optional<uint32_t> ParseTracepointAndResolveId(
    const protos::gen::PerfEvents::Tracepoint& tracepoint,
    EventConfig::tracepoint_id_fn_t tracepoint_id_lookup) {
  std::string full_name = tracepoint.name();
  std::string tp_group;
  std::string tp_name;
  std::tie(tp_group, tp_name) = SplitTracepointString(full_name);
  if (tp_group.empty() || tp_name.empty()) {
    PERFETTO_ELOG(
        "Invalid tracepoint format: %s. Should be a full path like "
        "sched:sched_switch or sched/sched_switch.",
        full_name.c_str());
    return std::nullopt;
  }

  uint32_t tracepoint_id = tracepoint_id_lookup(tp_group, tp_name);
  if (!tracepoint_id) {
    PERFETTO_ELOG(
        "Failed to resolve tracepoint %s to its id. Check that tracefs is "
        "accessible and the event exists.",
        full_name.c_str());
    return std::nullopt;
  }
  return std::make_optional(tracepoint_id);
}

// |T| is either gen::PerfEventConfig or gen::PerfEventConfig::Scope.
// Note: the semantics of target_cmdline and exclude_cmdline were changed since
// their original introduction. They used to be put through a canonicalization
// function that simplified them to the binary name alone. We no longer do this,
// regardless of whether we're parsing an old-style config. The overall outcome
// shouldn't change for almost all existing uses.
template <typename T>
TargetFilter ParseTargetFilter(
    const T& cfg,
    std::optional<ProcessSharding> process_sharding) {
  TargetFilter filter;
  for (const auto& str : cfg.target_cmdline()) {
    filter.cmdlines.push_back(str);
  }
  for (const auto& str : cfg.exclude_cmdline()) {
    filter.exclude_cmdlines.push_back(str);
  }
  for (const int32_t pid : cfg.target_pid()) {
    filter.pids.insert(pid);
  }
  for (const int32_t pid : cfg.exclude_pid()) {
    filter.exclude_pids.insert(pid);
  }
  filter.additional_cmdline_count = cfg.additional_cmdline_count();
  filter.process_sharding = process_sharding;
  return filter;
}
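// For example, a config with target_cmdline: "com.example.app" and
// exclude_pid: 1 produces a filter with cmdlines = {"com.example.app"} and
// exclude_pids = {1}. As noted above, cmdlines are kept as given, with no
// canonicalization to the binary name.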

constexpr bool IsPowerOfTwo(size_t v) {
  return (v != 0 && ((v & (v - 1)) == 0));
}

// returns |std::nullopt| if the input is invalid.
std::optional<uint32_t> ChooseActualRingBufferPages(uint32_t config_value) {
  if (!config_value) {
    static_assert(IsPowerOfTwo(kDefaultDataPagesPerRingBuffer), "");
    return std::make_optional(kDefaultDataPagesPerRingBuffer);
  }

  if (!IsPowerOfTwo(config_value)) {
    PERFETTO_ELOG("kernel buffer size must be a power of two pages");
    return std::nullopt;
  }

  return std::make_optional(config_value);
}
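// For example: a config value of 0 falls back to the 256-page (1 MB) default,
// 128 is accepted as-is (512 kB with 4 kB pages), and 100 is rejected because
// it is not a power of two.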

std::optional<PerfCounter> ToPerfCounter(
    std::string name,
    protos::gen::PerfEvents::Counter pb_enum) {
  using protos::gen::PerfEvents;
  switch (static_cast<int>(pb_enum)) {  // cast to pacify -Wswitch-enum
    case PerfEvents::SW_CPU_CLOCK:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_CPU_CLOCK,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_CPU_CLOCK);
    case PerfEvents::SW_PAGE_FAULTS:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_PAGE_FAULTS,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_PAGE_FAULTS);
    case PerfEvents::SW_TASK_CLOCK:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_TASK_CLOCK,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_TASK_CLOCK);
    case PerfEvents::SW_CONTEXT_SWITCHES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_CONTEXT_SWITCHES,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_CONTEXT_SWITCHES);
    case PerfEvents::SW_CPU_MIGRATIONS:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_CPU_MIGRATIONS,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_CPU_MIGRATIONS);
    case PerfEvents::SW_PAGE_FAULTS_MIN:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_PAGE_FAULTS_MIN,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_PAGE_FAULTS_MIN);
    case PerfEvents::SW_PAGE_FAULTS_MAJ:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_PAGE_FAULTS_MAJ,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_PAGE_FAULTS_MAJ);
    case PerfEvents::SW_ALIGNMENT_FAULTS:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_ALIGNMENT_FAULTS,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_ALIGNMENT_FAULTS);
    case PerfEvents::SW_EMULATION_FAULTS:
      return PerfCounter::BuiltinCounter(name, PerfEvents::SW_EMULATION_FAULTS,
                                         PERF_TYPE_SOFTWARE,
                                         PERF_COUNT_SW_EMULATION_FAULTS);
    case PerfEvents::SW_DUMMY:
      return PerfCounter::BuiltinCounter(
          name, PerfEvents::SW_DUMMY, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY);

    case PerfEvents::HW_CPU_CYCLES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_CPU_CYCLES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_CPU_CYCLES);
    case PerfEvents::HW_INSTRUCTIONS:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_INSTRUCTIONS,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_INSTRUCTIONS);
    case PerfEvents::HW_CACHE_REFERENCES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_CACHE_REFERENCES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_CACHE_REFERENCES);
    case PerfEvents::HW_CACHE_MISSES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_CACHE_MISSES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_CACHE_MISSES);
    case PerfEvents::HW_BRANCH_INSTRUCTIONS:
      return PerfCounter::BuiltinCounter(
          name, PerfEvents::HW_BRANCH_INSTRUCTIONS, PERF_TYPE_HARDWARE,
          PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
    case PerfEvents::HW_BRANCH_MISSES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_BRANCH_MISSES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_BRANCH_MISSES);
    case PerfEvents::HW_BUS_CYCLES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_BUS_CYCLES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_BUS_CYCLES);
    case PerfEvents::HW_STALLED_CYCLES_FRONTEND:
      return PerfCounter::BuiltinCounter(
          name, PerfEvents::HW_STALLED_CYCLES_FRONTEND, PERF_TYPE_HARDWARE,
          PERF_COUNT_HW_STALLED_CYCLES_FRONTEND);
    case PerfEvents::HW_STALLED_CYCLES_BACKEND:
      return PerfCounter::BuiltinCounter(
          name, PerfEvents::HW_STALLED_CYCLES_BACKEND, PERF_TYPE_HARDWARE,
          PERF_COUNT_HW_STALLED_CYCLES_BACKEND);
    case PerfEvents::HW_REF_CPU_CYCLES:
      return PerfCounter::BuiltinCounter(name, PerfEvents::HW_REF_CPU_CYCLES,
                                         PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_REF_CPU_CYCLES);

    default:
      PERFETTO_ELOG("Unrecognised PerfEvents::Counter enum value: %zu",
                    static_cast<size_t>(pb_enum));
      return std::nullopt;
  }
}

int32_t ToClockId(protos::gen::PerfEvents::PerfClock pb_enum) {
  using protos::gen::PerfEvents;
  switch (static_cast<int>(pb_enum)) {  // cast to pacify -Wswitch-enum
    case PerfEvents::PERF_CLOCK_REALTIME:
      return CLOCK_REALTIME;
    case PerfEvents::PERF_CLOCK_MONOTONIC:
      return CLOCK_MONOTONIC;
    case PerfEvents::PERF_CLOCK_MONOTONIC_RAW:
      return CLOCK_MONOTONIC_RAW;
    case PerfEvents::PERF_CLOCK_BOOTTIME:
      return CLOCK_BOOTTIME;
    // Default to a monotonic clock since it should be compatible with all types
    // of events. Whereas boottime cannot be used with hardware events due to
    // potential access within non-maskable interrupts.
    default:
      return CLOCK_MONOTONIC_RAW;
  }
}

}  // namespace

// static
PerfCounter PerfCounter::BuiltinCounter(
    std::string name,
    protos::gen::PerfEvents::Counter counter,
    uint32_t type,
    uint64_t config) {
  PerfCounter ret;
  ret.type = PerfCounter::Type::kBuiltinCounter;
  ret.counter = counter;
  ret.name = std::move(name);

  ret.attr_type = type;
  ret.attr_config = config;
  // none of the builtin counters require config1 and config2 at the moment
  return ret;
}

// static
PerfCounter PerfCounter::Tracepoint(std::string name,
                                    std::string tracepoint_name,
                                    std::string tracepoint_filter,
                                    uint64_t id) {
  PerfCounter ret;
  ret.type = PerfCounter::Type::kTracepoint;
  ret.tracepoint_name = std::move(tracepoint_name);
  ret.tracepoint_filter = std::move(tracepoint_filter);
  ret.name = std::move(name);

  ret.attr_type = PERF_TYPE_TRACEPOINT;
  ret.attr_config = id;
  return ret;
}

// static
PerfCounter PerfCounter::RawEvent(std::string name,
                                  uint32_t type,
                                  uint64_t config,
                                  uint64_t config1,
                                  uint64_t config2) {
  PerfCounter ret;
  ret.type = PerfCounter::Type::kRawEvent;
  ret.name = std::move(name);

  ret.attr_type = type;
  ret.attr_config = config;
  ret.attr_config1 = config1;
  ret.attr_config2 = config2;
  return ret;
}
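// For raw events, the type/config/config1/config2 values above are copied
// verbatim into the perf_event_attr in EventConfig::Create below; this is how
// hardware-specific PMU counters (e.g. those enumerated under
// /sys/bus/event_source/devices/) can be requested.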

// static
std::optional<EventConfig> EventConfig::Create(
    const protos::gen::PerfEventConfig& pb_config,
    const DataSourceConfig& raw_ds_config,
    std::optional<ProcessSharding> process_sharding,
    tracepoint_id_fn_t tracepoint_id_lookup) {
  // Timebase: sampling interval.
  uint64_t sampling_frequency = 0;
  uint64_t sampling_period = 0;
  if (pb_config.timebase().period()) {
    sampling_period = pb_config.timebase().period();
  } else if (pb_config.timebase().frequency()) {
    sampling_frequency = pb_config.timebase().frequency();
  } else if (pb_config.sampling_frequency()) {  // backwards compatibility
    sampling_frequency = pb_config.sampling_frequency();
  } else {
    sampling_frequency = kDefaultSamplingFrequencyHz;
  }
  PERFETTO_DCHECK((sampling_period && !sampling_frequency) ||
                  (!sampling_period && sampling_frequency));
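  // For example: a timebase with {period: 10000} samples once every 10000
  // events of the chosen counter, {frequency: 100} asks the kernel to aim for
  // ~100 samples/sec, and a config that sets neither falls back to the 10 Hz
  // default above (after checking the legacy sampling_frequency field).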

  // Timebase event. Default: CPU timer.
  PerfCounter timebase_event;
  std::string timebase_name = pb_config.timebase().name();
  if (pb_config.timebase().has_counter()) {
    auto maybe_counter =
        ToPerfCounter(timebase_name, pb_config.timebase().counter());
    if (!maybe_counter)
      return std::nullopt;
    timebase_event = *maybe_counter;

  } else if (pb_config.timebase().has_tracepoint()) {
    const auto& tracepoint_pb = pb_config.timebase().tracepoint();
    std::optional<uint32_t> maybe_id =
        ParseTracepointAndResolveId(tracepoint_pb, tracepoint_id_lookup);
    if (!maybe_id)
      return std::nullopt;
    timebase_event = PerfCounter::Tracepoint(
        timebase_name, tracepoint_pb.name(), tracepoint_pb.filter(), *maybe_id);

  } else if (pb_config.timebase().has_raw_event()) {
    const auto& raw = pb_config.timebase().raw_event();
    timebase_event = PerfCounter::RawEvent(
        timebase_name, raw.type(), raw.config(), raw.config1(), raw.config2());

  } else {
    timebase_event = PerfCounter::BuiltinCounter(
        timebase_name, protos::gen::PerfEvents::PerfEvents::SW_CPU_CLOCK,
        PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK);
  }

  // Callstack sampling.
  bool user_frames = false;
  bool kernel_frames = false;
  TargetFilter target_filter;
  bool legacy_config = pb_config.all_cpus();  // all_cpus was mandatory before
  if (pb_config.has_callstack_sampling() || legacy_config) {
    user_frames = true;

    // Userspace callstacks.
    using protos::gen::PerfEventConfig;
    switch (static_cast<int>(pb_config.callstack_sampling().user_frames())) {
      case PerfEventConfig::UNWIND_UNKNOWN:
        // default to true, both for backwards compatibility and because it's
        // almost always what the user wants.
        user_frames = true;
        break;
      case PerfEventConfig::UNWIND_SKIP:
        user_frames = false;
        break;
      case PerfEventConfig::UNWIND_DWARF:
        user_frames = true;
        break;
      default:
        // enum value from the future that we don't yet know, refuse the config
        // TODO(rsavitski): double-check that both pbzero and ::gen propagate
        // unknown enum values.
        return std::nullopt;
    }

    // Process scoping. Sharding parameter is supplied from outside as it is
    // shared by all data sources within a tracing session.
    target_filter =
        pb_config.callstack_sampling().has_scope()
            ? ParseTargetFilter(pb_config.callstack_sampling().scope(),
                                process_sharding)
            : ParseTargetFilter(pb_config,
                                process_sharding);  // backwards compatibility

    // Kernel callstacks.
    kernel_frames = pb_config.callstack_sampling().kernel_frames() ||
                    pb_config.kernel_frames();
  }

  // Ring buffer options.
  std::optional<uint32_t> ring_buffer_pages =
      ChooseActualRingBufferPages(pb_config.ring_buffer_pages());
  if (!ring_buffer_pages.has_value())
    return std::nullopt;

  uint32_t read_tick_period_ms = pb_config.ring_buffer_read_period_ms()
                                     ? pb_config.ring_buffer_read_period_ms()
                                     : kDefaultReadTickPeriodMs;

  // Calculate a rough upper limit for the number of samples the producer
  // should read per read tick, as a safeguard against getting stuck chasing
  // the ring buffer head indefinitely.
  uint64_t samples_per_tick_limit = 0;
  if (sampling_frequency) {
    // expected = rate * period, with a conversion of period from ms to s:
    uint64_t expected_samples_per_tick =
        1 + (sampling_frequency * read_tick_period_ms) / 1000;
    // Double the limit to account for actual sample rate uncertainties, as
    // well as any other factors:
    samples_per_tick_limit = 2 * expected_samples_per_tick;
  } else {  // sampling_period
    // We don't know the sample rate that a fixed period would cause, but we can
    // still estimate how many samples will fit in one pass of the ring buffer
    // (with the assumption that we don't want to read more than one buffer's
    // capacity within a tick).
    // TODO(rsavitski): for now, make an extremely conservative guess of an 8
    // byte sample (stack sampling samples can be up to 64KB). This is most
    // likely as good as no limit in practice.
    samples_per_tick_limit = *ring_buffer_pages * (base::kPageSize / 8);
  }
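  // For example, with the 10 Hz default frequency and the 100 ms default read
  // tick: expected = 1 + (10 * 100) / 1000 = 2, so the cap is 4 samples per
  // tick. With a fixed period and the default 256-page buffer, the cap is
  // 256 * (4096 / 8) = 131072 (assuming 4 kB pages).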
  PERFETTO_DLOG("Capping samples (not records) per tick to [%" PRIu64 "]",
                samples_per_tick_limit);
  if (samples_per_tick_limit == 0)
    return std::nullopt;

  // Optional footprint controls.
  uint64_t max_enqueued_footprint_bytes =
      pb_config.max_enqueued_footprint_kb() * 1024;

  // Android-specific options.
  uint32_t remote_descriptor_timeout_ms =
      pb_config.remote_descriptor_timeout_ms()
          ? pb_config.remote_descriptor_timeout_ms()
          : kDefaultRemoteDescriptorTimeoutMs;

  // Build the underlying syscall config struct.
  perf_event_attr pe = {};
  pe.size = sizeof(perf_event_attr);
  pe.disabled = 1;  // will be activated via ioctl

  // Sampling timebase.
  pe.type = timebase_event.attr_type;
  pe.config = timebase_event.attr_config;
  pe.config1 = timebase_event.attr_config1;
  pe.config2 = timebase_event.attr_config2;
  if (sampling_frequency) {
    pe.freq = true;
    pe.sample_freq = sampling_frequency;
  } else {
    pe.sample_period = sampling_period;
  }

  // What the samples will contain.
  pe.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_READ;
  // PERF_SAMPLE_TIME:
  pe.clockid = ToClockId(pb_config.timebase().timestamp_clock());
  pe.use_clockid = true;

  if (user_frames) {
    pe.sample_type |= PERF_SAMPLE_STACK_USER | PERF_SAMPLE_REGS_USER;
    // PERF_SAMPLE_STACK_USER:
    // Needs to be < ((u16)(~0u)), and have bottom 8 bits clear.
    // Note that the kernel still needs to make space for the other parts of the
    // sample (up to the max record size of 64k), so the effective maximum
    // can be lower than this.
    pe.sample_stack_user = (1u << 16) - 256;
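    // i.e. 65536 - 256 = 65280 = 0xff00, the largest multiple of 256 below
    // the ((u16)(~0u)) limit mentioned above.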
    // PERF_SAMPLE_REGS_USER:
    pe.sample_regs_user =
        PerfUserRegsMaskForArch(unwindstack::Regs::CurrentArch());
  }
  if (kernel_frames) {
    pe.sample_type |= PERF_SAMPLE_CALLCHAIN;
    pe.exclude_callchain_user = true;
  }

  return EventConfig(
      raw_ds_config, pe, timebase_event, user_frames, kernel_frames,
      std::move(target_filter), ring_buffer_pages.value(), read_tick_period_ms,
      samples_per_tick_limit, remote_descriptor_timeout_ms,
      pb_config.unwind_state_clear_period_ms(), max_enqueued_footprint_bytes,
      pb_config.target_installed_by());
}

EventConfig::EventConfig(const DataSourceConfig& raw_ds_config,
                         const perf_event_attr& pe,
                         const PerfCounter& timebase_event,
                         bool user_frames,
                         bool kernel_frames,
                         TargetFilter target_filter,
                         uint32_t ring_buffer_pages,
                         uint32_t read_tick_period_ms,
                         uint64_t samples_per_tick_limit,
                         uint32_t remote_descriptor_timeout_ms,
                         uint32_t unwind_state_clear_period_ms,
                         uint64_t max_enqueued_footprint_bytes,
                         std::vector<std::string> target_installed_by)
    : perf_event_attr_(pe),
      timebase_event_(timebase_event),
      user_frames_(user_frames),
      kernel_frames_(kernel_frames),
      target_filter_(std::move(target_filter)),
      ring_buffer_pages_(ring_buffer_pages),
      read_tick_period_ms_(read_tick_period_ms),
      samples_per_tick_limit_(samples_per_tick_limit),
      remote_descriptor_timeout_ms_(remote_descriptor_timeout_ms),
      unwind_state_clear_period_ms_(unwind_state_clear_period_ms),
      max_enqueued_footprint_bytes_(max_enqueued_footprint_bytes),
      target_installed_by_(std::move(target_installed_by)),
      raw_ds_config_(raw_ds_config) /* full copy */ {}

}  // namespace profiling
}  // namespace perfetto