/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/traced/probes/ftrace/cpu_reader.h"

#include <dirent.h>
#include <fcntl.h>

#include <algorithm>
#include <optional>
#include <utility>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/metatrace.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "src/kallsyms/kernel_symbol_map.h"
#include "src/kallsyms/lazy_kernel_symbolizer.h"
#include "src/traced/probes/ftrace/ftrace_config_muxer.h"
#include "src/traced/probes/ftrace/ftrace_controller.h"  // FtraceClockSnapshot
#include "src/traced/probes/ftrace/ftrace_data_source.h"
#include "src/traced/probes/ftrace/ftrace_print_filter.h"
#include "src/traced/probes/ftrace/proto_translation_table.h"

#include "protos/perfetto/trace/ftrace/ftrace_event.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.h"
#include "protos/perfetto/trace/ftrace/ftrace_stats.pbzero.h"  // FtraceParseStatus
#include "protos/perfetto/trace/ftrace/generic.pbzero.h"
#include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
#include "protos/perfetto/trace/profiling/profile_common.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"
namespace perfetto {
namespace {

using FtraceParseStatus = protos::pbzero::FtraceParseStatus;

// If the compact_sched buffer accumulates more unique strings than this, the
// reader will flush it to reset the interning state (and make it cheap again).
// This is not an exact cap, since we check only at tracing page boundaries.
constexpr size_t kCompactSchedInternerThreshold = 64;

// For further documentation of these constants see the kernel source:
//   linux/include/linux/ring_buffer.h
// Some of this is also available to userspace at runtime via:
//   /sys/kernel/tracing/events/header_event
constexpr uint32_t kTypePadding = 29;
constexpr uint32_t kTypeTimeExtend = 30;
constexpr uint32_t kTypeTimeStamp = 31;
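// Event header type_or_length values 1..28 denote a data record whose length
// is 4x that value, while 0 means the record carries its own 32-bit length as
// the first word of its payload (see the data record case in ParsePagePayload
// below). Values 29..31 are the special record types above.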

struct EventHeader {
  // bottom 5 bits
  uint32_t type_or_length : 5;
  // top 27 bits
  uint32_t time_delta : 27;
};

// Reads a string from `start` until the first '\0' byte or until fixed_len
// characters have been read. Appends it to `*out` as field `field_id`.
void ReadIntoString(const uint8_t* start,
                    size_t fixed_len,
                    uint32_t field_id,
                    protozero::Message* out) {
  size_t len = strnlen(reinterpret_cast<const char*>(start), fixed_len);
  out->AppendBytes(field_id, reinterpret_cast<const char*>(start), len);
}

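// Parses a __data_loc field (the ftrace encoding for dynamically-sized data)
// and appends the string it points at to |message| as field
// |field.proto_field_id|.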
bool ReadDataLoc(const uint8_t* start,
                 const uint8_t* field_start,
                 const uint8_t* end,
                 const Field& field,
                 protozero::Message* message) {
  PERFETTO_DCHECK(field.ftrace_size == 4);
  // See kernel header include/trace/trace_events.h
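  // The __data_loc value packs two 16-bit quantities: the low half is the
  // offset of the payload from the start of the event record, the high half
  // is its length.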
  uint32_t data = 0;
  const uint8_t* ptr = field_start;
  if (!CpuReader::ReadAndAdvance(&ptr, end, &data)) {
    PERFETTO_DFATAL("couldn't read __data_loc value");
    return false;
  }

  const uint16_t offset = data & 0xffff;
  const uint16_t len = (data >> 16) & 0xffff;
  const uint8_t* const string_start = start + offset;

  if (PERFETTO_UNLIKELY(len == 0))
    return true;
  if (PERFETTO_UNLIKELY(string_start < start || string_start + len > end)) {
    PERFETTO_DFATAL("__data_loc points at invalid location");
    return false;
  }
  ReadIntoString(string_start, len, field.proto_field_id, message);
  return true;
}

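// Copies a value of type T out of the raw (potentially unaligned) trace data.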
template <typename T>
T ReadValue(const uint8_t* ptr) {
  T t;
  memcpy(&t, reinterpret_cast<const void*>(ptr), sizeof(T));
  return t;
}

// Reads a signed ftrace value as an int64_t, sign extending if necessary.
int64_t ReadSignedFtraceValue(const uint8_t* ptr,
                              FtraceFieldType ftrace_type) {
  if (ftrace_type == kFtraceInt32) {
    int32_t value;
    memcpy(&value, reinterpret_cast<const void*>(ptr), sizeof(value));
    return int64_t(value);
  }
  if (ftrace_type == kFtraceInt64) {
    int64_t value;
    memcpy(&value, reinterpret_cast<const void*>(ptr), sizeof(value));
    return value;
  }
  PERFETTO_FATAL("unexpected ftrace type");
}

bool SetBlocking(int fd, bool is_blocking) {
  int flags = fcntl(fd, F_GETFL, 0);
  flags = (is_blocking) ? (flags & ~O_NONBLOCK) : (flags | O_NONBLOCK);
  return fcntl(fd, F_SETFL, flags) == 0;
}

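// Records an unexpected read error on this cpu in the parse-error stats of
// every active data source.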
void SetParseError(const std::set<FtraceDataSource*>& started_data_sources,
                   size_t cpu,
                   FtraceParseStatus status) {
  PERFETTO_DPLOG("[cpu%zu]: unexpected ftrace read error: %s", cpu,
                 protos::pbzero::FtraceParseStatus_Name(status));
  for (FtraceDataSource* data_source : started_data_sources) {
    data_source->mutable_parse_errors()->insert(status);
  }
}

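// Records a page parse error both in the data source's stats and as an error
// entry (with the page's timestamp, if known) in the output bundle.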
void WriteAndSetParseError(CpuReader::Bundler* bundler,
                           base::FlatSet<FtraceParseStatus>* stat,
                           uint64_t timestamp,
                           FtraceParseStatus status) {
  PERFETTO_DLOG("Error parsing ftrace page: %s",
                protos::pbzero::FtraceParseStatus_Name(status));
  stat->insert(status);
  auto* proto = bundler->GetOrCreateBundle()->add_error();
  if (timestamp)
    proto->set_timestamp(timestamp);
  proto->set_status(status);
}

}  // namespace

using protos::pbzero::GenericFtraceEvent;

CpuReader::CpuReader(size_t cpu,
                     base::ScopedFile trace_fd,
                     const ProtoTranslationTable* table,
                     LazyKernelSymbolizer* symbolizer,
                     protos::pbzero::FtraceClock ftrace_clock,
                     const FtraceClockSnapshot* ftrace_clock_snapshot)
    : cpu_(cpu),
      table_(table),
      symbolizer_(symbolizer),
      trace_fd_(std::move(trace_fd)),
      ftrace_clock_(ftrace_clock),
      ftrace_clock_snapshot_(ftrace_clock_snapshot) {
  PERFETTO_CHECK(trace_fd_);
  PERFETTO_CHECK(SetBlocking(*trace_fd_, false));
}

CpuReader::~CpuReader() = default;

size_t CpuReader::ReadCycle(
    ParsingBuffers* parsing_bufs,
    size_t max_pages,
    const std::set<FtraceDataSource*>& started_data_sources) {
  PERFETTO_DCHECK(max_pages > 0 && parsing_bufs->ftrace_data_buf_pages() > 0);
  metatrace::ScopedEvent evt(metatrace::TAG_FTRACE,
                             metatrace::FTRACE_CPU_READ_CYCLE);

  // Work in batches to keep cache locality and limit memory usage.
  size_t total_pages_read = 0;
  for (bool is_first_batch = true;; is_first_batch = false) {
    size_t batch_pages = std::min(parsing_bufs->ftrace_data_buf_pages(),
                                  max_pages - total_pages_read);
    size_t pages_read = ReadAndProcessBatch(
        parsing_bufs->ftrace_data_buf(), batch_pages, is_first_batch,
        parsing_bufs->compact_sched_buf(), started_data_sources);

    PERFETTO_DCHECK(pages_read <= batch_pages);
    total_pages_read += pages_read;

    // Check whether we've caught up to the writer, or are giving up on this
    // attempt due to an error.
    if (pages_read != batch_pages)
      break;
    // Check if we've hit the limit of work for this cycle.
    if (total_pages_read >= max_pages)
      break;
  }
  PERFETTO_METATRACE_COUNTER(TAG_FTRACE, FTRACE_PAGES_DRAINED,
                             total_pages_read);
  return total_pages_read;
}

// metatrace note: mark the reading phase as FTRACE_CPU_READ_BATCH, but let the
// parsing time be implied (by the difference between the caller's span and
// this reading span). Makes it easier to estimate the read/parse ratio when
// looking at the trace in the UI.
size_t CpuReader::ReadAndProcessBatch(
    uint8_t* parsing_buf,
    size_t max_pages,
    bool first_batch_in_cycle,
    CompactSchedBuffer* compact_sched_buf,
    const std::set<FtraceDataSource*>& started_data_sources) {
  const uint32_t sys_page_size = base::GetSysPageSize();
  size_t pages_read = 0;
  {
    metatrace::ScopedEvent evt(metatrace::TAG_FTRACE,
                               metatrace::FTRACE_CPU_READ_BATCH);
    for (; pages_read < max_pages;) {
      uint8_t* curr_page = parsing_buf + (pages_read * sys_page_size);
      ssize_t res = PERFETTO_EINTR(read(*trace_fd_, curr_page, sys_page_size));
      if (res < 0) {
        // Expected errors:
        // EAGAIN: no data (since we're in non-blocking mode).
        // ENOMEM, EBUSY: temporary ftrace failures (they happen).
        // ENODEV: the cpu is offline (b/145583318).
        if (errno != EAGAIN && errno != ENOMEM && errno != EBUSY &&
            errno != ENODEV) {
          SetParseError(started_data_sources, cpu_,
                        FtraceParseStatus::FTRACE_STATUS_UNEXPECTED_READ_ERROR);
        }
        break;  // stop reading regardless of errno
      }

      // As long as all of our reads are for a single page, the kernel should
      // return exactly a well-formed raw ftrace page (if not in the steady
      // state of reading out fully-written pages, the kernel will construct
      // pages as necessary, copying over events and zero-filling at the end).
      // A sub-page read() is therefore not expected in practice. Kernel source
      // pointer: see usage of |info->read| within |tracing_buffers_read|.
      if (res == 0) {
        // Very rare, but possible. Stop for now, should recover.
        PERFETTO_DLOG("[cpu%zu]: 0-sized read from ftrace pipe.", cpu_);
        break;
      }
      if (res != static_cast<ssize_t>(sys_page_size)) {
        SetParseError(started_data_sources, cpu_,
                      FtraceParseStatus::FTRACE_STATUS_PARTIAL_PAGE_READ);
        break;
      }

      pages_read += 1;

      // Compare the amount of ftrace data read against an empirical threshold
      // to make an educated guess on whether we should read more. To figure
      // out the amount of ftrace data, we need to parse the page header (since
      // the read always returns a page, zero-filled at the end). If we read
      // fewer bytes than the threshold, it means that we caught up with the
      // write pointer and we started consuming ftrace events in real-time.
      // This cannot be just 4096 because it needs to account for
      // fragmentation, i.e. for the fact that the last trace event didn't fit
      // in the current page and hence the current page was terminated
      // prematurely.
      static const size_t kRoughlyAPage = sys_page_size - 512;
      const uint8_t* scratch_ptr = curr_page;
      std::optional<PageHeader> hdr =
          ParsePageHeader(&scratch_ptr, table_->page_header_size_len());
      PERFETTO_DCHECK(hdr && hdr->size > 0 && hdr->size <= sys_page_size);
      if (!hdr.has_value()) {
        // The header error will be logged by ProcessPagesForDataSource.
        break;
      }
      // Note that a small first read after starting a read cycle is normal.
      // It means that we're being handed the remainder of events from a page
      // that we had partially consumed during the last read of the previous
      // cycle (having caught up to the writer).
      if (hdr->size < kRoughlyAPage &&
          !(first_batch_in_cycle && pages_read == 1)) {
        break;
      }
    }
  }  // end of metatrace::FTRACE_CPU_READ_BATCH

  // Parse the pages and write to the trace for all relevant data sources.
  if (pages_read == 0)
    return pages_read;

  uint64_t last_read_ts = last_read_event_ts_;
  for (FtraceDataSource* data_source : started_data_sources) {
    last_read_ts = last_read_event_ts_;
    ProcessPagesForDataSource(
        data_source->trace_writer(), data_source->mutable_metadata(), cpu_,
        data_source->parsing_config(), data_source->mutable_parse_errors(),
        &last_read_ts, parsing_buf, pages_read, compact_sched_buf, table_,
        symbolizer_, ftrace_clock_snapshot_, ftrace_clock_);
  }
  last_read_event_ts_ = last_read_ts;

  return pages_read;
}

void CpuReader::Bundler::StartNewPacket(bool lost_events,
                                        uint64_t last_read_event_timestamp) {
  FinalizeAndRunSymbolizer();
  packet_ = trace_writer_->NewTracePacket();
  bundle_ = packet_->set_ftrace_events();

  bundle_->set_cpu(static_cast<uint32_t>(cpu_));
  if (lost_events) {
    bundle_->set_lost_events(true);
  }

  // note: set-to-zero is valid and expected for the first bundle per cpu
  // (outside of concurrent tracing), with the effective meaning of "all data
  // is valid since the data source was started".
  bundle_->set_last_read_event_timestamp(last_read_event_timestamp);

  if (ftrace_clock_) {
    bundle_->set_ftrace_clock(ftrace_clock_);
    if (ftrace_clock_snapshot_ && ftrace_clock_snapshot_->ftrace_clock_ts) {
      bundle_->set_ftrace_timestamp(ftrace_clock_snapshot_->ftrace_clock_ts);
      bundle_->set_boot_timestamp(ftrace_clock_snapshot_->boot_clock_ts);
    }
  }
}

void CpuReader::Bundler::FinalizeAndRunSymbolizer() {
  if (!packet_) {
    return;
  }

  if (compact_sched_enabled_) {
    compact_sched_buf_->WriteAndReset(bundle_);
  }

  bundle_->Finalize();
  bundle_ = nullptr;
  // Write the kernel symbol index (mangled address) -> name table.
  // |metadata| is shared across all cpus, is distinct per |data_source| (i.e.
  // tracing session) and is cleared after each FtraceController::ReadTick().
  if (symbolizer_) {
    // Symbol indexes are assigned monotonically as |kernel_addrs.size()|,
    // starting from index 1 (no symbol has index 0). Here we remember the
    // size() (which is also == the highest value in |kernel_addrs|) at the
    // beginning and only write newer indexes bigger than that.
    uint32_t max_index_at_start = metadata_->last_kernel_addr_index_written;
    PERFETTO_DCHECK(max_index_at_start <= metadata_->kernel_addrs.size());
    protos::pbzero::InternedData* interned_data = nullptr;
    auto* ksyms_map = symbolizer_->GetOrCreateKernelSymbolMap();
    bool wrote_at_least_one_symbol = false;
    for (const FtraceMetadata::KernelAddr& kaddr : metadata_->kernel_addrs) {
      if (kaddr.index <= max_index_at_start)
        continue;
      std::string sym_name = ksyms_map->Lookup(kaddr.addr);
      if (sym_name.empty()) {
        // Lookup failed. This can genuinely happen on many occasions. E.g.,
        // workqueue_execute_start has two pointers: one is a pointer to a
        // function (which we expect to be symbolized), the other (|work|) is
        // a pointer to a heap struct, which is unsymbolizable, even when
        // using the textual ftrace endpoint.
        continue;
      }

      if (!interned_data) {
        // If this is the very first write, clear the start of the sequence
        // so the trace processor knows that all previous indexes can be
        // discarded and that the mapping is restarting.
        // In most cases this occurs with cpu==0. But if cpu0 is idle, this
        // will happen with the first CPU that has any ftrace data.
        if (max_index_at_start == 0) {
          packet_->set_sequence_flags(
              protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED);
        }
        interned_data = packet_->set_interned_data();
      }
      auto* interned_sym = interned_data->add_kernel_symbols();
      interned_sym->set_iid(kaddr.index);
      interned_sym->set_str(sym_name);
      wrote_at_least_one_symbol = true;
    }

    auto max_it_at_end = static_cast<uint32_t>(metadata_->kernel_addrs.size());

    // Rationale for the if (wrote_at_least_one_symbol) check: in rare cases,
    // all symbols seen in a ProcessPagesForDataSource() call can fail the
    // ksyms_map->Lookup(). If that happens we don't want to bump the
    // last_kernel_addr_index_written watermark, as that would cause the next
    // call to NOT emit the SEQ_INCREMENTAL_STATE_CLEARED.
    if (wrote_at_least_one_symbol) {
      metadata_->last_kernel_addr_index_written = max_it_at_end;
    }
  }
  packet_ = TraceWriter::TracePacketHandle(nullptr);
}

// Error handling: will attempt parsing all pages even if there are errors in
// parsing the binary layout of the data. The error will be recorded in the
// event bundle proto with a timestamp, letting the trace processor decide
// whether to discard or keep the post-error data. Previously, we crashed as
// soon as we encountered such an error.
// TODO(rsavitski, b/192586066): consider moving last_read_event_ts tracking to
// be per-datasource. The current implementation can be pessimistic if there
// are multiple concurrent data sources, one of which is only interested in
// sparse events (imagine a print filter with one matching event every minute,
// while the buffers are read - advancing the last read timestamp - multiple
// times per second). Tracking the timestamp of the last event *written into
// the datasource* would be more accurate.
// static
bool CpuReader::ProcessPagesForDataSource(
    TraceWriter* trace_writer,
    FtraceMetadata* metadata,
    size_t cpu,
    const FtraceDataSourceConfig* ds_config,
    base::FlatSet<protos::pbzero::FtraceParseStatus>* parse_errors,
    uint64_t* last_read_event_ts,
    const uint8_t* parsing_buf,
    const size_t pages_read,
    CompactSchedBuffer* compact_sched_buf,
    const ProtoTranslationTable* table,
    LazyKernelSymbolizer* symbolizer,
    const FtraceClockSnapshot* ftrace_clock_snapshot,
    protos::pbzero::FtraceClock ftrace_clock) {
  const uint32_t sys_page_size = base::GetSysPageSize();
  Bundler bundler(trace_writer, metadata,
                  ds_config->symbolize_ksyms ? symbolizer : nullptr, cpu,
                  ftrace_clock_snapshot, ftrace_clock, compact_sched_buf,
                  ds_config->compact_sched.enabled, *last_read_event_ts);

  bool success = true;
  size_t pages_parsed = 0;
  bool compact_sched_enabled = ds_config->compact_sched.enabled;
  for (; pages_parsed < pages_read; pages_parsed++) {
    const uint8_t* curr_page = parsing_buf + (pages_parsed * sys_page_size);
    const uint8_t* curr_page_end = curr_page + sys_page_size;
    const uint8_t* parse_pos = curr_page;
    std::optional<PageHeader> page_header =
        ParsePageHeader(&parse_pos, table->page_header_size_len());

    if (!page_header.has_value() || page_header->size == 0 ||
        parse_pos >= curr_page_end ||
        parse_pos + page_header->size > curr_page_end) {
      WriteAndSetParseError(
          &bundler, parse_errors,
          page_header.has_value() ? page_header->timestamp : 0,
          FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_PAGE_HEADER);
      success = false;
      continue;
    }

    // Start a new bundle if either:
    // * The page we're about to read indicates that there was a kernel ring
    //   buffer overrun since our last read from that per-cpu buffer. We have
    //   a single |lost_events| field per bundle, so start a new packet.
    // * The compact_sched buffer is holding more unique interned strings than
    //   a threshold. We need to flush the compact buffer to make the
    //   interning lookups cheap again.
    bool interner_past_threshold =
        compact_sched_enabled &&
        bundler.compact_sched_buf()->interner().interned_comms_size() >
            kCompactSchedInternerThreshold;

    if (page_header->lost_events || interner_past_threshold) {
      // Pass in an updated last_read_event_ts since we're starting a new
      // bundle, which needs to reference the last timestamp from the prior
      // one.
      bundler.StartNewPacket(page_header->lost_events, *last_read_event_ts);
    }

    FtraceParseStatus status =
        ParsePagePayload(parse_pos, &page_header.value(), table, ds_config,
                         &bundler, metadata, last_read_event_ts);

    if (status != FtraceParseStatus::FTRACE_STATUS_OK) {
      WriteAndSetParseError(&bundler, parse_errors, page_header->timestamp,
                            status);
      success = false;
      continue;
    }
  }
  // |bundler|'s FinalizeAndRunSymbolizer() will run as part of its destructor.
  return success;
}

// A page header consists of:
// * timestamp: 8 bytes
// * commit: 8 bytes on 64-bit, 4 bytes on 32-bit kernels
//
// The kernel reports this at /sys/kernel/debug/tracing/events/header_page.
//
// |commit|'s bottom bits represent the length of the payload following this
// header. The top bits have been repurposed as a bitset of flags pertaining to
// data loss. We look only at the "there has been some data lost" flag
// (RB_MISSED_EVENTS), and ignore the relatively tricky "appended the precise
// lost events count past the end of the valid data, as there was room to do
// so" flag (RB_MISSED_STORED).
//
// static
std::optional<CpuReader::PageHeader> CpuReader::ParsePageHeader(
    const uint8_t** ptr,
    uint16_t page_header_size_len) {
  // Mask for the data length portion of the |commit| field. Note that the
  // kernel implementation never explicitly defines the boundary (beyond using
  // bits 30 and 31 as flags), but the original commit message mentions 27
  // bits as sufficient, and that is the constant used by trace-cmd.
  constexpr static uint64_t kDataSizeMask = (1ull << 27) - 1;
  // If set, indicates that the relevant cpu has lost events since the last
  // read (the kernel clears the flag internally once read).
  constexpr static uint64_t kMissedEventsFlag = (1ull << 31);

  const uint8_t* end_of_page = *ptr + base::GetSysPageSize();
  PageHeader page_header;
  if (!CpuReader::ReadAndAdvance<uint64_t>(ptr, end_of_page,
                                           &page_header.timestamp))
    return std::nullopt;

  uint32_t size_and_flags;

  // On little endian, we can just read a uint32_t and skip the rest of the
  // field later.
  if (!CpuReader::ReadAndAdvance<uint32_t>(
          ptr, end_of_page, base::AssumeLittleEndian(&size_and_flags)))
    return std::nullopt;

  page_header.size = size_and_flags & kDataSizeMask;
  page_header.lost_events = bool(size_and_flags & kMissedEventsFlag);
  PERFETTO_DCHECK(page_header.size <= base::GetSysPageSize());

  // Skip the rest of the |commit| field, if applicable. On 32-bit kernels,
  // page_header_size_len - 4 evaluates to 0 and this is a no-op. On 64-bit,
  // this advances by 4 bytes.
  PERFETTO_DCHECK(page_header_size_len >= 4);
  *ptr += page_header_size_len - 4;

  return std::make_optional(page_header);
}

// A raw ftrace buffer page consists of a header followed by a sequence of
// binary ftrace events. See |ParsePageHeader| for the format of the former.
//
// Error handling: if the binary data disagrees with our understanding of the
// ring buffer layout, returns an error and skips the rest of the page (but
// some events may have already been parsed and serialised).
//
// This method is deliberately static so it can be tested independently.
protos::pbzero::FtraceParseStatus CpuReader::ParsePagePayload(
    const uint8_t* start_of_payload,
    const PageHeader* page_header,
    const ProtoTranslationTable* table,
    const FtraceDataSourceConfig* ds_config,
    Bundler* bundler,
    FtraceMetadata* metadata,
    uint64_t* last_read_event_ts) {
  const uint8_t* ptr = start_of_payload;
  const uint8_t* const end = ptr + page_header->size;

  uint64_t timestamp = page_header->timestamp;
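  // Timestamp of the most recent data record on this page (padding and time
  // records don't count). Used to update |last_read_event_ts| once the page
  // has parsed successfully.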
  uint64_t last_data_record_ts = 0;

  while (ptr < end) {
    EventHeader event_header;
    if (!ReadAndAdvance(&ptr, end, &event_header))
      return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_EVENT_HEADER;

    timestamp += event_header.time_delta;

    switch (event_header.type_or_length) {
      case kTypePadding: {
        // Left over page padding or discarded event.
        if (event_header.time_delta == 0) {
          // Should never happen: null padding event with unspecified size.
          // Only written beyond page_header->size.
          return FtraceParseStatus::FTRACE_STATUS_ABI_NULL_PADDING;
        }
        uint32_t length = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &length))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_PADDING_LENGTH;
        // Length includes itself (4 bytes).
        if (length < 4)
          return FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_PADDING_LENGTH;
        ptr += length - 4;
        break;
      }
      case kTypeTimeExtend: {
        // Extend the time delta.
        uint32_t time_delta_ext = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &time_delta_ext))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_TIME_EXTEND;
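        // The header's 27-bit delta (already added above) carries the low
        // bits; the payload supplies the upper bits of the full delta.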
        timestamp += (static_cast<uint64_t>(time_delta_ext)) << 27;
        break;
      }
      case kTypeTimeStamp: {
        // Absolute timestamp. This was historically partially implemented,
        // but not written. Kernels 4.17+ reimplemented this record, changing
        // its size in the process. We assume the newer layout. It is parsed
        // the same way as kTypeTimeExtend, except that the timestamp is
        // interpreted as an absolute value, instead of a delta on top of the
        // previous state.
        uint32_t time_delta_ext = 0;
        if (!ReadAndAdvance<uint32_t>(&ptr, end, &time_delta_ext))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_TIME_STAMP;
        timestamp = event_header.time_delta +
                    (static_cast<uint64_t>(time_delta_ext) << 27);
        break;
      }
      // Data record:
      default: {
        // If type_or_length <= 28, the record length is 4x that value.
        // If type_or_length == 0, the length of the record is stored in the
        // first uint32_t word of the payload.
        uint32_t event_size = 0;
        if (event_header.type_or_length == 0) {
          if (!ReadAndAdvance<uint32_t>(&ptr, end, &event_size))
            return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_DATA_LENGTH;
          // Size includes itself (4 bytes). However we've seen rare
          // contradictions on select Android 4.19+ kernels: the page header
          // says there's still valid data, but the rest of the page is full
          // of zeroes (which would not decode to a valid event). b/204564312.
          if (event_size == 0)
            return FtraceParseStatus::FTRACE_STATUS_ABI_ZERO_DATA_LENGTH;
          else if (event_size < 4)
            return FtraceParseStatus::FTRACE_STATUS_ABI_INVALID_DATA_LENGTH;
          event_size -= 4;
        } else {
          event_size = 4 * event_header.type_or_length;
        }
        const uint8_t* start = ptr;
        const uint8_t* next = ptr + event_size;

        if (next > end)
          return FtraceParseStatus::FTRACE_STATUS_ABI_END_OVERFLOW;

        uint16_t ftrace_event_id = 0;
        if (!ReadAndAdvance<uint16_t>(&ptr, end, &ftrace_event_id))
          return FtraceParseStatus::FTRACE_STATUS_ABI_SHORT_EVENT_ID;

        if (ds_config->event_filter.IsEventEnabled(ftrace_event_id)) {
          // Special-cased handling of some scheduler events when compact
          // format is enabled.
          bool compact_sched_enabled = ds_config->compact_sched.enabled;
          const CompactSchedSwitchFormat& sched_switch_format =
              table->compact_sched_format().sched_switch;
          const CompactSchedWakingFormat& sched_waking_format =
              table->compact_sched_format().sched_waking;

          bool ftrace_print_filter_enabled =
              ds_config->print_filter.has_value();

          // compact sched_switch
          if (compact_sched_enabled &&
              ftrace_event_id == sched_switch_format.event_id) {
            if (event_size < sched_switch_format.size)
              return FtraceParseStatus::FTRACE_STATUS_SHORT_COMPACT_EVENT;

            ParseSchedSwitchCompact(start, timestamp, &sched_switch_format,
                                    bundler->compact_sched_buf(), metadata);

            // compact sched_waking
          } else if (compact_sched_enabled &&
                     ftrace_event_id == sched_waking_format.event_id) {
            if (event_size < sched_waking_format.size)
              return FtraceParseStatus::FTRACE_STATUS_SHORT_COMPACT_EVENT;

            ParseSchedWakingCompact(start, timestamp, &sched_waking_format,
                                    bundler->compact_sched_buf(), metadata);

          } else if (ftrace_print_filter_enabled &&
                     ftrace_event_id == ds_config->print_filter->event_id()) {
            if (ds_config->print_filter->IsEventInteresting(start, next)) {
              protos::pbzero::FtraceEvent* event =
                  bundler->GetOrCreateBundle()->add_event();
              event->set_timestamp(timestamp);
              if (!ParseEvent(ftrace_event_id, start, next, table, ds_config,
                              event, metadata)) {
                return FtraceParseStatus::FTRACE_STATUS_INVALID_EVENT;
              }
            }
          } else {
            // Common case: parse all other types of enabled events.
            protos::pbzero::FtraceEvent* event =
                bundler->GetOrCreateBundle()->add_event();
            event->set_timestamp(timestamp);
            if (!ParseEvent(ftrace_event_id, start, next, table, ds_config,
                            event, metadata)) {
              return FtraceParseStatus::FTRACE_STATUS_INVALID_EVENT;
            }
          }
        }
        last_data_record_ts = timestamp;
        ptr = next;  // jump to next event
      }  // default case
    }    // switch (event_header.type_or_length)
  }      // while (ptr < end)
  if (last_data_record_ts)
    *last_read_event_ts = last_data_record_ts;
  return FtraceParseStatus::FTRACE_STATUS_OK;
}

// |start| is the start of the current event.
// |end| is the end of the buffer.
bool CpuReader::ParseEvent(uint16_t ftrace_event_id,
                           const uint8_t* start,
                           const uint8_t* end,
                           const ProtoTranslationTable* table,
                           const FtraceDataSourceConfig* ds_config,
                           protozero::Message* message,
                           FtraceMetadata* metadata) {
  PERFETTO_DCHECK(start < end);

  // The event must be enabled and known to reach here.
  const Event& info = *table->GetEventById(ftrace_event_id);

  if (info.size > static_cast<size_t>(end - start)) {
    PERFETTO_DLOG("Expected event length is beyond end of buffer.");
    return false;
  }

  bool success = true;
  const Field* common_pid_field = table->common_pid();
  if (PERFETTO_LIKELY(common_pid_field))
    success &=
        ParseField(*common_pid_field, start, end, table, message, metadata);

  protozero::Message* nested =
      message->BeginNestedMessage<protozero::Message>(info.proto_field_id);

  // Parse generic (not known at compile time) event.
  if (PERFETTO_UNLIKELY(info.proto_field_id ==
                        protos::pbzero::FtraceEvent::kGenericFieldNumber)) {
    nested->AppendString(GenericFtraceEvent::kEventNameFieldNumber, info.name);
    for (const Field& field : info.fields) {
      auto* generic_field = nested->BeginNestedMessage<protozero::Message>(
          GenericFtraceEvent::kFieldFieldNumber);
      generic_field->AppendString(GenericFtraceEvent::Field::kNameFieldNumber,
                                  field.ftrace_name);
      success &= ParseField(field, start, end, table, generic_field, metadata);
    }
  } else if (PERFETTO_UNLIKELY(
                 info.proto_field_id ==
                 protos::pbzero::FtraceEvent::kSysEnterFieldNumber)) {
    success &= ParseSysEnter(info, start, end, nested, metadata);
  } else if (PERFETTO_UNLIKELY(
                 info.proto_field_id ==
                 protos::pbzero::FtraceEvent::kSysExitFieldNumber)) {
    success &= ParseSysExit(info, start, end, ds_config, nested, metadata);
  } else {  // Parse all other events.
    for (const Field& field : info.fields) {
      success &= ParseField(field, start, end, table, nested, metadata);
    }
  }

  if (PERFETTO_UNLIKELY(info.proto_field_id ==
                        protos::pbzero::FtraceEvent::kTaskRenameFieldNumber)) {
    // For task renames, we want to store that the pid was renamed. We use the
    // common pid to reduce code complexity as in all the cases we care about,
    // the common pid is the same as the renamed pid (the pid inside the
    // event).
    PERFETTO_DCHECK(metadata->last_seen_common_pid);
    metadata->AddRenamePid(metadata->last_seen_common_pid);
  }

  // This finalizes |nested| and |proto_field| automatically.
  message->Finalize();
  metadata->FinishEvent();
  return success;
}

// Caller must guarantee that the field fits in the range,
// explicitly: start + field.ftrace_offset + field.ftrace_size <= end
// The only exception is fields with strategy = kCStringToString
// where the total size isn't known up front. In this case ParseField
// will check the string terminates in the bounds and won't read past |end|.
bool CpuReader::ParseField(const Field& field,
                           const uint8_t* start,
                           const uint8_t* end,
                           const ProtoTranslationTable* table,
                           protozero::Message* message,
                           FtraceMetadata* metadata) {
  PERFETTO_DCHECK(start + field.ftrace_offset + field.ftrace_size <= end);
  const uint8_t* field_start = start + field.ftrace_offset;
  uint32_t field_id = field.proto_field_id;

  switch (field.strategy) {
    case kUint8ToUint32:
    case kUint8ToUint64:
      ReadIntoVarInt<uint8_t>(field_start, field_id, message);
      return true;
    case kUint16ToUint32:
    case kUint16ToUint64:
      ReadIntoVarInt<uint16_t>(field_start, field_id, message);
      return true;
    case kUint32ToUint32:
    case kUint32ToUint64:
      ReadIntoVarInt<uint32_t>(field_start, field_id, message);
      return true;
    case kUint64ToUint64:
      ReadIntoVarInt<uint64_t>(field_start, field_id, message);
      return true;
    case kInt8ToInt32:
    case kInt8ToInt64:
      ReadIntoVarInt<int8_t>(field_start, field_id, message);
      return true;
    case kInt16ToInt32:
    case kInt16ToInt64:
      ReadIntoVarInt<int16_t>(field_start, field_id, message);
      return true;
    case kInt32ToInt32:
    case kInt32ToInt64:
      ReadIntoVarInt<int32_t>(field_start, field_id, message);
      return true;
    case kInt64ToInt64:
      ReadIntoVarInt<int64_t>(field_start, field_id, message);
      return true;
    case kFixedCStringToString:
      // TODO(hjd): Kernel-dive to check how size:0 char fields work.
      ReadIntoString(field_start, field.ftrace_size, field_id, message);
      return true;
    case kCStringToString:
      // TODO(hjd): Kernel-dive to check how size:0 char fields work.
      ReadIntoString(field_start, static_cast<size_t>(end - field_start),
                     field_id, message);
      return true;
    case kStringPtrToString: {
      uint64_t n = 0;
      // The ftrace field may be 8 or 4 bytes and we need to copy it into the
      // bottom of n. In the unlikely case where the field is >8 bytes we
      // should avoid making things worse by corrupting the stack but we
      // don't need to handle it correctly.
      size_t size = std::min<size_t>(field.ftrace_size, sizeof(n));
      memcpy(base::AssumeLittleEndian(&n),
             reinterpret_cast<const void*>(field_start), size);
      // Look up the address in the printk format map and write it into the
      // proto.
      base::StringView name = table->LookupTraceString(n);
      message->AppendBytes(field_id, name.begin(), name.size());
      return true;
    }
    case kDataLocToString:
      return ReadDataLoc(start, field_start, end, field, message);
    case kBoolToUint32:
    case kBoolToUint64:
      ReadIntoVarInt<uint8_t>(field_start, field_id, message);
      return true;
    case kInode32ToUint64:
      ReadInode<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kInode64ToUint64:
      ReadInode<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kPid32ToInt32:
    case kPid32ToInt64:
      ReadPid(field_start, field_id, message, metadata);
      return true;
    case kCommonPid32ToInt32:
    case kCommonPid32ToInt64:
      ReadCommonPid(field_start, field_id, message, metadata);
      return true;
    case kDevId32ToUint64:
      ReadDevId<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kDevId64ToUint64:
      ReadDevId<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kFtraceSymAddr32ToUint64:
      ReadSymbolAddr<uint32_t>(field_start, field_id, message, metadata);
      return true;
    case kFtraceSymAddr64ToUint64:
      ReadSymbolAddr<uint64_t>(field_start, field_id, message, metadata);
      return true;
    case kInvalidTranslationStrategy:
      break;
  }
  // Shouldn't reach this since we only attempt to parse fields that were
  // validated by the proto translation table earlier.
  return false;
}

bool CpuReader::ParseSysEnter(const Event& info,
                              const uint8_t* start,
                              const uint8_t* end,
                              protozero::Message* message,
                              FtraceMetadata* /* metadata */) {
  if (info.fields.size() != 2) {
    PERFETTO_DLOG("Unexpected number of fields for sys_enter");
    return false;
  }
  const auto& id_field = info.fields[0];
  const auto& args_field = info.fields[1];
  if (start + id_field.ftrace_size + args_field.ftrace_size > end) {
    return false;
  }
  // field:long id;
  if (id_field.ftrace_type != kFtraceInt32 &&
      id_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_id = ReadSignedFtraceValue(
      start + id_field.ftrace_offset, id_field.ftrace_type);
  message->AppendVarInt(id_field.proto_field_id, syscall_id);
  // field:unsigned long args[6];
  // proto_translation_table only allows exactly a 6-element array, so we can
  // make the same hard assumption here.
  constexpr uint16_t arg_count = 6;
  size_t element_size = 0;
  if (args_field.ftrace_type == kFtraceUint32) {
    element_size = 4u;
  } else if (args_field.ftrace_type == kFtraceUint64) {
    element_size = 8u;
  } else {
    return false;
  }
  for (uint16_t i = 0; i < arg_count; ++i) {
    const uint8_t* element_ptr =
        start + args_field.ftrace_offset + i * element_size;
    uint64_t arg_value = 0;
    if (element_size == 8) {
      arg_value = ReadValue<uint64_t>(element_ptr);
    } else {
      arg_value = ReadValue<uint32_t>(element_ptr);
    }
    message->AppendVarInt(args_field.proto_field_id, arg_value);
  }
  return true;
}

bool CpuReader::ParseSysExit(const Event& info,
                             const uint8_t* start,
                             const uint8_t* end,
                             const FtraceDataSourceConfig* ds_config,
                             protozero::Message* message,
                             FtraceMetadata* metadata) {
  if (info.fields.size() != 2) {
    PERFETTO_DLOG("Unexpected number of fields for sys_exit");
    return false;
  }
  const auto& id_field = info.fields[0];
  const auto& ret_field = info.fields[1];
  if (start + id_field.ftrace_size + ret_field.ftrace_size > end) {
    return false;
  }
  // field:long id;
  if (id_field.ftrace_type != kFtraceInt32 &&
      id_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_id = ReadSignedFtraceValue(
      start + id_field.ftrace_offset, id_field.ftrace_type);
  message->AppendVarInt(id_field.proto_field_id, syscall_id);
  // field:long ret;
  if (ret_field.ftrace_type != kFtraceInt32 &&
      ret_field.ftrace_type != kFtraceInt64) {
    return false;
  }
  const int64_t syscall_ret = ReadSignedFtraceValue(
      start + ret_field.ftrace_offset, ret_field.ftrace_type);
  message->AppendVarInt(ret_field.proto_field_id, syscall_ret);
  // For any syscall that returns a new file descriptor, mark the fd as a
  // potential candidate for scraping, if the call succeeded and the value is
  // within fd bounds.
  if (ds_config->syscalls_returning_fd.count(syscall_id) && syscall_ret >= 0 &&
      syscall_ret <= std::numeric_limits<int>::max()) {
    const auto pid = metadata->last_seen_common_pid;
    const auto syscall_ret_u = static_cast<uint64_t>(syscall_ret);
    metadata->fds.insert(std::make_pair(pid, syscall_ret_u));
  }
  return true;
}

// Parse a sched_switch event according to pre-validated format, and buffer the
// individual fields in the current compact batch. See the code populating
// |CompactSchedSwitchFormat| for the assumptions made around the format, which
// this code is closely tied to.
// static
void CpuReader::ParseSchedSwitchCompact(const uint8_t* start,
                                        uint64_t timestamp,
                                        const CompactSchedSwitchFormat* format,
                                        CompactSchedBuffer* compact_buf,
                                        FtraceMetadata* metadata) {
  compact_buf->sched_switch().AppendTimestamp(timestamp);

  int32_t next_pid = ReadValue<int32_t>(start + format->next_pid_offset);
  compact_buf->sched_switch().next_pid().Append(next_pid);
  metadata->AddPid(next_pid);

  int32_t next_prio = ReadValue<int32_t>(start + format->next_prio_offset);
  compact_buf->sched_switch().next_prio().Append(next_prio);

  // Varint encoding of int32 and int64 is the same, so treat the value as
  // int64 after reading.
  int64_t prev_state = ReadSignedFtraceValue(start + format->prev_state_offset,
                                             format->prev_state_type);
  compact_buf->sched_switch().prev_state().Append(prev_state);

  // next_comm
  const char* comm_ptr =
      reinterpret_cast<const char*>(start + format->next_comm_offset);
  size_t iid = compact_buf->interner().InternComm(comm_ptr);
  compact_buf->sched_switch().next_comm_index().Append(iid);
}

// static
void CpuReader::ParseSchedWakingCompact(const uint8_t* start,
                                        uint64_t timestamp,
                                        const CompactSchedWakingFormat* format,
                                        CompactSchedBuffer* compact_buf,
                                        FtraceMetadata* metadata) {
  compact_buf->sched_waking().AppendTimestamp(timestamp);

  int32_t pid = ReadValue<int32_t>(start + format->pid_offset);
  compact_buf->sched_waking().pid().Append(pid);
  metadata->AddPid(pid);

  int32_t target_cpu = ReadValue<int32_t>(start + format->target_cpu_offset);
  compact_buf->sched_waking().target_cpu().Append(target_cpu);

  int32_t prio = ReadValue<int32_t>(start + format->prio_offset);
  compact_buf->sched_waking().prio().Append(prio);

  // comm
  const char* comm_ptr =
      reinterpret_cast<const char*>(start + format->comm_offset);
  size_t iid = compact_buf->interner().InternComm(comm_ptr);
  compact_buf->sched_waking().comm_index().Append(iid);

  uint32_t common_flags =
      ReadValue<uint8_t>(start + format->common_flags_offset);
  compact_buf->sched_waking().common_flags().Append(common_flags);
}

}  // namespace perfetto
1039