// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sample_info_reader.h"

#include <string.h>

#include "base/logging.h"
#include "buffer_reader.h"
#include "buffer_writer.h"
#include "kernel/perf_internals.h"
#include "perf_data_utils.h"

namespace quipper {

namespace {

bool IsSupportedEventType(uint32_t type) {
  switch (type) {
    case PERF_RECORD_SAMPLE:
    case PERF_RECORD_MMAP:
    case PERF_RECORD_MMAP2:
    case PERF_RECORD_FORK:
    case PERF_RECORD_EXIT:
    case PERF_RECORD_COMM:
    case PERF_RECORD_LOST:
    case PERF_RECORD_THROTTLE:
    case PERF_RECORD_UNTHROTTLE:
    case PERF_RECORD_AUX:
      return true;
    case PERF_RECORD_READ:
    case PERF_RECORD_MAX:
      return false;
    default:
      LOG(FATAL) << "Unknown event type " << type;
      return false;
  }
}

// Reads the read info (counter value) from perf data. Corresponds to sample
// format type PERF_SAMPLE_READ in the !PERF_FORMAT_GROUP case.
void ReadReadInfo(DataReader* reader, uint64_t read_format,
                  struct perf_sample* sample) {
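  // Per the perf_event.h read_format layout (non-group case), the fields read
  // below are:
  //   u64 value;
  //   { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
  //   { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  //   { u64 id;           } && PERF_FORMAT_ID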
  reader->ReadUint64(&sample->read.one.value);
  if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
    reader->ReadUint64(&sample->read.time_enabled);
  if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
    reader->ReadUint64(&sample->read.time_running);
  if (read_format & PERF_FORMAT_ID) reader->ReadUint64(&sample->read.one.id);
}

// Reads the read info (counter values) from perf data. Corresponds to sample
// format type PERF_SAMPLE_READ in the PERF_FORMAT_GROUP case.
void ReadGroupReadInfo(DataReader* reader, uint64_t read_format,
                       struct perf_sample* sample) {
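  // Per the perf_event.h read_format layout (group case), the fields read
  // below are:
  //   u64 nr;
  //   { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
  //   { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  //   { u64 value;
  //     { u64 id;         } && PERF_FORMAT_ID
  //   } cnt[nr];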
  uint64_t num = 0;
  reader->ReadUint64(&num);
  if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
    reader->ReadUint64(&sample->read.time_enabled);
  if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
    reader->ReadUint64(&sample->read.time_running);

  // Make sure there is no existing allocated memory in
  // |sample->read.group.values|.
  CHECK_EQ(static_cast<void*>(NULL), sample->read.group.values);
  sample_read_value* values = new sample_read_value[num];
  for (uint64_t i = 0; i < num; i++) {
    reader->ReadUint64(&values[i].value);
    if (read_format & PERF_FORMAT_ID) reader->ReadUint64(&values[i].id);
  }
  sample->read.group.nr = num;
  sample->read.group.values = values;
}

// Read call chain info from perf data. Corresponds to sample format type
// PERF_SAMPLE_CALLCHAIN.
void ReadCallchain(DataReader* reader, struct perf_sample* sample) {
  // Make sure there is no existing allocated memory in |sample->callchain|.
  CHECK_EQ(static_cast<void*>(NULL), sample->callchain);

  // The callgraph data consists of a uint64_t value |nr| followed by |nr|
  // addresses.
  uint64_t callchain_size = 0;
  reader->ReadUint64(&callchain_size);

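  // Allocate |callchain_size| + 1 uint64_t's: one for the |nr| field of
  // struct ip_callchain plus one per callchain address.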
  struct ip_callchain* callchain =
      reinterpret_cast<struct ip_callchain*>(new uint64_t[callchain_size + 1]);
  callchain->nr = callchain_size;

  for (size_t i = 0; i < callchain_size; ++i)
    reader->ReadUint64(&callchain->ips[i]);

  sample->callchain = callchain;
}

// Read raw info from perf data. Corresponds to sample format type
// PERF_SAMPLE_RAW.
void ReadRawData(DataReader* reader, struct perf_sample* sample) {
  // Save the original read offset.
  size_t reader_offset = reader->Tell();

  reader->ReadUint32(&sample->raw_size);

  // Allocate space for and read the raw data bytes.
  sample->raw_data = new uint8_t[sample->raw_size];
  reader->ReadData(sample->raw_size, sample->raw_data);

  // Determine the bytes that were read, and align to the next 64 bits.
  reader_offset += Align<uint64_t>(sizeof(sample->raw_size) + sample->raw_size);
  reader->SeekSet(reader_offset);
}

// Read branch stack info from perf data. Corresponds to sample format type
// PERF_SAMPLE_BRANCH_STACK.
void ReadBranchStack(DataReader* reader, struct perf_sample* sample) {
  // Make sure there is no existing allocated memory in
  // |sample->branch_stack|.
  CHECK_EQ(static_cast<void*>(NULL), sample->branch_stack);

  // The branch stack data consists of a uint64_t value |nr| followed by |nr|
  // branch_entry structs.
  uint64_t branch_stack_size = 0;
  reader->ReadUint64(&branch_stack_size);

  struct branch_stack* branch_stack = reinterpret_cast<struct branch_stack*>(
      new uint8_t[sizeof(uint64_t) +
                  branch_stack_size * sizeof(struct branch_entry)]);
  branch_stack->nr = branch_stack_size;
  for (size_t i = 0; i < branch_stack_size; ++i) {
    reader->ReadUint64(&branch_stack->entries[i].from);
    reader->ReadUint64(&branch_stack->entries[i].to);
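    // |flags| is a bitfield struct occupying a single 64-bit word, so it is
    // read as raw bytes rather than as an integer; its in-memory layout is
    // endian-dependent, hence the warning below for cross-endian data.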
    reader->ReadData(sizeof(branch_stack->entries[i].flags),
                     &branch_stack->entries[i].flags);
    if (reader->is_cross_endian()) {
      LOG(ERROR) << "Byte swapping of branch stack flags is not yet supported.";
    }
  }
  sample->branch_stack = branch_stack;
}

// Reads perf sample info data from |event| into |sample|.
// |attr| is the event attribute struct, which contains info such as which
// sample info fields are present.
// |is_cross_endian| indicates that the data is cross-endian and that the byte
// order should be reversed for each field according to its size.
// Returns number of bytes of data read or skipped.
size_t ReadPerfSampleFromData(const event_t& event,
                              const struct perf_event_attr& attr,
                              bool is_cross_endian,
                              struct perf_sample* sample) {
  BufferReader reader(&event, event.header.size);
  reader.set_is_cross_endian(is_cross_endian);
  reader.SeekSet(SampleInfoReader::GetPerfSampleDataOffset(event));

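  // Only PERF_RECORD_SAMPLE events always carry sample info; other event types
  // carry a trailing sample_id block only when attr.sample_id_all is set.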
  if (!(event.header.type == PERF_RECORD_SAMPLE || attr.sample_id_all)) {
    return reader.Tell();
  }

  uint64_t sample_fields = SampleInfoReader::GetSampleFieldsForEventType(
      event.header.type, attr.sample_type);

  // See structure for PERF_RECORD_SAMPLE in kernel/perf_event.h
  // and compare sample_id when sample_id_all is set.

  // NB: For sample_id, sample_fields has already been masked to the set
  // of fields in that struct by GetSampleFieldsForEventType. That set
  // of fields is mostly in the same order as PERF_RECORD_SAMPLE, with
  // the exception of PERF_SAMPLE_IDENTIFIER.

  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
  // if this is a SAMPLE event or the sample_id of another event.
  if (event.header.type == PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      reader.ReadUint64(&sample->id);
    }
  }

  // { u64 ip; } && PERF_SAMPLE_IP
  if (sample_fields & PERF_SAMPLE_IP) {
    reader.ReadUint64(&sample->ip);
  }

  // { u32 pid, tid; } && PERF_SAMPLE_TID
  if (sample_fields & PERF_SAMPLE_TID) {
    reader.ReadUint32(&sample->pid);
    reader.ReadUint32(&sample->tid);
  }

  // { u64 time; } && PERF_SAMPLE_TIME
  if (sample_fields & PERF_SAMPLE_TIME) {
    reader.ReadUint64(&sample->time);
  }

  // { u64 addr; } && PERF_SAMPLE_ADDR
  if (sample_fields & PERF_SAMPLE_ADDR) {
    reader.ReadUint64(&sample->addr);
  }

  // { u64 id; } && PERF_SAMPLE_ID
  if (sample_fields & PERF_SAMPLE_ID) {
    reader.ReadUint64(&sample->id);
  }

  // { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
    reader.ReadUint64(&sample->stream_id);
  }

  // { u32 cpu, res; } && PERF_SAMPLE_CPU
  if (sample_fields & PERF_SAMPLE_CPU) {
    reader.ReadUint32(&sample->cpu);

    // The PERF_SAMPLE_CPU format bit specifies 64-bits of data, but the actual
    // CPU number is really only 32 bits. There is an extra 32-bit word of
    // reserved padding, as the whole field is aligned to 64 bits.

    // reader.ReadUint32(&sample->res); // reserved
    u32 reserved;
    reader.ReadUint32(&reserved);
  }

  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
  if (event.header.type != PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      reader.ReadUint64(&sample->id);
    }
  }

  //
  // The remaining fields are only in PERF_RECORD_SAMPLE
  //

  // { u64 period; } && PERF_SAMPLE_PERIOD
  if (sample_fields & PERF_SAMPLE_PERIOD) {
    reader.ReadUint64(&sample->period);
  }

  // { struct read_format values; } && PERF_SAMPLE_READ
  if (sample_fields & PERF_SAMPLE_READ) {
    if (attr.read_format & PERF_FORMAT_GROUP)
      ReadGroupReadInfo(&reader, attr.read_format, sample);
    else
      ReadReadInfo(&reader, attr.read_format, sample);
  }

  // { u64 nr,
  //   u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
    ReadCallchain(&reader, sample);
  }

  // { u32 size;
  //   char data[size];}&& PERF_SAMPLE_RAW
  if (sample_fields & PERF_SAMPLE_RAW) {
    ReadRawData(&reader, sample);
  }

  // { u64 nr;
  //   { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
    ReadBranchStack(&reader, sample);
  }

  // { u64 abi; # enum perf_sample_regs_abi
  //   u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
  if (sample_fields & PERF_SAMPLE_REGS_USER) {
    VLOG(1) << "Skipping PERF_SAMPLE_REGS_USER data.";
    u64 abi;
    if (!reader.ReadUint64(&abi)) {
      return false;
    }
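    // An |abi| value of 0 (PERF_SAMPLE_REGS_ABI_NONE) means no register values
    // follow; otherwise one u64 is recorded per set bit in sample_regs_user.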
    size_t weight = abi == 0 ? 0 : __builtin_popcountll(attr.sample_regs_user);
    u64 regs[64];
    if (weight > 0 && !reader.ReadData(weight * sizeof(u64), regs)) {
      return false;
    }
  }

  // { u64 size;
  //   char data[size];
  //   u64 dyn_size; } && PERF_SAMPLE_STACK_USER
  if (sample_fields & PERF_SAMPLE_STACK_USER) {
    VLOG(1) << "Skipping PERF_SAMPLE_STACK_USER data.";
    u64 size;
    if (!reader.ReadUint64(&size)) {
      return false;
    }
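    // When |size| is zero, neither the stack data nor the trailing |dyn_size|
    // field is present.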
    if (size != 0) {
      std::unique_ptr<char[]> data(new char[size]);
      if (!reader.ReadData(size, data.get())) {
        return false;
      }
      u64 dyn_size;
      reader.ReadUint64(&dyn_size);
    }
  }

  // { u64 weight; } && PERF_SAMPLE_WEIGHT
  if (sample_fields & PERF_SAMPLE_WEIGHT) {
    reader.ReadUint64(&sample->weight);
  }

  // { u64 data_src; } && PERF_SAMPLE_DATA_SRC
  if (sample_fields & PERF_SAMPLE_DATA_SRC) {
    reader.ReadUint64(&sample->data_src);
  }

  // { u64 transaction; } && PERF_SAMPLE_TRANSACTION
  if (sample_fields & PERF_SAMPLE_TRANSACTION) {
    reader.ReadUint64(&sample->transaction);
  }

  if (sample_fields & ~(PERF_SAMPLE_MAX - 1)) {
    LOG(WARNING) << "Unrecognized sample fields 0x" << std::hex
                 << (sample_fields & ~(PERF_SAMPLE_MAX - 1));
  }

  return reader.Tell();
}

// Writes sample info data from |sample| into |event|.
// |attr| is the event attribute struct, which contains info such as which
// sample info fields are present.
// |event| is the destination event. Its header should already be filled out.
// Returns the number of bytes written or skipped.
size_t WritePerfSampleToData(const struct perf_sample& sample,
                             const struct perf_event_attr& attr,
                             event_t* event) {
  const uint64_t* initial_array_ptr = reinterpret_cast<const uint64_t*>(event);

  uint64_t offset = SampleInfoReader::GetPerfSampleDataOffset(*event);
  uint64_t* array =
      reinterpret_cast<uint64_t*>(event) + offset / sizeof(uint64_t);

  if (!(event->header.type == PERF_RECORD_SAMPLE || attr.sample_id_all)) {
    return offset;
  }

  uint64_t sample_fields = SampleInfoReader::GetSampleFieldsForEventType(
      event->header.type, attr.sample_type);

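  // Scratch space for packing two u32 values into the single u64 word that
  // gets written to |array|.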
  union {
    uint32_t val32[sizeof(uint64_t) / sizeof(uint32_t)];
    uint64_t val64;
  };

  // See notes at the top of ReadPerfSampleFromData regarding the structure
  // of PERF_RECORD_SAMPLE, sample_id, and PERF_SAMPLE_IDENTIFIER, as they
  // all apply here as well.

  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
  // if this is a SAMPLE event or the sample_id of another event.
  if (event->header.type == PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      *array++ = sample.id;
    }
  }

  // { u64 ip; } && PERF_SAMPLE_IP
  if (sample_fields & PERF_SAMPLE_IP) {
    *array++ = sample.ip;
  }

  // { u32 pid, tid; } && PERF_SAMPLE_TID
  if (sample_fields & PERF_SAMPLE_TID) {
    val32[0] = sample.pid;
    val32[1] = sample.tid;
    *array++ = val64;
  }

  // { u64 time; } && PERF_SAMPLE_TIME
  if (sample_fields & PERF_SAMPLE_TIME) {
    *array++ = sample.time;
  }

  // { u64 addr; } && PERF_SAMPLE_ADDR
  if (sample_fields & PERF_SAMPLE_ADDR) {
    *array++ = sample.addr;
  }

  // { u64 id; } && PERF_SAMPLE_ID
  if (sample_fields & PERF_SAMPLE_ID) {
    *array++ = sample.id;
  }

  // { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
    *array++ = sample.stream_id;
  }

  // { u32 cpu, res; } && PERF_SAMPLE_CPU
  if (sample_fields & PERF_SAMPLE_CPU) {
    val32[0] = sample.cpu;
    // val32[1] = sample.res; // reserved
    val32[1] = 0;
    *array++ = val64;
  }

  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
  if (event->header.type != PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      *array++ = sample.id;
    }
  }

  //
  // The remaining fields are only in PERF_RECORD_SAMPLE
  //

  // { u64 period; } && PERF_SAMPLE_PERIOD
  if (sample_fields & PERF_SAMPLE_PERIOD) {
    *array++ = sample.period;
  }

  // { struct read_format values; } && PERF_SAMPLE_READ
  if (sample_fields & PERF_SAMPLE_READ) {
    if (attr.read_format & PERF_FORMAT_GROUP) {
      *array++ = sample.read.group.nr;
      if (attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        *array++ = sample.read.time_enabled;
      if (attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        *array++ = sample.read.time_running;
      for (size_t i = 0; i < sample.read.group.nr; i++) {
        *array++ = sample.read.group.values[i].value;
        if (attr.read_format & PERF_FORMAT_ID)
          *array++ = sample.read.group.values[i].id;
      }
    } else {
      *array++ = sample.read.one.value;
      if (attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        *array++ = sample.read.time_enabled;
      if (attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        *array++ = sample.read.time_running;
      if (attr.read_format & PERF_FORMAT_ID) *array++ = sample.read.one.id;
    }
  }

  // { u64 nr,
  //   u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
    if (!sample.callchain) {
      LOG(ERROR) << "Expecting callchain data, but none was found.";
    } else {
      *array++ = sample.callchain->nr;
      for (size_t i = 0; i < sample.callchain->nr; ++i)
        *array++ = sample.callchain->ips[i];
    }
  }

  // { u32 size;
  //   char data[size];}&& PERF_SAMPLE_RAW
  if (sample_fields & PERF_SAMPLE_RAW) {
    uint32_t* ptr = reinterpret_cast<uint32_t*>(array);
    *ptr++ = sample.raw_size;
    memcpy(ptr, sample.raw_data, sample.raw_size);

    // Update the data write pointer after aligning to the next 64 bits.
    int num_bytes = Align<uint64_t>(sizeof(sample.raw_size) + sample.raw_size);
    array += num_bytes / sizeof(uint64_t);
  }

  // { u64 nr;
  //   { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
    if (!sample.branch_stack) {
      LOG(ERROR) << "Expecting branch stack data, but none was found.";
    } else {
      *array++ = sample.branch_stack->nr;
      for (size_t i = 0; i < sample.branch_stack->nr; ++i) {
        *array++ = sample.branch_stack->entries[i].from;
        *array++ = sample.branch_stack->entries[i].to;
        memcpy(array++, &sample.branch_stack->entries[i].flags,
               sizeof(uint64_t));
      }
    }
  }

  // { u64 abi; # enum perf_sample_regs_abi
  //   u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
  if (sample_fields & PERF_SAMPLE_REGS_USER) {
    LOG(ERROR) << "PERF_SAMPLE_REGS_USER is not yet supported.";
    return (array - initial_array_ptr) * sizeof(uint64_t);
  }

  // { u64 size;
  //   char data[size];
  //   u64 dyn_size; } && PERF_SAMPLE_STACK_USER
  if (sample_fields & PERF_SAMPLE_STACK_USER) {
    LOG(ERROR) << "PERF_SAMPLE_STACK_USER is not yet supported.";
    return (array - initial_array_ptr) * sizeof(uint64_t);
  }

  // { u64 weight; } && PERF_SAMPLE_WEIGHT
  if (sample_fields & PERF_SAMPLE_WEIGHT) {
    *array++ = sample.weight;
  }

  // { u64 data_src; } && PERF_SAMPLE_DATA_SRC
  if (sample_fields & PERF_SAMPLE_DATA_SRC) {
    *array++ = sample.data_src;
  }

  // { u64 transaction; } && PERF_SAMPLE_TRANSACTION
  if (sample_fields & PERF_SAMPLE_TRANSACTION) {
    *array++ = sample.transaction;
  }

  return (array - initial_array_ptr) * sizeof(uint64_t);
}

}  // namespace

bool SampleInfoReader::ReadPerfSampleInfo(const event_t& event,
                                          struct perf_sample* sample) const {
  CHECK(sample);

  if (!IsSupportedEventType(event.header.type)) {
    LOG(ERROR) << "Unsupported event type " << event.header.type;
    return false;
  }

  size_t size_read_or_skipped =
      ReadPerfSampleFromData(event, event_attr_, read_cross_endian_, sample);

  if (size_read_or_skipped != event.header.size) {
    LOG(ERROR) << "Read/skipped " << size_read_or_skipped << " bytes, expected "
               << event.header.size << " bytes.";
  }

  return (size_read_or_skipped == event.header.size);
}

bool SampleInfoReader::WritePerfSampleInfo(const perf_sample& sample,
                                           event_t* event) const {
  CHECK(event);

  if (!IsSupportedEventType(event->header.type)) {
    LOG(ERROR) << "Unsupported event type " << event->header.type;
    return false;
  }

  size_t size_written_or_skipped =
      WritePerfSampleToData(sample, event_attr_, event);
  if (size_written_or_skipped != event->header.size) {
    LOG(ERROR) << "Wrote/skipped " << size_written_or_skipped
               << " bytes, expected " << event->header.size << " bytes.";
  }

  return (size_written_or_skipped == event->header.size);
}

// static
uint64_t SampleInfoReader::GetSampleFieldsForEventType(uint32_t event_type,
                                                        uint64_t sample_type) {
  uint64_t mask = UINT64_MAX;
  switch (event_type) {
    case PERF_RECORD_MMAP:
    case PERF_RECORD_LOST:
    case PERF_RECORD_COMM:
    case PERF_RECORD_EXIT:
    case PERF_RECORD_THROTTLE:
    case PERF_RECORD_UNTHROTTLE:
    case PERF_RECORD_FORK:
    case PERF_RECORD_READ:
    case PERF_RECORD_MMAP2:
    case PERF_RECORD_AUX:
      // See perf_event.h "struct" sample_id and sample_id_all.
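      // struct sample_id {
      //   { u32 pid, tid;  } && PERF_SAMPLE_TID
      //   { u64 time;      } && PERF_SAMPLE_TIME
      //   { u64 id;        } && PERF_SAMPLE_ID
      //   { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
      //   { u32 cpu, res;  } && PERF_SAMPLE_CPU
      //   { u64 id;        } && PERF_SAMPLE_IDENTIFIER
      // };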
      mask = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
             PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER;
      break;
    case PERF_RECORD_SAMPLE:
      break;
    default:
      LOG(FATAL) << "Unknown event type " << event_type;
  }
  return sample_type & mask;
}

// static
uint64_t SampleInfoReader::GetPerfSampleDataOffset(const event_t& event) {
  uint64_t offset = UINT64_MAX;
  switch (event.header.type) {
    case PERF_RECORD_SAMPLE:
      offset = offsetof(event_t, sample.array);
      break;
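    // The mmap, mmap2, and comm events end in a fixed-size char array holding
    // a null-terminated string, so the sample info begins right after the
    // actual string contents, padded out to a uint64_t boundary. Hence the
    // sizeof(event) - sizeof(string field) + GetUint64AlignedStringLength(...)
    // computations below.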
    case PERF_RECORD_MMAP:
      offset = sizeof(event.mmap) - sizeof(event.mmap.filename) +
               GetUint64AlignedStringLength(event.mmap.filename);
      break;
    case PERF_RECORD_FORK:
    case PERF_RECORD_EXIT:
      offset = sizeof(event.fork);
      break;
    case PERF_RECORD_COMM:
      offset = sizeof(event.comm) - sizeof(event.comm.comm) +
               GetUint64AlignedStringLength(event.comm.comm);
      break;
    case PERF_RECORD_LOST:
      offset = sizeof(event.lost);
      break;
    case PERF_RECORD_THROTTLE:
    case PERF_RECORD_UNTHROTTLE:
      offset = sizeof(event.throttle);
      break;
    case PERF_RECORD_READ:
      offset = sizeof(event.read);
      break;
    case PERF_RECORD_MMAP2:
      offset = sizeof(event.mmap2) - sizeof(event.mmap2.filename) +
               GetUint64AlignedStringLength(event.mmap2.filename);
      break;
    case PERF_RECORD_AUX:
      offset = sizeof(event.aux);
      break;
    default:
      LOG(FATAL) << "Unknown event type " << event.header.type;
      break;
  }
  // Make sure the offset was valid.
  CHECK_NE(offset, UINT64_MAX);
  CHECK_EQ(offset % sizeof(uint64_t), 0U);
  return offset;
}

}  // namespace quipper