/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RecordReadThread.h"

#include <sys/resource.h>
#include <unistd.h>

#include <algorithm>
#include <unordered_map>

#include "environment.h"
#include "event_type.h"
#include "record.h"
#include "utils.h"

namespace simpleperf {

static constexpr size_t kDefaultLowBufferLevel = 10 * 1024 * 1024u;
static constexpr size_t kDefaultCriticalBufferLevel = 5 * 1024 * 1024u;

RecordBuffer::RecordBuffer(size_t buffer_size)
    : read_head_(0), write_head_(0), buffer_size_(buffer_size), buffer_(new char[buffer_size]) {}

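// Note: RecordBuffer behaves as a single-producer/single-consumer ring buffer: the read thread
// fills it via AllocWriteSpace()/FinishWrite(), and the main thread drains it via
// GetCurrentRecord()/MoveToNextRecord(). One byte between write_head_ and read_head_ is always
// kept unused, so read_head_ == write_head_ unambiguously means "empty".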
size_t RecordBuffer::GetFreeSize() const {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  if (write_head <= write_tail) {
    return write_tail - write_head;
  }
  return buffer_size_ - write_head + write_tail;
}

char* RecordBuffer::AllocWriteSpace(size_t record_size) {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  size_t read_head = read_head_.load(std::memory_order_acquire);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  cur_write_record_size_ = record_size;
  if (write_head < write_tail) {
    if (write_head + record_size > write_tail) {
      return nullptr;
    }
  } else if (write_head + record_size > buffer_size_) {
    // Not enough space at the end of the buffer, need to wrap to the start of the buffer.
    if (write_tail < record_size) {
      return nullptr;
    }
    if (buffer_size_ - write_head >= sizeof(perf_event_header)) {
      // Set the size field in perf_event_header to 0. So GetCurrentRecord() can wrap to the start
      // of the buffer when size is 0.
      memset(buffer_.get() + write_head, 0, sizeof(perf_event_header));
    }
    cur_write_record_size_ += buffer_size_ - write_head;
    write_head = 0;
  }
  return buffer_.get() + write_head;
}

void RecordBuffer::FinishWrite() {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  write_head = (write_head + cur_write_record_size_) % buffer_size_;
  write_head_.store(write_head, std::memory_order_release);
}

char* RecordBuffer::GetCurrentRecord() {
  size_t write_head = write_head_.load(std::memory_order_acquire);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  if (read_head == write_head) {
    return nullptr;
  }
  perf_event_header header;
  if (read_head > write_head) {
    if (buffer_size_ - read_head < sizeof(header) ||
        (memcpy(&header, buffer_.get() + read_head, sizeof(header)) && header.size == 0)) {
      // Need to wrap to the start of the buffer.
      cur_read_record_size_ += buffer_size_ - read_head;
      read_head = 0;
      memcpy(&header, buffer_.get(), sizeof(header));
    }
  } else {
    memcpy(&header, buffer_.get() + read_head, sizeof(header));
  }
  cur_read_record_size_ += header.size;
  return buffer_.get() + read_head;
}

void RecordBuffer::MoveToNextRecord() {
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  read_head = (read_head + cur_read_record_size_) % buffer_size_;
  read_head_.store(read_head, std::memory_order_release);
  cur_read_record_size_ = 0;
}

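// RecordParser precomputes byte offsets of the pid, time and read fields inside
// PERF_RECORD_SAMPLE records based on attr.sample_type, following the field order defined by the
// perf_event ABI. For non-sample records (when attr.sample_id_all is set), the time field sits at
// a fixed distance from the end of the record, so only that reverse offset needs to be stored.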
RecordParser::RecordParser(const perf_event_attr& attr)
    : sample_type_(attr.sample_type),
      read_format_(attr.read_format),
      sample_regs_count_(__builtin_popcountll(attr.sample_regs_user)) {
  size_t pos = sizeof(perf_event_header);
  uint64_t mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  if (sample_type_ & PERF_SAMPLE_TID) {
    pid_pos_in_sample_records_ = pos;
    pos += sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_TIME) {
    time_pos_in_sample_records_ = pos;
    pos += sizeof(uint64_t);
  }
  mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU |
         PERF_SAMPLE_PERIOD;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  read_pos_in_sample_records_ = pos;
  if ((sample_type_ & PERF_SAMPLE_TIME) && attr.sample_id_all) {
    mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_ID;
    time_rpos_in_non_sample_records_ =
        (__builtin_popcountll(sample_type_ & mask) + 1) * sizeof(uint64_t);
  }
}

size_t RecordParser::GetTimePos(const perf_event_header& header) const {
  if (header.type == PERF_RECORD_SAMPLE) {
    return time_pos_in_sample_records_;
  }
  if (time_rpos_in_non_sample_records_ != 0u &&
      time_rpos_in_non_sample_records_ < header.size - sizeof(perf_event_header)) {
    return header.size - time_rpos_in_non_sample_records_;
  }
  return 0;
}

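// Walk the variable-length fields (read, callchain, raw, branch stack, user regs) that precede
// the user stack data in a sample record to find the offset of its size field. read_record_fn
// reads bytes from the record being parsed, which may still live in the kernel's mmap buffer.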
size_t RecordParser::GetStackSizePos(
    const std::function<void(size_t, size_t, void*)>& read_record_fn) const {
  size_t pos = read_pos_in_sample_records_;
  if (sample_type_ & PERF_SAMPLE_READ) {
    uint64_t nr = 1;
    if (read_format_ & PERF_FORMAT_GROUP) {
      read_record_fn(pos, sizeof(nr), &nr);
      pos += sizeof(uint64_t);
    }
    size_t u64_count = nr;
    u64_count += (read_format_ & PERF_FORMAT_TOTAL_TIME_ENABLED) ? 1 : 0;
    u64_count += (read_format_ & PERF_FORMAT_TOTAL_TIME_RUNNING) ? 1 : 0;
    u64_count += (read_format_ & PERF_FORMAT_ID) ? nr : 0;
    pos += u64_count * sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_CALLCHAIN) {
    uint64_t ip_nr;
    read_record_fn(pos, sizeof(ip_nr), &ip_nr);
    pos += (ip_nr + 1) * sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_RAW) {
    uint32_t size;
    read_record_fn(pos, sizeof(size), &size);
    pos += size + sizeof(uint32_t);
  }
  if (sample_type_ & PERF_SAMPLE_BRANCH_STACK) {
    uint64_t stack_nr;
    read_record_fn(pos, sizeof(stack_nr), &stack_nr);
    pos += sizeof(uint64_t) + stack_nr * sizeof(BranchStackItemType);
  }
  if (sample_type_ & PERF_SAMPLE_REGS_USER) {
    uint64_t abi;
    read_record_fn(pos, sizeof(abi), &abi);
    pos += (1 + (abi == 0 ? 0 : sample_regs_count_)) * sizeof(uint64_t);
  }
  return (sample_type_ & PERF_SAMPLE_STACK_USER) ? pos : 0;
}

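// The kernel's mmap data buffer is a power of two in size, so buffer_mask_ lets positions wrap
// with a bitwise AND instead of a modulo.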
KernelRecordReader::KernelRecordReader(EventFd* event_fd) : event_fd_(event_fd) {
  size_t buffer_size;
  buffer_ = event_fd_->GetMappedBuffer(buffer_size);
  buffer_mask_ = buffer_size - 1;
}

bool KernelRecordReader::GetDataFromKernelBuffer() {
  data_size_ = event_fd_->GetAvailableMmapDataSize(data_pos_);
  if (data_size_ == 0) {
    return false;
  }
  init_data_size_ = data_size_;
  record_header_.size = 0;
  return true;
}

void KernelRecordReader::ReadRecord(size_t pos, size_t size, void* dest) {
  pos = (pos + data_pos_) & buffer_mask_;
  size_t copy_size = std::min(size, buffer_mask_ + 1 - pos);
  memcpy(dest, buffer_ + pos, copy_size);
  if (copy_size < size) {
    memcpy(static_cast<char*>(dest) + copy_size, buffer_, size - copy_size);
  }
}

bool KernelRecordReader::MoveToNextRecord(const RecordParser& parser) {
  data_pos_ = (data_pos_ + record_header_.size) & buffer_mask_;
  data_size_ -= record_header_.size;
  if (data_size_ == 0) {
    event_fd_->DiscardMmapData(init_data_size_);
    init_data_size_ = 0;
    return false;
  }
  ReadRecord(0, sizeof(record_header_), &record_header_);
  size_t time_pos = parser.GetTimePos(record_header_);
  if (time_pos != 0) {
    ReadRecord(time_pos, sizeof(record_time_), &record_time_);
  }
  return true;
}

RecordReadThread::RecordReadThread(size_t record_buffer_size, const perf_event_attr& attr,
                                   size_t min_mmap_pages, size_t max_mmap_pages,
                                   size_t aux_buffer_size, bool allow_cutting_samples,
                                   bool exclude_perf)
    : record_buffer_(record_buffer_size),
      record_parser_(attr),
      attr_(attr),
      min_mmap_pages_(min_mmap_pages),
      max_mmap_pages_(max_mmap_pages),
      aux_buffer_size_(aux_buffer_size) {
  if (attr.sample_type & PERF_SAMPLE_STACK_USER) {
    stack_size_in_sample_record_ = attr.sample_stack_user;
  }
  record_buffer_low_level_ = std::min(record_buffer_size / 4, kDefaultLowBufferLevel);
  record_buffer_critical_level_ = std::min(record_buffer_size / 6, kDefaultCriticalBufferLevel);
  if (!allow_cutting_samples) {
    record_buffer_low_level_ = record_buffer_critical_level_;
  }
  if (exclude_perf) {
    exclude_pid_ = getpid();
  }
}

RecordReadThread::~RecordReadThread() {
  if (read_thread_) {
    StopReadThread();
  }
}

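// Two pipes connect the main thread and the read thread: the cmd pipe wakes the read thread's
// IOEventLoop when a command is posted, and the data pipe wakes the main thread's IOEventLoop
// when new records are available in record_buffer_.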
bool RecordReadThread::RegisterDataCallback(IOEventLoop& loop,
                                            const std::function<bool()>& data_callback) {
  int cmd_fd[2];
  int data_fd[2];
  if (pipe2(cmd_fd, O_CLOEXEC) != 0 || pipe2(data_fd, O_CLOEXEC) != 0) {
    PLOG(ERROR) << "pipe2";
    return false;
  }
  read_cmd_fd_.reset(cmd_fd[0]);
  write_cmd_fd_.reset(cmd_fd[1]);
  cmd_ = NO_CMD;
  read_data_fd_.reset(data_fd[0]);
  write_data_fd_.reset(data_fd[1]);
  has_data_notification_ = false;
  if (!loop.AddReadEvent(read_data_fd_, data_callback)) {
    return false;
  }
  read_thread_.reset(new std::thread([&]() { RunReadThread(); }));
  return true;
}

bool RecordReadThread::AddEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_ADD_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::RemoveEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_REMOVE_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::SyncKernelBuffer() {
  return SendCmdToReadThread(CMD_SYNC_KERNEL_BUFFER, nullptr);
}

bool RecordReadThread::StopReadThread() {
  bool result = true;
  if (read_thread_ != nullptr) {
    result = SendCmdToReadThread(CMD_STOP_THREAD, nullptr);
    if (result) {
      read_thread_->join();
      read_thread_ = nullptr;
    }
  }
  return result;
}

bool RecordReadThread::SendCmdToReadThread(Cmd cmd, void* cmd_arg) {
  {
    std::lock_guard<std::mutex> lock(cmd_mutex_);
    cmd_ = cmd;
    cmd_arg_ = cmd_arg;
  }
  char unused = 0;
  if (TEMP_FAILURE_RETRY(write(write_cmd_fd_, &unused, 1)) != 1) {
    return false;
  }
  std::unique_lock<std::mutex> lock(cmd_mutex_);
  while (cmd_ != NO_CMD) {
    cmd_finish_cond_.wait(lock);
  }
  return cmd_result_;
}

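// Called on the main thread. For PERF_RECORD_AUXTRACE records, the raw aux data is stored in the
// record buffer immediately after the record itself, so the current record size is extended by
// aux_size and location.addr is pointed at that trailing data.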
std::unique_ptr<Record> RecordReadThread::GetRecord() {
  record_buffer_.MoveToNextRecord();
  char* p = record_buffer_.GetCurrentRecord();
  if (p != nullptr) {
    std::unique_ptr<Record> r = ReadRecordFromBuffer(attr_, p, record_buffer_.BufferEnd());
    CHECK(r);
    if (r->type() == PERF_RECORD_AUXTRACE) {
      auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
      record_buffer_.AddCurrentRecordSize(auxtrace->data->aux_size);
      auxtrace->location.addr = r->Binary() + r->size();
    }
    return r;
  }
  if (has_data_notification_) {
    char unused;
    TEMP_FAILURE_RETRY(read(read_data_fd_, &unused, 1));
    has_data_notification_ = false;
  }
  return nullptr;
}

void RecordReadThread::RunReadThread() {
  IncreaseThreadPriority();
  IOEventLoop loop;
  CHECK(loop.AddReadEvent(read_cmd_fd_, [&]() { return HandleCmd(loop); }));
  loop.RunLoop();
}

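// RLIMIT_NICE expresses the nice ceiling as 20 - rlim_cur, so a soft limit of 40 means the
// thread may raise its priority all the way to nice -20.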
void RecordReadThread::IncreaseThreadPriority() {
  // TODO: use real time priority for root.
  rlimit rlim;
  int result = getrlimit(RLIMIT_NICE, &rlim);
  if (result == 0 && rlim.rlim_cur == 40) {
    result = setpriority(PRIO_PROCESS, gettid(), -20);
    if (result == 0) {
      LOG(VERBOSE) << "Priority of record read thread is increased";
    }
  }
}

RecordReadThread::Cmd RecordReadThread::GetCmd() {
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  return cmd_;
}

bool RecordReadThread::HandleCmd(IOEventLoop& loop) {
  char unused;
  TEMP_FAILURE_RETRY(read(read_cmd_fd_, &unused, 1));
  bool result = true;
  switch (GetCmd()) {
    case CMD_ADD_EVENT_FDS:
      result = HandleAddEventFds(loop, *static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_REMOVE_EVENT_FDS:
      result = HandleRemoveEventFds(*static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_SYNC_KERNEL_BUFFER:
      result = ReadRecordsFromKernelBuffer();
      break;
    case CMD_STOP_THREAD:
      result = loop.ExitLoop();
      break;
    default:
      LOG(ERROR) << "Unknown cmd: " << GetCmd();
      result = false;
      break;
  }
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  cmd_ = NO_CMD;
  cmd_result_ = result;
  cmd_finish_cond_.notify_one();
  return true;
}

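// Map kernel buffers for the new event fds, starting from max_mmap_pages_ and halving the page
// count until allocation succeeds (failures are only reported at min_mmap_pages_). Events on the
// same CPU share one mapped buffer; ETM events additionally get an aux buffer.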
bool RecordReadThread::HandleAddEventFds(IOEventLoop& loop,
                                         const std::vector<EventFd*>& event_fds) {
  std::unordered_map<int, EventFd*> cpu_map;
  for (size_t pages = max_mmap_pages_; pages >= min_mmap_pages_; pages >>= 1) {
    bool success = true;
    bool report_error = pages == min_mmap_pages_;
    for (EventFd* fd : event_fds) {
      auto it = cpu_map.find(fd->Cpu());
      if (it == cpu_map.end()) {
        if (!fd->CreateMappedBuffer(pages, report_error)) {
          success = false;
          break;
        }
        if (IsEtmEventType(fd->attr().type)) {
          if (!fd->CreateAuxBuffer(aux_buffer_size_, report_error)) {
            fd->DestroyMappedBuffer();
            success = false;
            break;
          }
        }
        cpu_map[fd->Cpu()] = fd;
      } else {
        if (!fd->ShareMappedBuffer(*(it->second), pages == min_mmap_pages_)) {
          success = false;
          break;
        }
      }
    }
    if (success) {
      LOG(VERBOSE) << "Each kernel buffer is " << pages << " pages.";
      break;
    }
    for (auto& pair : cpu_map) {
      pair.second->DestroyMappedBuffer();
      pair.second->DestroyAuxBuffer();
    }
    cpu_map.clear();
  }
  if (cpu_map.empty()) {
    return false;
  }
  for (auto& pair : cpu_map) {
    if (!pair.second->StartPolling(loop, [this]() { return ReadRecordsFromKernelBuffer(); })) {
      return false;
    }
    kernel_record_readers_.emplace_back(pair.second);
  }
  return true;
}

bool RecordReadThread::HandleRemoveEventFds(const std::vector<EventFd*>& event_fds) {
  for (auto& event_fd : event_fds) {
    if (event_fd->HasMappedBuffer()) {
      auto it = std::find_if(
          kernel_record_readers_.begin(), kernel_record_readers_.end(),
          [&](const KernelRecordReader& reader) { return reader.GetEventFd() == event_fd; });
      if (it != kernel_record_readers_.end()) {
        kernel_record_readers_.erase(it);
        event_fd->StopPolling();
        event_fd->DestroyMappedBuffer();
        event_fd->DestroyAuxBuffer();
      }
    }
  }
  return true;
}

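// std::make_heap/push_heap/pop_heap build a max-heap with respect to the comparator, so comparing
// with '>' keeps the reader holding the earliest record time at the front of the heap.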
static bool CompareRecordTime(KernelRecordReader* r1, KernelRecordReader* r2) {
  return r1->RecordTime() > r2->RecordTime();
}

// When reading from mmap buffers, we prefer reading from all buffers at once rather than one
// buffer at a time, because reading all buffers at once lets us merge records from different
// buffers easily in memory. Otherwise, we would have to sort the records with greater effort.
bool RecordReadThread::ReadRecordsFromKernelBuffer() {
  do {
    std::vector<KernelRecordReader*> readers;
    for (auto& reader : kernel_record_readers_) {
      if (reader.GetDataFromKernelBuffer()) {
        readers.push_back(&reader);
      }
    }
    bool has_data = false;
    if (!readers.empty()) {
      has_data = true;
      if (readers.size() == 1u) {
        // Only one buffer has data, process it directly.
        while (readers[0]->MoveToNextRecord(record_parser_)) {
          PushRecordToRecordBuffer(readers[0]);
        }
      } else {
        // Use a binary heap to merge records from different buffers. As records from the same
        // buffer are already ordered by time, we only need to merge the first record from all
        // buffers. And each time a record is popped from the heap, we put the next record from its
        // buffer into the heap.
        for (auto& reader : readers) {
          reader->MoveToNextRecord(record_parser_);
        }
        std::make_heap(readers.begin(), readers.end(), CompareRecordTime);
        size_t size = readers.size();
        while (size > 0) {
          std::pop_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          PushRecordToRecordBuffer(readers[size - 1]);
          if (readers[size - 1]->MoveToNextRecord(record_parser_)) {
            std::push_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          } else {
            size--;
          }
        }
      }
    }
    ReadAuxDataFromKernelBuffer(&has_data);
    if (!has_data) {
      break;
    }
    // Having collected everything available, this is a good time to try to re-enable any events
    // that might have been disabled by the kernel.
    for (auto event_fd : event_fds_disabled_by_kernel_) {
      event_fd->SetEnableEvent(true);
    }
    event_fds_disabled_by_kernel_.clear();
    if (!SendDataNotificationToMainThread()) {
      return false;
    }
    // If there are no commands, we can loop until there is no more data from the kernel.
  } while (GetCmd() == NO_CMD);
  return true;
}

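// For PERF_SAMPLE_STACK_USER, a sample record ends with: u64 size; char data[size]; and, when
// size != 0, u64 dyn_size (the amount of data actually filled). Cutting stack data therefore
// means shrinking data[], rewriting both size fields, and reducing header.size accordingly.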
void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_record_reader) {
  const perf_event_header& header = kernel_record_reader->RecordHeader();
  if (header.type == PERF_RECORD_SAMPLE && exclude_pid_ != -1) {
    uint32_t pid;
    kernel_record_reader->ReadRecord(record_parser_.GetPidPosInSampleRecord(), sizeof(pid), &pid);
    if (pid == exclude_pid_) {
      return;
    }
  }
  if (header.type == PERF_RECORD_SAMPLE && stack_size_in_sample_record_ > 1024) {
    size_t free_size = record_buffer_.GetFreeSize();
    if (free_size < record_buffer_critical_level_) {
      // When the free size in record buffer is below critical level, drop sample records to save
      // space for more important records (like mmap or fork records).
      stat_.lost_samples++;
      return;
    }
    size_t stack_size_limit = stack_size_in_sample_record_;
    if (free_size < record_buffer_low_level_) {
      // When the free size in record buffer is below low level, cut the stack data in sample
      // records to 1K. This makes the unwinder unwind only part of the callchains, but hopefully
      // the call chain joiner can complete the callchains.
      stack_size_limit = 1024;
    }
    size_t stack_size_pos =
        record_parser_.GetStackSizePos([&](size_t pos, size_t size, void* dest) {
          return kernel_record_reader->ReadRecord(pos, size, dest);
        });
    uint64_t stack_size;
    kernel_record_reader->ReadRecord(stack_size_pos, sizeof(stack_size), &stack_size);
    if (stack_size > 0) {
      size_t dyn_stack_size_pos = stack_size_pos + sizeof(stack_size) + stack_size;
      uint64_t dyn_stack_size;
      kernel_record_reader->ReadRecord(dyn_stack_size_pos, sizeof(dyn_stack_size), &dyn_stack_size);
      if (dyn_stack_size == 0) {
        // If stack_user_data.dyn_size == 0, it may be because the kernel misses the patch to
        // update dyn_size, like in N9 (See b/22612370). So assume all stack data is valid if
        // dyn_size == 0.
        // TODO: Add cts test.
        dyn_stack_size = stack_size;
      }
      // When simpleperf requests the kernel to dump 64K stack per sample, it will allocate 64K
      // space in each sample to store stack data. However, a thread may use less stack than 64K.
      // So not all the 64K stack data in a sample is valid, and we only need to keep valid stack
      // data, whose size is dyn_stack_size.
      uint64_t new_stack_size = Align(std::min<uint64_t>(dyn_stack_size, stack_size_limit), 8);
      if (stack_size > new_stack_size) {
        // Remove part of the stack data.
        perf_event_header new_header = header;
        new_header.size -= stack_size - new_stack_size;
        char* p = record_buffer_.AllocWriteSpace(new_header.size);
        if (p != nullptr) {
          memcpy(p, &new_header, sizeof(new_header));
          size_t pos = sizeof(new_header);
          kernel_record_reader->ReadRecord(pos, stack_size_pos - pos, p + pos);
          memcpy(p + stack_size_pos, &new_stack_size, sizeof(uint64_t));
          pos = stack_size_pos + sizeof(uint64_t);
          kernel_record_reader->ReadRecord(pos, new_stack_size, p + pos);
          memcpy(p + pos + new_stack_size, &new_stack_size, sizeof(uint64_t));
          record_buffer_.FinishWrite();
          if (new_stack_size < dyn_stack_size) {
            stat_.cut_stack_samples++;
          }
        } else {
          stat_.lost_samples++;
        }
        return;
      }
    }
  }
  char* p = record_buffer_.AllocWriteSpace(header.size);
  if (p != nullptr) {
    kernel_record_reader->ReadRecord(0, header.size, p);
    if (header.type == PERF_RECORD_AUX) {
      AuxRecord r;
      if (r.Parse(attr_, p, p + header.size) && (r.data->flags & PERF_AUX_FLAG_TRUNCATED)) {
        // When the kernel sees aux output flagged with PERF_AUX_FLAG_TRUNCATED,
        // it sets a pending disable on the event:
        // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/events/ring_buffer.c?h=v5.13#n516
        // The truncated flag is set by the Coresight driver when some trace was lost,
        // which can be caused by a full buffer. Therefore, try to re-enable the event
        // only after we have collected the aux data.
        event_fds_disabled_by_kernel_.insert(kernel_record_reader->GetEventFd());
      }
    }
    record_buffer_.FinishWrite();
  } else {
    if (header.type == PERF_RECORD_SAMPLE) {
      stat_.lost_samples++;
    } else {
      stat_.lost_non_samples++;
    }
  }
}

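// The aux ring buffer may wrap around, so GetAvailableAuxData() can return the pending data as
// two chunks. Both chunks are copied after an AuxTraceRecord header, and the total is padded with
// zero bytes up to the 8-byte-aligned aux_size recorded in the header.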
void RecordReadThread::ReadAuxDataFromKernelBuffer(bool* has_data) {
  for (auto& reader : kernel_record_readers_) {
    EventFd* event_fd = reader.GetEventFd();
    if (event_fd->HasAuxBuffer()) {
      char* buf[2];
      size_t size[2];
      uint64_t offset = event_fd->GetAvailableAuxData(&buf[0], &size[0], &buf[1], &size[1]);
      size_t aux_size = size[0] + size[1];
      if (aux_size == 0) {
        continue;
      }
      *has_data = true;
      AuxTraceRecord auxtrace(Align(aux_size, 8), offset, event_fd->Cpu(), 0, event_fd->Cpu());
      size_t alloc_size = auxtrace.size() + auxtrace.data->aux_size;
      if (record_buffer_.GetFreeSize() < alloc_size + record_buffer_critical_level_) {
        stat_.lost_aux_data_size += aux_size;
      } else {
        char* p = record_buffer_.AllocWriteSpace(alloc_size);
        CHECK(p != nullptr);
        MoveToBinaryFormat(auxtrace.Binary(), auxtrace.size(), p);
        MoveToBinaryFormat(buf[0], size[0], p);
        if (size[1] != 0) {
          MoveToBinaryFormat(buf[1], size[1], p);
        }
        size_t pad_size = auxtrace.data->aux_size - aux_size;
        if (pad_size != 0) {
          uint64_t pad = 0;
          memcpy(p, &pad, pad_size);
        }
        record_buffer_.FinishWrite();
        stat_.aux_data_size += aux_size;
        LOG(DEBUG) << "record aux data " << aux_size << " bytes";
      }
      event_fd->DiscardAuxData(aux_size);
    }
  }
}

bool RecordReadThread::SendDataNotificationToMainThread() {
  if (!has_data_notification_.load(std::memory_order_relaxed)) {
    has_data_notification_ = true;
    char unused = 0;
    if (TEMP_FAILURE_RETRY(write(write_data_fd_, &unused, 1)) != 1) {
      PLOG(ERROR) << "write";
      return false;
    }
  }
  return true;
}

}  // namespace simpleperf