/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "event_selection_set.h"

#include <algorithm>
#include <atomic>
#include <thread>

#include <android-base/logging.h>

#include "environment.h"
#include "ETMRecorder.h"
#include "event_attr.h"
#include "event_type.h"
#include "IOEventLoop.h"
#include "perf_regs.h"
#include "utils.h"
#include "RecordReadThread.h"

using namespace simpleperf;

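// Probe branch stack sampling support by building a cpu-cycles attr that requests
// PERF_SAMPLE_BRANCH_STACK and checking whether the kernel accepts it.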
bool IsBranchSamplingSupported() {
  const EventType* type = FindEventTypeByName("cpu-cycles");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
  return IsEventAttrSupported(attr, type->name);
}

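// Probe dwarf callchain support by requesting user register and user stack dumps on a
// cpu-clock attr and checking whether the kernel accepts it.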
bool IsDwarfCallChainSamplingSupported() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.sample_type |=
      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
  attr.exclude_callchain_user = 1;
  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
  attr.sample_stack_user = 8192;
  return IsEventAttrSupported(attr, type->name);
}

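// Check whether samples for tracepoint events carry a valid instruction pointer: start a
// short-lived thread, record one sched:sched_switch sample on it, and test whether the
// sample's ip field is non-zero.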
bool IsDumpingRegsForTracepointEventsSupported() {
  const EventType* event_type = FindEventTypeByName("sched:sched_switch", false);
  if (event_type == nullptr) {
    return false;
  }
  std::atomic<bool> done(false);
  std::atomic<pid_t> thread_id(0);
  std::thread thread([&]() {
    thread_id = gettid();
    while (!done) {
      usleep(1);
    }
    usleep(1);  // Make a sched out to generate one sample.
  });
  while (thread_id == 0) {
    usleep(1);
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*event_type);
  attr.freq = 0;
  attr.sample_period = 1;
  std::unique_ptr<EventFd> event_fd =
      EventFd::OpenEventFile(attr, thread_id, -1, nullptr, event_type->name);
  if (event_fd == nullptr || !event_fd->CreateMappedBuffer(4, true)) {
    done = true;
    thread.join();
    return false;
  }
  done = true;
  thread.join();

  std::vector<char> buffer = event_fd->GetAvailableMmapData();
  std::vector<std::unique_ptr<Record>> records =
      ReadRecordsFromBuffer(attr, buffer.data(), buffer.size());
  for (auto& r : records) {
    if (r->type() == PERF_RECORD_SAMPLE) {
      auto& record = *static_cast<SampleRecord*>(r.get());
      if (record.ip_data.ip != 0) {
        return true;
      }
    }
  }
  return false;
}

bool IsSettingClockIdSupported() {
  // Do the real check only once and keep the result in a static variable.
  static int is_supported = -1;
  if (is_supported == -1) {
    const EventType* type = FindEventTypeByName("cpu-clock");
    if (type == nullptr) {
      is_supported = 0;
    } else {
      // Check whether the kernel supports setting clockid, which was added in kernel 4.0.
      // Checking one clockid is enough, because all the clockids we need were already
      // supported before kernel 4.0.
      perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
      attr.use_clockid = 1;
      attr.clockid = CLOCK_MONOTONIC;
      is_supported = IsEventAttrSupported(attr, type->name) ? 1 : 0;
    }
  }
  return is_supported;
}

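// Probe support for PERF_RECORD_MMAP2 records by checking whether the kernel accepts a
// cpu-clock attr with mmap2 set.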
bool IsMmap2Supported() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.mmap2 = 1;
  return IsEventAttrSupported(attr, type->name);
}

EventSelectionSet::EventSelectionSet(bool for_stat_cmd)
    : for_stat_cmd_(for_stat_cmd), loop_(new IOEventLoop) {}

EventSelectionSet::~EventSelectionSet() {}

bool EventSelectionSet::BuildAndCheckEventSelection(const std::string& event_name, bool first_event,
                                                    EventSelection* selection) {
  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
  if (event_type == nullptr) {
    return false;
  }
  if (for_stat_cmd_) {
    if (event_type->event_type.name == "cpu-clock" ||
        event_type->event_type.name == "task-clock") {
      if (event_type->exclude_user || event_type->exclude_kernel) {
        LOG(ERROR) << "Modifier u and modifier k used in event type "
                   << event_type->event_type.name
                   << " are not supported by the kernel.";
        return false;
      }
    }
  }
  selection->event_type_modifier = *event_type;
  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
  selection->event_attr.exclude_user = event_type->exclude_user;
  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
  selection->event_attr.exclude_hv = event_type->exclude_hv;
  selection->event_attr.exclude_host = event_type->exclude_host;
  selection->event_attr.exclude_guest = event_type->exclude_guest;
  selection->event_attr.precise_ip = event_type->precise_ip;
  if (IsEtmEventType(event_type->event_type.type)) {
    auto& etm_recorder = ETMRecorder::GetInstance();
    if (!etm_recorder.CheckEtmSupport()) {
      return false;
    }
    ETMRecorder::GetInstance().SetEtmPerfEventAttr(&selection->event_attr);
  }
  bool set_default_sample_freq = false;
  if (!for_stat_cmd_) {
    if (event_type->event_type.type == PERF_TYPE_TRACEPOINT) {
      selection->event_attr.freq = 0;
      selection->event_attr.sample_period = DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
    } else if (IsEtmEventType(event_type->event_type.type)) {
      // ETM recording has no sample frequency to adjust. Using a sample frequency only
      // wastes time enabling/disabling ETM devices, so don't adjust the frequency by default.
      selection->event_attr.freq = 0;
      selection->event_attr.sample_period = 1;
    } else {
      selection->event_attr.freq = 1;
      // Setting the default sample freq here may print the message "Adjust sample freq to
      // max allowed sample freq", which is misleading because the default sample freq may
      // not be the final sample freq we use. So use the minimum sample freq (1) here.
      selection->event_attr.sample_freq = 1;
      set_default_sample_freq = true;
    }
    // We only need to dump mmap and comm records for the first event type, because all
    // event types monitor the same processes.
    if (first_event) {
      selection->event_attr.mmap = 1;
      selection->event_attr.comm = 1;
      if (IsMmap2Supported()) {
        selection->event_attr.mmap2 = 1;
      }
    }
  }
  // PMU events are provided by the kernel, so they should be supported.
  if (!event_type->event_type.IsPmuEvent() &&
      !IsEventAttrSupported(selection->event_attr, selection->event_type_modifier.name)) {
    LOG(ERROR) << "Event type '" << event_type->name
               << "' is not supported on the device";
    return false;
  }
  if (set_default_sample_freq) {
    selection->event_attr.sample_freq = DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT;
  }

  selection->event_fds.clear();

  for (const auto& group : groups_) {
    for (const auto& sel : group) {
      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
                   << "' appears more than once";
        return false;
      }
    }
  }
  return true;
}

bool EventSelectionSet::AddEventType(const std::string& event_name, size_t* group_id) {
  return AddEventGroup(std::vector<std::string>(1, event_name), group_id);
}

bool EventSelectionSet::AddEventGroup(
    const std::vector<std::string>& event_names, size_t* group_id) {
  EventSelectionGroup group;
  bool first_event = groups_.empty();
  bool first_in_group = true;
  for (const auto& event_name : event_names) {
    EventSelection selection;
    if (!BuildAndCheckEventSelection(event_name, first_event, &selection)) {
      return false;
    }
    if (IsEtmEventType(selection.event_attr.type)) {
      has_aux_trace_ = true;
    }
    if (first_in_group) {
      auto& event_type = selection.event_type_modifier.event_type;
      if (event_type.IsPmuEvent()) {
        selection.allowed_cpus = event_type.GetPmuCpumask();
      }
    }
    first_event = false;
    first_in_group = false;
    group.push_back(std::move(selection));
  }
  groups_.push_back(std::move(group));
  UnionSampleType();
  if (group_id != nullptr) {
    *group_id = groups_.size() - 1;
  }
  return true;
}

std::vector<const EventType*> EventSelectionSet::GetEvents() const {
  std::vector<const EventType*> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      result.push_back(&selection.event_type_modifier.event_type);
    }
  }
  return result;
}

std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
  std::vector<const EventType*> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (selection.event_type_modifier.event_type.type ==
          PERF_TYPE_TRACEPOINT) {
        result.push_back(&selection.event_type_modifier.event_type);
      }
    }
  }
  return result;
}

bool EventSelectionSet::ExcludeKernel() const {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (!selection.event_type_modifier.exclude_kernel) {
        return false;
      }
    }
  }
  return true;
}

std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
  std::vector<EventAttrWithId> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      EventAttrWithId attr_id;
      attr_id.attr = &selection.event_attr;
      for (const auto& fd : selection.event_fds) {
        attr_id.ids.push_back(fd->Id());
      }
      result.push_back(attr_id);
    }
  }
  return result;
}

// Unioning the sample types of different event attrs makes reading sample
// records in perf.data easier.
void EventSelectionSet::UnionSampleType() {
  uint64_t sample_type = 0;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      sample_type |= selection.event_attr.sample_type;
    }
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type = sample_type;
    }
  }
}

void EventSelectionSet::SetEnableOnExec(bool enable) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      // If sampling is enabled on exec, then it is disabled at startup;
      // otherwise it should be enabled at startup. Don't use
      // ioctl(PERF_EVENT_IOC_ENABLE) to enable it after perf_event_open(),
      // because some android kernels can't handle ioctl() well when cpu-hotplug
      // happens. See http://b/25193162.
      if (enable) {
        selection.event_attr.enable_on_exec = 1;
        selection.event_attr.disabled = 1;
      } else {
        selection.event_attr.enable_on_exec = 0;
        selection.event_attr.disabled = 0;
      }
    }
  }
}

bool EventSelectionSet::GetEnableOnExec() {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (selection.event_attr.enable_on_exec == 0) {
        return false;
      }
    }
  }
  return true;
}

void EventSelectionSet::SampleIdAll() {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_id_all = 1;
    }
  }
}

void EventSelectionSet::SetSampleSpeed(size_t group_id, const SampleSpeed& speed) {
  CHECK_LT(group_id, groups_.size());
  for (auto& selection : groups_[group_id]) {
    if (speed.UseFreq()) {
      selection.event_attr.freq = 1;
      selection.event_attr.sample_freq = speed.sample_freq;
    } else {
      selection.event_attr.freq = 0;
      selection.event_attr.sample_period = speed.sample_period;
    }
  }
}

bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
  if (branch_sample_type != 0 &&
      (branch_sample_type &
       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex
               << branch_sample_type;
    return false;
  }
  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
    LOG(ERROR) << "branch stack sampling is not supported on this device.";
    return false;
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      perf_event_attr& attr = selection.event_attr;
      if (branch_sample_type != 0) {
        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
      } else {
        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
      }
      attr.branch_sample_type = branch_sample_type;
    }
  }
  return true;
}

void EventSelectionSet::EnableFpCallChainSampling() {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
    }
  }
}

bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
  if (!IsDwarfCallChainSamplingSupported()) {
    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
    return false;
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN |
                                          PERF_SAMPLE_REGS_USER |
                                          PERF_SAMPLE_STACK_USER;
      selection.event_attr.exclude_callchain_user = 1;
      selection.event_attr.sample_regs_user =
          GetSupportedRegMask(GetMachineArch());
      selection.event_attr.sample_stack_user = dump_stack_size;
    }
  }
  return true;
}

void EventSelectionSet::SetInherit(bool enable) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.inherit = (enable ? 1 : 0);
    }
  }
}

void EventSelectionSet::SetClockId(int clock_id) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.use_clockid = 1;
      selection.event_attr.clockid = clock_id;
    }
  }
}

bool EventSelectionSet::NeedKernelSymbol() const {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (!selection.event_type_modifier.exclude_kernel) {
        return true;
      }
    }
  }
  return false;
}

void EventSelectionSet::SetRecordNotExecutableMaps(bool record) {
  // We only need to dump non-executable mmap records for the first event type.
  groups_[0][0].event_attr.mmap_data = record ? 1 : 0;
}

bool EventSelectionSet::RecordNotExecutableMaps() const {
  return groups_[0][0].event_attr.mmap_data == 1;
}

static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
  std::vector<int> online_cpus = GetOnlineCpus();
  for (const auto& cpu : cpus) {
    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) ==
        online_cpus.end()) {
      LOG(ERROR) << "cpu " << cpu << " is not online.";
      return false;
    }
  }
  return true;
}

bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group,
                                              pid_t tid, int cpu,
                                              std::string* failed_event_type) {
  std::vector<std::unique_ptr<EventFd>> event_fds;
  // Given a tid and cpu, events in the same group should either all open
  // successfully or all fail to open.
  EventFd* group_fd = nullptr;
  for (auto& selection : group) {
    std::unique_ptr<EventFd> event_fd = EventFd::OpenEventFile(
        selection.event_attr, tid, cpu, group_fd, selection.event_type_modifier.name, false);
    if (!event_fd) {
      *failed_event_type = selection.event_type_modifier.name;
      return false;
    }
    LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
    event_fds.push_back(std::move(event_fd));
    if (group_fd == nullptr) {
      group_fd = event_fds.back().get();
    }
  }
  for (size_t i = 0; i < group.size(); ++i) {
    group[i].event_fds.push_back(std::move(event_fds[i]));
  }
  return true;
}

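// Merge the explicitly requested threads with every thread of the requested processes.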
static std::set<pid_t> PrepareThreads(const std::set<pid_t>& processes,
                                      const std::set<pid_t>& threads) {
  std::set<pid_t> result = threads;
  for (auto& pid : processes) {
    std::vector<pid_t> tids = GetThreadsInProcess(pid);
    result.insert(tids.begin(), tids.end());
  }
  return result;
}

bool EventSelectionSet::OpenEventFiles(const std::vector<int>& cpus) {
  std::vector<int> monitored_cpus;
  if (cpus.empty()) {
    monitored_cpus = GetOnlineCpus();
  } else if (cpus.size() == 1 && cpus[0] == -1) {
    monitored_cpus = {-1};
  } else {
    if (!CheckIfCpusOnline(cpus)) {
      return false;
    }
    monitored_cpus = cpus;
  }
  std::set<pid_t> threads = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    size_t success_count = 0;
    std::string failed_event_type;
    for (const auto tid : threads) {
      const std::vector<int>* pcpus = &monitored_cpus;
      if (!group[0].allowed_cpus.empty()) {
        // Override the cpu list if the event's PMU has a cpumask, as such PMUs are
        // cpu-agnostic and it is meaningless to specify cpus for them.
        pcpus = &group[0].allowed_cpus;
      }
      for (const auto& cpu : *pcpus) {
        if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
          success_count++;
        }
      }
    }
    // We can't guarantee that opening the perf event file succeeds for each thread on each
    // cpu: threads may exit between PrepareThreads() and OpenEventFilesOnGroup(), and cpus
    // may go offline between GetOnlineCpus() and OpenEventFilesOnGroup(). So we only check
    // that we can monitor at least one thread for each event group.
    if (success_count == 0) {
      int error_number = errno;
      PLOG(ERROR) << "failed to open perf event file for event_type " << failed_event_type;
      if (error_number == EMFILE) {
        LOG(ERROR) << "Please increase hard limit of open file numbers.";
      }
      return false;
    }
  }
  return ApplyFilters();
}

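// Build an ETM address range filter string ("filter 0/<file_size>@<path>" per binary) from
// include_filters_ and set it on every ETM event fd.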
bool EventSelectionSet::ApplyFilters() {
  if (include_filters_.empty()) {
    return true;
  }
  if (!has_aux_trace_) {
    LOG(ERROR) << "include filters only take effect in cs-etm instruction tracing";
    return false;
  }
  size_t supported_pairs = ETMRecorder::GetInstance().GetAddrFilterPairs();
  if (supported_pairs < include_filters_.size()) {
    LOG(ERROR) << "filter binary count is " << include_filters_.size()
               << ", bigger than maximum supported filters on device, which is " << supported_pairs;
    return false;
  }
  std::string filter_str;
  for (auto& binary : include_filters_) {
    std::string path;
    if (!android::base::Realpath(binary, &path)) {
      PLOG(ERROR) << "failed to find include filter binary: " << binary;
      return false;
    }
    uint64_t file_size = GetFileSize(path);
    if (!filter_str.empty()) {
      filter_str += ',';
    }
    android::base::StringAppendF(&filter_str, "filter 0/%" PRIu64 "@%s", file_size, path.c_str());
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      if (IsEtmEventType(selection.event_type_modifier.event_type.type)) {
        for (auto& event_fd : selection.event_fds) {
          if (!event_fd->SetFilter(filter_str)) {
            return false;
          }
        }
      }
    }
  }
  return true;
}

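// Read one counter value from an event fd, recording which thread and cpu it belongs to.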
static bool ReadCounter(EventFd* event_fd, CounterInfo* counter) {
  if (!event_fd->ReadCounter(&counter->counter)) {
    return false;
  }
  counter->tid = event_fd->ThreadId();
  counter->cpu = event_fd->Cpu();
  return true;
}

bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
  counters->clear();
  for (size_t i = 0; i < groups_.size(); ++i) {
    for (auto& selection : groups_[i]) {
      CountersInfo counters_info;
      counters_info.group_id = i;
      counters_info.event_name = selection.event_type_modifier.event_type.name;
      counters_info.event_modifier = selection.event_type_modifier.modifier;
      counters_info.counters = selection.hotplugged_counters;
      for (auto& event_fd : selection.event_fds) {
        CounterInfo counter;
        if (!ReadCounter(event_fd.get(), &counter)) {
          return false;
        }
        counters_info.counters.push_back(counter);
      }
      counters->push_back(counters_info);
    }
  }
  return true;
}

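// Kernel buffer mapping is delegated to the RecordReadThread created here; the event fds
// are handed to it later in PrepareToReadMmapEventData().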
bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages, size_t max_mmap_pages,
                                       size_t aux_buffer_size, size_t record_buffer_size,
                                       bool allow_cutting_samples, bool exclude_perf) {
  record_read_thread_.reset(
      new simpleperf::RecordReadThread(record_buffer_size, groups_[0][0].event_attr, min_mmap_pages,
                                       max_mmap_pages, aux_buffer_size, allow_cutting_samples,
                                       exclude_perf));
  return true;
}

bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
  // Prepare record callback function.
  record_callback_ = callback;
  if (!record_read_thread_->RegisterDataCallback(*loop_,
                                                 [this]() { return ReadMmapEventData(true); })) {
    return false;
  }
  std::vector<EventFd*> event_fds;
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        event_fds.push_back(event_fd.get());
      }
    }
  }
  return record_read_thread_->AddEventFds(event_fds);
}

bool EventSelectionSet::SyncKernelBuffer() {
  return record_read_thread_->SyncKernelBuffer();
}

// Read records from the RecordBuffer. If with_time_limit is false, read until the RecordBuffer is
// empty, otherwise stop after 100 ms or when the record buffer is empty.
bool EventSelectionSet::ReadMmapEventData(bool with_time_limit) {
  uint64_t start_time_in_ns;
  if (with_time_limit) {
    start_time_in_ns = GetSystemClock();
  }
  std::unique_ptr<Record> r;
  while ((r = record_read_thread_->GetRecord()) != nullptr) {
    if (!record_callback_(r.get())) {
      return false;
    }
    if (with_time_limit && (GetSystemClock() - start_time_in_ns) >= 1e8) {
      break;
    }
  }
  return true;
}

bool EventSelectionSet::FinishReadMmapEventData() {
  // Stop the read thread, so we don't get more records beyond the current time.
  if (!SyncKernelBuffer() || !record_read_thread_->StopReadThread()) {
    return false;
  }
  if (!ReadMmapEventData(false)) {
    return false;
  }
  return true;
}

bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
                                 [&]() { return CheckMonitoredTargets(); });
}

bool EventSelectionSet::CheckMonitoredTargets() {
  if (!HasSampler()) {
    return loop_->ExitLoop();
  }
  for (const auto& tid : threads_) {
    if (IsThreadAlive(tid)) {
      return true;
    }
  }
  for (const auto& pid : processes_) {
    if (IsThreadAlive(pid)) {
      return true;
    }
  }
  return loop_->ExitLoop();
}

bool EventSelectionSet::HasSampler() {
  for (auto& group : groups_) {
    for (auto& sel : group) {
      if (!sel.event_fds.empty()) {
        return true;
      }
    }
  }
  return false;
}

bool EventSelectionSet::SetEnableEvents(bool enable) {
  for (auto& group : groups_) {
    for (auto& sel : group) {
      for (auto& fd : sel.event_fds) {
        if (!fd->SetEnableEvent(enable)) {
          return false;
        }
      }
    }
  }
  return true;
}
738