/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include "event_fd.h"

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <memory>
#include <cutils/trace.h>
#include <utils/Trace.h>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>

#include "environment.h"
#include "event_attr.h"
#include "event_type.h"
#include "perf_event.h"
#include "utils.h"

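// libc provides no wrapper for the perf_event_open syscall, so it is invoked
// directly through syscall().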
static int perf_event_open(const perf_event_attr& attr, pid_t pid, int cpu,
                           int group_fd, unsigned long flags) {  // NOLINT
  return syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
}

std::unique_ptr<EventFd> EventFd::OpenEventFile(const perf_event_attr& attr,
                                                pid_t tid, int cpu,
                                                EventFd* group_event_fd,
                                                bool report_error) {
  std::string event_name = GetEventNameByAttr(attr);
  int group_fd = -1;
  if (group_event_fd != nullptr) {
    group_fd = group_event_fd->perf_event_fd_;
  }
  perf_event_attr real_attr = attr;
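  // If the requested sample frequency exceeds the kernel's allowed maximum,
  // clamp it to that maximum (GetMaxSampleFrequency is assumed to report the
  // kernel limit, e.g. from /proc/sys/kernel/perf_event_max_sample_rate).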
  if (attr.freq) {
    uint64_t max_sample_freq;
    if (GetMaxSampleFrequency(&max_sample_freq) && max_sample_freq < attr.sample_freq) {
      static bool warned = false;
      if (!warned) {
        warned = true;
        LOG(INFO) << "Adjust sample freq to max allowed sample freq " << max_sample_freq;
      }
      real_attr.sample_freq = max_sample_freq;
    }
  }
  int perf_event_fd = perf_event_open(real_attr, tid, cpu, group_fd, 0);
  if (perf_event_fd == -1) {
    if (report_error) {
      PLOG(ERROR) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    } else {
      PLOG(DEBUG) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    }
    return nullptr;
  }
  if (fcntl(perf_event_fd, F_SETFD, FD_CLOEXEC) == -1) {
    if (report_error) {
      PLOG(ERROR) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    } else {
      PLOG(DEBUG) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    }
    return nullptr;
  }
  return std::unique_ptr<EventFd>(
      new EventFd(real_attr, perf_event_fd, event_name, tid, cpu));
}

EventFd::~EventFd() {
  DestroyMappedBuffer();
  close(perf_event_fd_);
}

std::string EventFd::Name() const {
  return android::base::StringPrintf(
      "perf_event_file(event %s, tid %d, cpu %d)", event_name_.c_str(), tid_,
      cpu_);
}

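// The event id is read lazily: it is fetched from the counter on first use and
// cached in id_ (0 is treated as "not read yet").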
uint64_t EventFd::Id() const {
  if (id_ == 0) {
    PerfCounter counter;
    if (InnerReadCounter(&counter)) {
      id_ = counter.id;
    }
  }
  return id_;
}

bool EventFd::SetEnableEvent(bool enable) {
  int result = ioctl(perf_event_fd_, enable ? PERF_EVENT_IOC_ENABLE : PERF_EVENT_IOC_DISABLE, 0);
  if (result < 0) {
    PLOG(ERROR) << "ioctl(" << (enable ? "enable" : "disable") << ") " << Name() << " failed";
    return false;
  }
  return true;
}

bool EventFd::InnerReadCounter(PerfCounter* counter) const {
  CHECK(counter != nullptr);
  if (!android::base::ReadFully(perf_event_fd_, counter, sizeof(*counter))) {
    PLOG(ERROR) << "ReadCounter from " << Name() << " failed";
    return false;
  }
  return true;
}

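// ReadCounter also reports the delta since the previous read as a systrace
// counter via ATRACE_INT64, named after the event (and tid/cpu when per-thread).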
bool EventFd::ReadCounter(PerfCounter* counter) {
  if (!InnerReadCounter(counter)) {
    return false;
  }
  // Trace is always available to systrace if enabled.
  if (tid_ > 0) {
    ATRACE_INT64(android::base::StringPrintf(
                   "%s_tid%d_cpu%d", event_name_.c_str(), tid_,
                   cpu_).c_str(), counter->value - last_counter_value_);
  } else {
    ATRACE_INT64(android::base::StringPrintf(
                   "%s_cpu%d", event_name_.c_str(),
                   cpu_).c_str(), counter->value - last_counter_value_);
  }
  last_counter_value_ = counter->value;
  return true;
}

bool EventFd::CreateMappedBuffer(size_t mmap_pages, bool report_error) {
  CHECK(IsPowerOfTwo(mmap_pages));
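  // The mapping consists of one metadata page (perf_event_mmap_page) followed
  // by 2^n data pages, hence (mmap_pages + 1) pages in total.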
  size_t page_size = sysconf(_SC_PAGE_SIZE);
  size_t mmap_len = (mmap_pages + 1) * page_size;
  void* mmap_addr = mmap(nullptr, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED,
                         perf_event_fd_, 0);
  if (mmap_addr == MAP_FAILED) {
    bool is_perm_error = (errno == EPERM);
    if (report_error) {
      PLOG(ERROR) << "mmap(" << mmap_pages << ") failed for " << Name();
    } else {
      PLOG(DEBUG) << "mmap(" << mmap_pages << ") failed for " << Name();
    }
    if (report_error && is_perm_error) {
      LOG(ERROR)
          << "It seems the kernel doesn't allow allocating enough "
          << "buffer for dumping samples, consider decreasing mmap pages(-m).";
    }
    return false;
  }
  mmap_addr_ = mmap_addr;
  mmap_len_ = mmap_len;
  mmap_metadata_page_ = reinterpret_cast<perf_event_mmap_page*>(mmap_addr_);
  mmap_data_buffer_ = reinterpret_cast<char*>(mmap_addr_) + page_size;
  mmap_data_buffer_size_ = mmap_len_ - page_size;
  return true;
}

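// Route this event's output into another event's mapped ring buffer with
// PERF_EVENT_IOC_SET_OUTPUT, so several events can share a single buffer.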
bool EventFd::ShareMappedBuffer(const EventFd& event_fd, bool report_error) {
  CHECK(!HasMappedBuffer());
  CHECK(event_fd.HasMappedBuffer());
  int result =
      ioctl(perf_event_fd_, PERF_EVENT_IOC_SET_OUTPUT, event_fd.perf_event_fd_);
  if (result != 0) {
    if (report_error) {
      PLOG(ERROR) << "failed to share mapped buffer of "
                  << event_fd.perf_event_fd_ << " with " << perf_event_fd_;
    }
    return false;
  }
  return true;
}

void EventFd::DestroyMappedBuffer() {
  if (HasMappedBuffer()) {
    munmap(mmap_addr_, mmap_len_);
    mmap_addr_ = nullptr;
    mmap_len_ = 0;
    mmap_metadata_page_ = nullptr;
    mmap_data_buffer_ = nullptr;
    mmap_data_buffer_size_ = 0;
  }
}

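// Copy out all records currently available in the ring buffer, handling
// wrap-around at the end of the buffer, then advance data_tail past them.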
std::vector<char> EventFd::GetAvailableMmapData() {
  size_t data_pos;
  size_t data_size = GetAvailableMmapDataSize(data_pos);
  std::vector<char> data(data_size);
  if (data_size > 0) {
    size_t copy_size = std::min(data_size, mmap_data_buffer_size_ - data_pos);
    memcpy(&data[0], mmap_data_buffer_ + data_pos, copy_size);
    if (copy_size < data_size) {
      memcpy(&data[copy_size], mmap_data_buffer_, data_size - copy_size);
    }
    DiscardMmapData(data_size);
  }
  return data;
}

size_t EventFd::GetAvailableMmapDataSize(size_t& data_pos) {
  // The mmap_data_buffer is used as a ring buffer between the kernel and
  // simpleperf. The kernel continuously writes records to the buffer, and
  // simpleperf continuously reads records out.
  //         _________________________________________
  // buffer | can write   |   can read   |  can write |
  //                      ^              ^
  //                    read_head       write_head
  //
  // So simpleperf can read records in [read_head, write_head), and the kernel
  // can write records in [write_head, read_head). The kernel is responsible
  // for updating write_head, and simpleperf is responsible for updating
  // read_head.

  uint64_t write_head = mmap_metadata_page_->data_head;
  uint64_t read_head = mmap_metadata_page_->data_tail;
  if (write_head == read_head) {
    // No available data.
    return 0;
  }
  // rmb() is used to ensure that the data is read only after data_head has
  // been read.
  __sync_synchronize();
  data_pos = read_head & (mmap_data_buffer_size_ - 1);
  return write_head - read_head;
}

void EventFd::DiscardMmapData(size_t discard_size) {
  // mb() is used to ensure that the data has been fully read before data_tail
  // is updated.
  __sync_synchronize();
  mmap_metadata_page_->data_tail += discard_size;
}

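// Register the perf event fd with the IO event loop, so callback is invoked
// whenever the fd becomes readable (i.e. new records are available).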
bool EventFd::StartPolling(IOEventLoop& loop,
                           const std::function<bool()>& callback) {
  ioevent_ref_ = loop.AddReadEvent(perf_event_fd_, callback);
  return ioevent_ref_ != nullptr;
}

bool EventFd::StopPolling() { return IOEventLoop::DelEvent(ioevent_ref_); }

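// Probe kernel support for an event attr by trying to open a perf event file
// for the current process; user-space samplers need no kernel support.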
bool IsEventAttrSupported(const perf_event_attr& attr) {
  if (attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
      attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
    // User space samplers don't need kernel support.
    return true;
  }
  std::unique_ptr<EventFd> event_fd = EventFd::OpenEventFile(attr, getpid(), -1, nullptr, false);
  return event_fd != nullptr;
}