/*
 * Copyright (c) 2016, Google Inc.
 * All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <cstring>
#include <iomanip>
#include <iostream>
#include <limits>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <vector>

#include "int_compat.h"
#include "intervalmap.h"
#include "path_matching.h"
#include "perf_data_handler.h"
#include "string_compat.h"
#include "quipper/perf_reader.h"

using quipper::PerfDataProto;
using quipper::PerfDataProto_MMapEvent;
using quipper::PerfDataProto_CommEvent;

namespace perftools {
namespace {

// Normalizer processes a PerfDataProto and maintains tables of the
// current metadata for each process.  It drives callbacks to
// PerfDataHandler with samples in a fully normalized form.
class Normalizer {
 public:
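  // Index the proto's metadata up front: render each build-id as a lowercase
  // hex string keyed by filename (or, when the filename is absent, by the hex
  // rendering of its md5 prefix), and map every attr id to the index of its
  // file_attr so samples can later be matched to their event.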
  Normalizer(const PerfDataProto& perf_proto, PerfDataHandler* handler)
      : perf_proto_(perf_proto), handler_(handler) {
    for (const auto& build_id : perf_proto_.build_ids()) {
      const string& bytes = build_id.build_id_hash();
      std::stringstream hex;
      for (size_t i = 0; i < bytes.size(); ++i) {
        // The char must be converted to an int before stringstream will
        // format it as a number; a negative byte such as -8 must become 0xf8,
        // not be sign-extended to 0xfffffff8.  Casting through unsigned char
        // avoids the sign extension.
        const auto& byte = static_cast<unsigned char>(bytes[i]);
        hex << std::hex << std::setfill('0') << std::setw(2)
            << static_cast<int>(byte);
      }
      if (build_id.filename() != "") {
        filename_to_build_id_[build_id.filename()] = hex.str();
      } else {
        std::stringstream filename;
        filename << std::hex << build_id.filename_md5_prefix();
        filename_to_build_id_[filename.str().c_str()] = hex.str();
      }
    }

    uint64 current_event_index = 0;
    for (const auto& attr : perf_proto_.file_attrs()) {
      for (uint64 id : attr.ids()) {
        id_to_event_index_[id] = current_event_index;
      }
      current_event_index++;
    }
  }

  Normalizer(const Normalizer&) = delete;
  Normalizer& operator=(const Normalizer&) = delete;

  ~Normalizer() {}

  // Iterate over the events in the proto, normalizing them and driving the
  // PerfDataHandler callbacks with the results.
  void Normalize();

 private:
  // Using a 32-bit type for the PID values as the max PID value on 64-bit
  // systems is 2^22, see http://man7.org/linux/man-pages/man5/proc.5.html.
  typedef std::unordered_map<uint32, PerfDataHandler::Mapping*> PidToMMapMap;
  typedef std::unordered_map<uint32, const PerfDataProto_CommEvent*>
      PidToCommMap;

  typedef IntervalMap<const PerfDataHandler::Mapping*> MMapIntervalMap;

  void UpdateMapsWithMMapEvent(const quipper::PerfDataProto_MMapEvent* mmap);

  // Copy the parent's mmaps/comm if they exist.  Otherwise, items
  // will be lazily populated.
  void UpdateMapsWithForkEvent(const quipper::PerfDataProto_ForkEvent& fork);
  void LogStats();

  // Normalize the sample_event in event_proto and call handler_->Sample().
  void InvokeHandleSample(const quipper::PerfDataProto::PerfEvent& perf_event);

  // Find the MMAP event which has ip in its address range from pid.  If no
  // mapping is found, returns nullptr.
  const PerfDataHandler::Mapping* TryLookupInPid(uint32 pid, uint64 ip) const;

  // Find the mapping for a given ip given a pid context (in user or kernel
  // mappings); returns nullptr if none can be found.
  const PerfDataHandler::Mapping* GetMappingFromPidAndIP(uint32 pid,
                                                         uint64 ip) const;

  // Find the main MMAP event for this pid.  If no mapping is found,
  // nullptr is returned.
  const PerfDataHandler::Mapping* GetMainMMapFromPid(uint32 pid) const;

  // For profiles with a single event, perf doesn't bother sending the
  // id.  So, if there is only one event, the event index must be 0.
  // Returns the event index corresponding to the id for this sample, or
  // -1 for an error.
  int64 GetEventIndexForSample(
      const quipper::PerfDataProto_SampleEvent& sample) const;

  const quipper::PerfDataProto& perf_proto_;
  PerfDataHandler* handler_;  // unowned.

  // Mappings we have allocated.
  std::vector<std::unique_ptr<PerfDataHandler::Mapping>> owned_mappings_;
  std::vector<std::unique_ptr<quipper::PerfDataProto_MMapEvent>>
      owned_quipper_mappings_;

  // The event for a given sample is determined by the id.
  // Map each id to an index in the event_profiles_ vector.
  std::unordered_map<uint64, uint64> id_to_event_index_;

  // pid_to_comm_event maps a pid to the corresponding comm event.
  PidToCommMap pid_to_comm_event_;

  // pid_to_mmaps maps a pid to all mmap events that correspond to that pid.
  std::unordered_map<uint32, std::unique_ptr<MMapIntervalMap>> pid_to_mmaps_;

  // pid_to_executable_mmap maps a pid to the mmap that most likely contains
  // the filename of the main executable for that pid.
  PidToMMapMap pid_to_executable_mmap_;

  // map filenames to build-ids.
  std::unordered_map<string, string> filename_to_build_id_;

  struct {
    int64 samples = 0;
    int64 missing_main_mmap = 0;
    int64 missing_sample_mmap = 0;

    int64 callchain_ips = 0;
    int64 missing_callchain_mmap = 0;

    int64 branch_stack_ips = 0;
    int64 missing_branch_stack_mmap = 0;

    int64 no_event_errors = 0;
  } stat_;
};

void Normalizer::UpdateMapsWithForkEvent(
    const quipper::PerfDataProto_ForkEvent& fork) {
  if (fork.pid() == fork.ppid()) {
    // Don't care about threads.
    return;
  }
  const auto& it = pid_to_mmaps_.find(fork.ppid());
  if (it != pid_to_mmaps_.end()) {
    pid_to_mmaps_[fork.pid()] = std::unique_ptr<MMapIntervalMap>(
        new MMapIntervalMap(*it->second.get()));
  }
  auto comm_it = pid_to_comm_event_.find(fork.ppid());
  if (comm_it != pid_to_comm_event_.end()) {
    pid_to_comm_event_[fork.pid()] = comm_it->second;
  }
  auto exec_mmap_it = pid_to_executable_mmap_.find(fork.ppid());
  if (exec_mmap_it != pid_to_executable_mmap_.end()) {
    pid_to_executable_mmap_[fork.pid()] = exec_mmap_it->second;
  }
}

inline bool HasPrefixString(const string& haystack, const char* needle) {
  const size_t needle_len = strlen(needle);
  const size_t haystack_len = haystack.length();
  return haystack_len >= needle_len &&
         haystack.compare(0, needle_len, needle) == 0;
}

inline bool HasSuffixString(const string& haystack, const char* needle) {
  const size_t needle_len = strlen(needle);
  const size_t haystack_len = haystack.length();
  return haystack_len >= needle_len &&
         haystack.compare(haystack_len - needle_len, needle_len, needle) == 0;
}

void Normalizer::Normalize() {
  for (const auto& event_proto : perf_proto_.events()) {
    if (event_proto.has_mmap_event()) {
      UpdateMapsWithMMapEvent(&event_proto.mmap_event());
    } else if (event_proto.has_comm_event()) {
      if (event_proto.comm_event().pid() == event_proto.comm_event().tid()) {
        // pid==tid happens on exec()
        pid_to_executable_mmap_.erase(event_proto.comm_event().pid());
        pid_to_comm_event_[event_proto.comm_event().pid()] =
            &event_proto.comm_event();
      }
      PerfDataHandler::CommContext comm_context;
      comm_context.comm = &event_proto.comm_event();
      handler_->Comm(comm_context);
    } else if (event_proto.has_fork_event()) {
      UpdateMapsWithForkEvent(event_proto.fork_event());
    } else if (event_proto.has_lost_event()) {
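      // A lost event reports how many events the kernel dropped.  Account for
      // them by synthesizing one sample per lost event; these carry no mapping
      // information, so they are also counted against the missing-mmap stats.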
      stat_.samples += event_proto.lost_event().lost();
      stat_.missing_main_mmap += event_proto.lost_event().lost();
      stat_.missing_sample_mmap += event_proto.lost_event().lost();
      quipper::PerfDataProto::SampleEvent sample;
      quipper::PerfDataProto::EventHeader header;
      sample.set_id(event_proto.lost_event().id());
      sample.set_pid(event_proto.lost_event().sample_info().pid());
      sample.set_tid(event_proto.lost_event().sample_info().tid());
      PerfDataHandler::SampleContext context(header, sample);
      context.file_attrs_index = GetEventIndexForSample(sample);
      if (context.file_attrs_index == -1) {
        ++stat_.no_event_errors;
        continue;
      }
      for (uint64 i = 0; i < event_proto.lost_event().lost(); ++i) {
        handler_->Sample(context);
      }
    } else if (event_proto.has_sample_event()) {
      InvokeHandleSample(event_proto);
    }
  }

  LogStats();
}

void Normalizer::InvokeHandleSample(
    const quipper::PerfDataProto::PerfEvent& event_proto) {
  if (!event_proto.has_sample_event()) {
    std::cerr << "Expected sample event." << std::endl;
    abort();
  }
  const auto& sample = event_proto.sample_event();
  PerfDataHandler::SampleContext context(event_proto.header(),
                                         event_proto.sample_event());
  context.file_attrs_index = GetEventIndexForSample(context.sample);
  if (context.file_attrs_index == -1) {
    ++stat_.no_event_errors;
    return;
  }
  ++stat_.samples;

  uint32 pid = sample.pid();

  context.sample_mapping = GetMappingFromPidAndIP(pid, sample.ip());
  stat_.missing_sample_mmap += context.sample_mapping == nullptr;

  context.main_mapping = GetMainMMapFromPid(pid);
  std::unique_ptr<PerfDataHandler::Mapping> fake;
  // Kernel samples might take some extra work.
  if (context.main_mapping == nullptr &&
      (event_proto.header().misc() & PERF_RECORD_MISC_CPUMODE_MASK) ==
          PERF_RECORD_MISC_KERNEL) {
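    // There is no main mapping for this pid, so attribute the kernel sample
    // to the process's comm via a synthetic mapping, reusing the kernel
    // build-id if one is known.  For pid 0, fall back to the kernel mapping
    // itself.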
    auto comm_it = pid_to_comm_event_.find(pid);
    auto kernel_it = pid_to_executable_mmap_.find(-1);
    if (comm_it != pid_to_comm_event_.end()) {
      const string* build_id = nullptr;
      if (kernel_it != pid_to_executable_mmap_.end()) {
        build_id = kernel_it->second->build_id;
      }
      fake.reset(new PerfDataHandler::Mapping(&comm_it->second->comm(),
                                              build_id, 0, 1, 0, 0));
      context.main_mapping = fake.get();
    } else if (pid == 0 && kernel_it != pid_to_executable_mmap_.end()) {
      context.main_mapping = kernel_it->second;
    }
  }

  stat_.missing_main_mmap += context.main_mapping == nullptr;

  // Normalize the callchain.
  context.callchain.resize(sample.callchain_size());
  for (int i = 0; i < sample.callchain_size(); ++i) {
    ++stat_.callchain_ips;
    context.callchain[i].ip = sample.callchain(i);
    context.callchain[i].mapping =
        GetMappingFromPidAndIP(pid, sample.callchain(i));
    stat_.missing_callchain_mmap += context.callchain[i].mapping == nullptr;
  }

  // Normalize the branch_stack.
  context.branch_stack.resize(sample.branch_stack_size());
  for (int i = 0; i < sample.branch_stack_size(); ++i) {
    stat_.branch_stack_ips += 2;
    auto bse = sample.branch_stack(i);
    // from
    context.branch_stack[i].from.ip = bse.from_ip();
    context.branch_stack[i].from.mapping =
        GetMappingFromPidAndIP(pid, bse.from_ip());
    stat_.missing_branch_stack_mmap +=
        context.branch_stack[i].from.mapping == nullptr;
    // to
    context.branch_stack[i].to.ip = bse.to_ip();
    context.branch_stack[i].to.mapping =
        GetMappingFromPidAndIP(pid, bse.to_ip());
    stat_.missing_branch_stack_mmap +=
        context.branch_stack[i].to.mapping == nullptr;
    // mispredicted
    context.branch_stack[i].mispredicted = bse.mispredicted();
  }

  handler_->Sample(context);
}

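// Logs an error if num exceeds max_missing_pct percent of denom, i.e. if too
// much of the described information was missing.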
static void CheckStat(int64 num, int64 denom, const string& desc) {
  const int max_missing_pct = 1;
  if (denom > 0 && num * 100 / denom > max_missing_pct) {
    LOG(ERROR) << "stat: " << desc << " " << num << "/" << denom;
  }
}

void Normalizer::LogStats() {
  CheckStat(stat_.missing_main_mmap, stat_.samples, "missing_main_mmap");
  CheckStat(stat_.missing_sample_mmap, stat_.samples, "missing_sample_mmap");
  CheckStat(stat_.missing_callchain_mmap, stat_.callchain_ips,
            "missing_callchain_mmap");
  CheckStat(stat_.missing_branch_stack_mmap, stat_.branch_stack_ips,
            "missing_branch_stack_mmap");
  CheckStat(stat_.no_event_errors, 1, "unknown event id");
}

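// Virtual (non-file-backed) mappings, e.g. "//anon", "[heap]", or "[vdso]".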
static bool IsVirtualMapping(const string& map_name) {
  return HasPrefixString(map_name, "//") ||
         (HasPrefixString(map_name, "[") && HasSuffixString(map_name, "]"));
}

void Normalizer::UpdateMapsWithMMapEvent(
    const quipper::PerfDataProto_MMapEvent* mmap) {
  if (mmap->len() == 0) {
    LOG(WARNING) << "bogus mapping: " << mmap->filename();
    return;
  }
  uint32 pid = mmap->pid();
  MMapIntervalMap* interval_map = nullptr;
  const auto& it = pid_to_mmaps_.find(pid);
  if (it == pid_to_mmaps_.end()) {
    interval_map = new MMapIntervalMap;
    pid_to_mmaps_[pid] = std::unique_ptr<MMapIntervalMap>(interval_map);
  } else {
    interval_map = it->second.get();
  }
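  // Resolve the mapping's build-id: look it up by filename or, when perf only
  // recorded an md5 prefix of the filename, by that prefix rendered in hex
  // (matching the keys built in the constructor).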
  std::unordered_map<string, string>::const_iterator build_id_it;
  if (mmap->filename() != "") {
    build_id_it = filename_to_build_id_.find(mmap->filename());
  } else {
    std::stringstream filename;
    filename << std::hex << mmap->filename_md5_prefix();
    build_id_it = filename_to_build_id_.find(filename.str());
  }

  const string* build_id = build_id_it == filename_to_build_id_.end()
                               ? nullptr
                               : &build_id_it->second;
  PerfDataHandler::Mapping* mapping = new PerfDataHandler::Mapping(
      &mmap->filename(), build_id, mmap->start(), mmap->start() + mmap->len(),
      mmap->pgoff(), mmap->filename_md5_prefix());
  owned_mappings_.emplace_back(mapping);
  if (mapping->file_offset > (static_cast<uint64>(1) << 63) &&
      mapping->limit > (static_cast<uint64>(1) << 63)) {
    // kernel is funky and basically swaps start and offset.  Arrange
    // them such that we can reasonably symbolize them later.
    uint64 old_start = mapping->start;
    // file_offset here actually refers to the address of the _stext
    // kernel symbol, so we need to align it.
    mapping->start = mapping->file_offset - mapping->file_offset % 4096;
    mapping->file_offset = old_start;
  }

  interval_map->Set(mapping->start, mapping->limit, mapping);
  // Pass the final mapping through to the subclass also.
  PerfDataHandler::MMapContext mmap_context;
  mmap_context.pid = pid;
  mmap_context.mapping = mapping;
  handler_->MMap(mmap_context);

  // Main executables are usually loaded at 0x8048000 or 0x400000.
  // If we ever see an MMAP starting at one of those locations, that should be
  // our guess.
  // This is true even if the old MMAP started at one of the locations, because
  // the pid may have been recycled since then (so newer is better).
  if (mapping->start == 0x8048000 || mapping->start == 0x400000) {
    pid_to_executable_mmap_[pid] = mapping;
    return;
  }
  // Figure out whether this MMAP is the main executable.
  // If there have been no previous MMAPs for this pid, then this MMAP is our
  // best guess.
  auto old_mapping_it = pid_to_executable_mmap_.find(pid);
  PerfDataHandler::Mapping* old_mapping =
      old_mapping_it == pid_to_executable_mmap_.end() ? nullptr
                                                      : old_mapping_it->second;

  if (old_mapping != nullptr && old_mapping->start == 0x400000 &&
      (old_mapping->filename == nullptr || *old_mapping->filename == "") &&
      mapping->start - mapping->file_offset == 0x400000) {
    // Hugepages remap the main binary, but the original mapping loses
    // its name, so we have this hack.
    old_mapping->filename = &mmap->filename();
  }

  static const char kKernelPrefix[] = "[kernel.kallsyms]";

  if (old_mapping == nullptr && !HasSuffixString(mmap->filename(), ".ko") &&
      !HasSuffixString(mmap->filename(), ".so") &&
      !IsDeletedSharedObject(mmap->filename()) &&
      !IsVersionedSharedObject(mmap->filename()) &&
      !IsVirtualMapping(mmap->filename()) &&
      !HasPrefixString(mmap->filename(), kKernelPrefix)) {
    if (!HasPrefixString(mmap->filename(), "/usr/bin") &&
        !HasPrefixString(mmap->filename(), "/usr/sbin") &&
        !HasSuffixString(mmap->filename(), "/sel_ldr")) {
      LOG(INFO) << "guessing main for pid: " << pid << " " << mmap->filename();
    }
    pid_to_executable_mmap_[pid] = mapping;
    return;
  }

  if (pid == std::numeric_limits<uint32>::max() &&
      HasPrefixString(mmap->filename(), kKernelPrefix)) {
    pid_to_executable_mmap_[pid] = mapping;
  }
}

const PerfDataHandler::Mapping* Normalizer::TryLookupInPid(uint32 pid,
                                                           uint64 ip) const {
  const auto& it = pid_to_mmaps_.find(pid);
  if (it == pid_to_mmaps_.end()) {
    VLOG(2) << "No mmaps for pid " << pid;
    return nullptr;
  }
  MMapIntervalMap* mmaps = it->second.get();

  const PerfDataHandler::Mapping* mapping = nullptr;
  mmaps->Lookup(ip, &mapping);
  return mapping;
}

// Find the mapping for ip in the context of pid.  We might be looking
// at a kernel IP, however (which can show up in any pid and is stored
// in our map as pid = -1), so check there if the lookup fails in our
// process.
const PerfDataHandler::Mapping* Normalizer::GetMappingFromPidAndIP(
    uint32 pid, uint64 ip) const {
  if (ip >= PERF_CONTEXT_MAX) {
    // These aren't real IPs, they're context hints.  Drop them.
    return nullptr;
  }
  // One could try to decide if this is a kernel or user sample
  // directly.  ahh@ thinks there's a heuristic that should work on
  // x86 (basically without any error): all kernel samples should have
  // 16 high bits set, all user samples should have high 16 bits
  // cleared.  But that's not portable, and on any arch (...hopefully)
  // the user/kernel mappings should be disjoint anyway, so just check
  // both, starting with user.  We could also use PERF_CONTEXT_KERNEL
  // and friends (see for instance how perf handles this:
  // https://goto.google.com/udgor) to know whether to check user or
  // kernel, but this seems more robust.
  const PerfDataHandler::Mapping* mapping = TryLookupInPid(pid, ip);
  if (mapping == nullptr) {
    // Might be a kernel sample.
    mapping = TryLookupInPid(-1, ip);
  }
  if (mapping == nullptr) {
    VLOG(2) << "no sample mmap found for pid " << pid << " and ip " << ip;
    return nullptr;
  }
  if (ip < mapping->start || ip >= mapping->limit) {
    std::cerr << "IP is not in mapping." << std::endl
              << "IP: " << ip << std::endl
              << "Start: " << mapping->start << std::endl
              << "Limit: " << mapping->limit << std::endl;
    abort();
  }
  return mapping;
}

const PerfDataHandler::Mapping* Normalizer::GetMainMMapFromPid(
    uint32 pid) const {
  auto mapping_it = pid_to_executable_mmap_.find(pid);
  if (mapping_it != pid_to_executable_mmap_.end()) {
    return mapping_it->second;
  }

  VLOG(2) << "No argv0 name found for sample with pid: " << pid;
  return nullptr;
}

int64 Normalizer::GetEventIndexForSample(
    const quipper::PerfDataProto_SampleEvent& sample) const {
  if (perf_proto_.file_attrs().size() == 1) {
    return 0;
  }

  if (!sample.has_id()) {
    LOG(ERROR) << "Perf sample did not have id";
    return -1;
  }

  auto it = id_to_event_index_.find(sample.id());
  if (it == id_to_event_index_.end()) {
    LOG(ERROR) << "Incorrect event id: " << sample.id();
    return -1;
  }
  return it->second;
}
}  // namespace

// Finds needle in haystack starting at cursor. It then returns the index
// directly after needle or string::npos if needle was not found.
size_t FindAfter(const string& haystack, const string& needle, size_t cursor) {
  auto next_cursor = haystack.find(needle, cursor);
  if (next_cursor != string::npos) {
    next_cursor += needle.size();
  }
  return next_cursor;
}

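// Returns true for a shared object mapping whose file has since been deleted,
// e.g. "/lib/libfoo.so.1 (deleted)".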
bool IsDeletedSharedObject(const string& path) {
  size_t cursor = 1;
  while ((cursor = FindAfter(path, ".so", cursor)) != string::npos) {
    const auto ch = path.at(cursor);
    if (ch == '.' || ch == '_' || ch == ' ') {
      return path.find("(deleted)", cursor) != string::npos;
    }
  }
  return false;
}

bool IsVersionedSharedObject(const string& path) {
  return path.find(".so.", 1) != string::npos;
}

PerfDataHandler::PerfDataHandler() {}

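// Builds a Normalizer over the proto and replays its events into the handler
// as normalized callbacks.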
void PerfDataHandler::Process(const quipper::PerfDataProto& perf_proto,
                              PerfDataHandler* handler) {
  Normalizer normalizer(perf_proto, handler);
  normalizer.Normalize();
}

}  // namespace perftools