#include "huge_page_deducer.h"

#include <limits>

#include "perf_data_utils.h"

#include "base/logging.h"

using PerfEvent = quipper::PerfDataProto::PerfEvent;
using MMapEvent = quipper::PerfDataProto::MMapEvent;

namespace quipper {
namespace {

const char kAnonFilename[] = "//anon";
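// 2 MiB: the hugepage size assumed by the deduction logic in this file.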
const size_t kHugepageSize = 1 << 21;

bool IsAnon(const MMapEvent& event) {
  return event.filename() == kAnonFilename;
}

// IsContiguous returns true if mmap |a| is immediately followed by |b|
// within a process' address space.
bool IsContiguous(const MMapEvent& a, const MMapEvent& b) {
  return a.pid() == b.pid() && (a.start() + a.len()) == b.start();
}

// IsEquivalentFile returns true iff |a| and |b| have the same name, or if
// either of them is anonymous memory (and thus likely to be a --hugepage_text
// version of the same file).
bool IsEquivalentFile(const MMapEvent& a, const MMapEvent& b) {
  // perf attributes neighboring anonymous mappings under the argv[0]
  // filename rather than "//anon", so check filename equality as well as
  // anonymity.
  return a.filename() == b.filename() || IsAnon(a) || IsAnon(b);
}

// Helper to correctly update a filename on a PerfEvent that contains an
// MMapEvent.
void SetMmapFilename(PerfEvent* event, const string& new_filename,
                     uint64_t new_filename_md5_prefix) {
  CHECK(event->has_mmap_event());
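  // Keep the event header size consistent with the new filename: adjust by
  // the difference between the uint64-aligned lengths of the new and old
  // filename strings.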
  event->mutable_header()->set_size(
      event->header().size() + GetUint64AlignedStringLength(new_filename) -
      GetUint64AlignedStringLength(event->mmap_event().filename()));

  event->mutable_mmap_event()->set_filename(new_filename);
  event->mutable_mmap_event()->set_filename_md5_prefix(new_filename_md5_prefix);
}
}  // namespace

namespace {

// MMapRange represents an index range into a PerfEvent sequence that covers
// a contiguous region of mmaps that all share the same filename and pgoff.
class MMapRange {
 public:
  // Default constructor is an invalid range.
  MMapRange()
      : first_(std::numeric_limits<int>::max()),
        last_(std::numeric_limits<int>::min()) {}

  // Construct a real range.
  MMapRange(int first_index, int last_index)
      : first_(first_index), last_(last_index) {}

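  // Returns the length in bytes of the virtual address span covered by the
  // range, from the start of the first mmap to the end of the last.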
  uint64 Len(const RepeatedPtrField<PerfEvent>& events) const {
    auto& first = events.Get(first_).mmap_event();
    auto& last = events.Get(last_).mmap_event();
    return last.start() - first.start() + last.len();
  }

  int FirstIndex() const { return first_; }
  int LastIndex() const { return last_; }
  bool IsValid() const { return first_ <= last_; }

  const MMapEvent& FirstMmap(const RepeatedPtrField<PerfEvent>& events) const {
    return events.Get(first_).mmap_event();
  }

  const MMapEvent& LastMmap(const RepeatedPtrField<PerfEvent>& events) const {
    return events.Get(last_).mmap_event();
  }

 private:
  int first_;
  int last_;
};

std::ostream& operator<<(std::ostream& os, const MMapRange& r) {
  os << "[" << r.FirstIndex() << "," << r.LastIndex() << "]";
  return os;
}

// MMapRange version of IsContiguous(MMapEvent, MMapEvent).
bool IsContiguous(const RepeatedPtrField<PerfEvent>& events, const MMapRange& a,
                  const MMapRange& b) {
  return IsContiguous(a.LastMmap(events), b.FirstMmap(events));
}

// MMapRange version of IsEquivalentFile(MMapEvent, MMapEvent).
bool IsEquivalentFile(const RepeatedPtrField<PerfEvent>& events,
                      const MMapRange& a, const MMapRange& b) {
  // Because a range has the same file for all mmaps within it, assume that
  // checking any mmap in |a| with any in |b| is sufficient.
  return IsEquivalentFile(a.LastMmap(events), b.FirstMmap(events));
}

// FindRange returns a MMapRange of contiguous MmapEvents that:
// - either:
//   - contains 1 or more MmapEvents with pgoff == 0
//   - is a single MmapEvent with pgoff != 0
// - and:
//   - has the same filename for all entries
// Otherwise, if none can be found, an invalid range will be returned.
MMapRange FindRange(const RepeatedPtrField<PerfEvent>& events, int start) {
  const MMapEvent* prev_mmap = nullptr;
  MMapRange range;
  for (int i = start; i < events.size(); i++) {
    const PerfEvent& event = events.Get(i);
    // Skip irrelevant events
    if (!event.has_mmap_event()) {
      continue;
    }
    // Skip dynamic mmap() events. Hugepage deduction only works on mmaps as
    // synthesized by perf from /proc/${pid}/maps, which have timestamp==0.
    // Support for deducing hugepages from a sequence of mmap()/mremap() calls
    // would require additional deduction logic.
    if (event.timestamp() != 0) {
      continue;
    }
    const MMapEvent& mmap = events.Get(i).mmap_event();
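    // First qualifying mmap seen: start a candidate range at this event.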
    if (prev_mmap == nullptr) {
      range = MMapRange(i, i);
      prev_mmap = &mmap;
    }
    // Ranges must match exactly: //anon,//anon, or file,file. If they use
    // different names, then deduction needs to consider them independently.
    if (prev_mmap->filename() != mmap.filename()) {
      break;
    }
    // If they're not virtually contiguous, they're not a single range.
    if (start != i && !IsContiguous(*prev_mmap, mmap)) {
      break;
    }
    // If this segment has a page offset, assume that it is *not* hugepage
    // backed, and thus does not need separate deduction.
    if (mmap.pgoff() != 0) {
      break;
    }
    CHECK(mmap.pgoff() == 0 || !IsAnon(mmap))
        << "Anonymous pages can't have pgoff set";
    prev_mmap = &mmap;
    range = MMapRange(range.FirstIndex(), i);
  }
  // Range has:
  // - single file
  // - virtually contiguous
  // - either: is multiple mappings *or* has pgoff=0
  return range;
}

// FindNextRange will return the next range after the given |prev_range| if
// there is one; otherwise it will return an invalid range.
MMapRange FindNextRange(const RepeatedPtrField<PerfEvent>& events,
                        const MMapRange& prev_range) {
  MMapRange ret;
  if (prev_range.IsValid() && prev_range.LastIndex() < events.size()) {
    ret = FindRange(events, prev_range.LastIndex() + 1);
  }
  return ret;
}

// UpdateRangeFromNext will set the filename / pgoff of all mmaps within |range|
// to be pgoff-contiguous with |next_range|, and match its file information.
void UpdateRangeFromNext(const MMapRange& range, const MMapRange& next_range,
                         RepeatedPtrField<PerfEvent>* events) {
  CHECK(range.LastIndex() < events->size());
  CHECK(next_range.LastIndex() < events->size());
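  // The updated range must end exactly where |next_range| begins, so its
  // first page offset is |next_range|'s starting pgoff minus the total length
  // of |range|.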
  const MMapEvent& src = next_range.FirstMmap(*events);
  const uint64 start_pgoff = src.pgoff() - range.Len(*events);
  uint64 pgoff = start_pgoff;
  for (int i = range.FirstIndex(); i <= range.LastIndex(); i++) {
    if (!events->Get(i).has_mmap_event()) {
      continue;
    }
    PerfEvent* event = events->Mutable(i);
    MMapEvent* mmap = event->mutable_mmap_event();

    // Replace "//anon" with a regular name if possible.
    if (IsAnon(*mmap)) {
      // ANDROID-CHANGED: protobuf-lite.
      CHECK_EQ(mmap->pgoff(), 0u) << "//anon should have offset=0 for mmap";
      //                          << event->ShortDebugString();
      SetMmapFilename(event, src.filename(), src.filename_md5_prefix());
    }

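    // Only mappings that still have pgoff == 0 receive the deduced offset and
    // |src|'s file metadata (device and inode numbers).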
    if (mmap->pgoff() == 0) {
      mmap->set_pgoff(pgoff);
      if (src.has_maj()) {
        mmap->set_maj(src.maj());
      }
      if (src.has_min()) {
        mmap->set_min(src.min());
      }
      if (src.has_ino()) {
        mmap->set_ino(src.ino());
      }
      if (src.has_ino_generation()) {
        mmap->set_ino_generation(src.ino_generation());
      }
    }
    pgoff += mmap->len();
  }
  CHECK_EQ(pgoff, start_pgoff + range.Len(*events));
}
}  // namespace

void DeduceHugePages(RepeatedPtrField<PerfEvent>* events) {
  // |prev_range|, if IsValid(), represents the previous mmap range seen (and
  // already processed / updated).
  MMapRange prev_range;
  // |range| contains the currently-being-processed mmap range, which will have
  // its hugepage ranges deduced.
  MMapRange range = FindRange(*events, 0);
  // |next_range| contains the next range to process, possibly containing
  // pgoff != 0 or !IsAnon(filename) from which the current range can be
  // updated.
  MMapRange next_range = FindNextRange(*events, range);

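  // Slide a window of three ranges (prev, current, next) across the events;
  // all three advance at the end of each loop iteration.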
  for (; range.IsValid(); prev_range = range, range = next_range,
                          next_range = FindNextRange(*events, range)) {
    const bool have_next =
        (next_range.IsValid() && IsContiguous(*events, range, next_range) &&
         IsEquivalentFile(*events, range, next_range));

    // If there's no mmap after this, then we assume that this is *not* a
    // viable hugepage_text mapping. This is true unless we're really unlucky.
    // If:
    // - the binary is mapped such that the limit is hugepage aligned
    //   (presumably a 4Ki/2Mi chance, p ~= 0.002)
    // - and the entire binary is hugepage_text mapped
    if (!have_next) {
      continue;
    }

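    // A preceding range can seed |start_pgoff| only if it is contiguous with
    // |range| and refers to the same file as both |range| and |next_range|.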
    const bool have_prev =
        (prev_range.IsValid() && IsContiguous(*events, prev_range, range) &&
         IsEquivalentFile(*events, prev_range, range) &&
         IsEquivalentFile(*events, prev_range, next_range));

    uint64 start_pgoff = 0;
    if (have_prev) {
      const auto& prev = prev_range.LastMmap(*events);
      start_pgoff = prev.pgoff() + prev.len();
    }
    const auto& next = next_range.FirstMmap(*events);
    // |start_pgoff| should be valid now, so double-check that, if next has a
    // non-zero pgoff, {prev,curr,next} will have contiguous pgoff once
    // updated.
    if (next.pgoff() >= range.Len(*events) &&
        (next.pgoff() - range.Len(*events)) == start_pgoff) {
      UpdateRangeFromNext(range, next_range, events);
    }
  }
}

void CombineMappings(RepeatedPtrField<PerfEvent>* events) {
  // Rebuild the event stream, merging adjacent mmap events that are
  // contiguous in both virtual address space and file offset.
  RepeatedPtrField<PerfEvent> new_events;
  new_events.Reserve(events->size());

  // |prev| is the index of the last mmap_event in |new_events| (or
  // |new_events.size()| if no mmap_events have been inserted yet).
  int prev = 0;
  for (int i = 0; i < events->size(); ++i) {
    PerfEvent* event = events->Mutable(i);
    if (!event->has_mmap_event()) {
      new_events.Add()->Swap(event);
      continue;
    }

    const MMapEvent& mmap = event->mmap_event();
    // Try to merge mmap with |new_events[prev]|.
    while (prev < new_events.size() && !new_events.Get(prev).has_mmap_event()) {
      prev++;
    }

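    // No mmap event in |new_events| to merge into; emit this one as-is.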
    if (prev >= new_events.size()) {
      new_events.Add()->Swap(event);
      continue;
    }

    MMapEvent* prev_mmap = new_events.Mutable(prev)->mutable_mmap_event();

    // Don't use IsEquivalentFile(); we don't want to combine //anon with
    // files if DeduceHugePages didn't already fix up the mappings.
    const bool file_match = prev_mmap->filename() == mmap.filename();
    const bool pgoff_contiguous =
        file_match && (prev_mmap->pgoff() + prev_mmap->len() == mmap.pgoff());

    const bool combine_mappings =
        IsContiguous(*prev_mmap, mmap) && pgoff_contiguous;
    if (!combine_mappings) {
      new_events.Add()->Swap(event);
      prev++;
      continue;
    }

    // Combine the lengths of the two mappings.
    prev_mmap->set_len(prev_mmap->len() + mmap.len());
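    // |event| itself is not emitted; its address range is now covered by the
    // extended |prev_mmap|.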
  }

  events->Swap(&new_events);
}

}  // namespace quipper