// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/library_loader/library_prefetcher.h"

#include <stddef.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
#include <utility>
#include <vector>

#include "base/android/library_loader/anchor_functions.h"
#include "base/android/orderfile/orderfile_buildflags.h"
#include "base/bits.h"
#include "base/files/file.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"

#if BUILDFLAG(ORDERFILE_INSTRUMENTATION)
#include "base/android/orderfile/orderfile_instrumentation.h"
#endif

#if BUILDFLAG(SUPPORTS_CODE_ORDERING)

namespace base {
namespace android {

namespace {

// Android defines the background priority to this value since at least 2009
// (see Process.java).
constexpr int kBackgroundPriority = 10;
// Valid for all Android architectures.
constexpr size_t kPageSize = 4096;

// Reads a byte per page between |start| and |end| to force the range into the
// page cache.
// Heap allocations, syscalls and library functions are not allowed in this
// function.
#if defined(ADDRESS_SANITIZER)
// Disable AddressSanitizer instrumentation for this function. It is touching
// memory that hasn't been allocated by the app, though the addresses are
// valid. Furthermore, this takes place in a child process. See
// crbug.com/653372 for the context.
__attribute__((no_sanitize_address))
#endif
void Prefetch(size_t start, size_t end) {
  unsigned char* start_ptr = reinterpret_cast<unsigned char*>(start);
  unsigned char* end_ptr = reinterpret_cast<unsigned char*>(end);
  unsigned char dummy = 0;
  for (unsigned char* ptr = start_ptr; ptr < end_ptr; ptr += kPageSize) {
    // Volatile is required to prevent the compiler from eliminating this
    // loop.
    dummy ^= *static_cast<volatile unsigned char*>(ptr);
  }
}

// Populates the per-page residency between |start| and |end| in |residency|.
// If successful, |residency| has the size of |end| - |start| in pages.
// Returns true for success.
bool Mincore(size_t start, size_t end, std::vector<unsigned char>* residency) {
  if (start % kPageSize || end % kPageSize)
    return false;
  size_t size = end - start;
  size_t size_in_pages = size / kPageSize;
  if (residency->size() != size_in_pages)
    residency->resize(size_in_pages);
  int err = HANDLE_EINTR(
      mincore(reinterpret_cast<void*>(start), size, &(*residency)[0]));
  PLOG_IF(ERROR, err) << "mincore() failed";
  return !err;
}

// Returns the start and end of .text, aligned to the lower and upper page
// boundaries, respectively.
std::pair<size_t, size_t> GetTextRange() {
  // |kStartOfText| may not be at the beginning of a page, since .plt can be
  // before it, yet in the same mapping for instance.
  size_t start_page = kStartOfText - kStartOfText % kPageSize;
  // Set the end to the page on which the beginning of the last symbol is. The
  // actual symbol may spill into the next page by a few bytes, but this is
  // outside of the executable code range anyway.
  size_t end_page = base::bits::Align(kEndOfText, kPageSize);
  return {start_page, end_page};
}

// Returns the start and end pages of the ordered section of .text, aligned
// to lower and upper page boundaries, respectively.
std::pair<size_t, size_t> GetOrderedTextRange() {
  size_t start_page = kStartOfOrderedText - kStartOfOrderedText % kPageSize;
  // kEndOfOrderedText is exclusive: it is not itself ordered, but the byte
  // immediately before it is, so its page must be covered, hence the
  // round-up.
  size_t end_page = base::bits::Align(kEndOfOrderedText, kPageSize);
  return {start_page, end_page};
}

// Calls madvise(advice) on the specified range. Does nothing if the range is
// empty.
void MadviseOnRange(const std::pair<size_t, size_t>& range, int advice) {
  if (range.first >= range.second) {
    return;
  }
  size_t size = range.second - range.first;
  int err = madvise(reinterpret_cast<void*>(range.first), size, advice);
  if (err) {
    PLOG(ERROR) << "madvise() failed";
  }
}

// Timestamp in ns from CLOCK_MONOTONIC, and residency, as returned by
// mincore().
struct TimestampAndResidency {
  uint64_t timestamp_nanos;
  std::vector<unsigned char> residency;

  TimestampAndResidency(uint64_t timestamp_nanos,
                        std::vector<unsigned char>&& residency)
      : timestamp_nanos(timestamp_nanos), residency(residency) {}
};

// Returns true for success.
bool CollectResidency(size_t start,
                      size_t end,
                      std::vector<TimestampAndResidency>* data) {
  // Not using base::TimeTicks() to avoid calling too many base:: symbols that
  // would pollute the reached-symbols dumps.
  struct timespec ts;
  if (HANDLE_EINTR(clock_gettime(CLOCK_MONOTONIC, &ts))) {
    PLOG(ERROR) << "Cannot get the time.";
    return false;
  }
  uint64_t now =
      static_cast<uint64_t>(ts.tv_sec) * 1000 * 1000 * 1000 + ts.tv_nsec;
  std::vector<unsigned char> residency;
  if (!Mincore(start, end, &residency))
    return false;

  data->emplace_back(now, std::move(residency));
  return true;
}

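// Dumps the collected samples to /data/local/tmp/chrome/residency-<pid>.txt.
// Format: a first line with the .text start and end offsets relative to the
// dumped range, then one line per sample: "<timestamp_ns> " followed by one
// '0'/'1' character per page.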
void DumpResidency(size_t start,
                   size_t end,
                   std::unique_ptr<std::vector<TimestampAndResidency>> data) {
  auto path = base::FilePath(
      base::StringPrintf("/data/local/tmp/chrome/residency-%d.txt", getpid()));
  auto file =
      base::File(path, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
  if (!file.IsValid()) {
    PLOG(ERROR) << "Cannot open file to dump the residency data "
                << path.value();
    return;
  }

  // First line: start-end of text range.
  CHECK(IsOrderingSane());
  CHECK_LT(start, kStartOfText);
  CHECK_LT(kEndOfText, end);
  auto start_end = base::StringPrintf("%" PRIuS " %" PRIuS "\n",
                                      kStartOfText - start, kEndOfText - start);
  file.WriteAtCurrentPos(start_end.c_str(), start_end.size());

  for (const auto& data_point : *data) {
    auto timestamp =
        base::StringPrintf("%" PRIu64 " ", data_point.timestamp_nanos);
    file.WriteAtCurrentPos(timestamp.c_str(), timestamp.size());

    std::vector<char> dump;
    dump.reserve(data_point.residency.size() + 1);
    for (auto c : data_point.residency)
      dump.push_back(c ? '1' : '0');
    dump.push_back('\n');
    file.WriteAtCurrentPos(&dump[0], dump.size());
  }
}

// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
// Used for "LibraryLoader.PrefetchDetailedStatus".
enum class PrefetchStatus {
  kSuccess = 0,
  kWrongOrdering = 1,
  kForkFailed = 2,
  kChildProcessCrashed = 3,
  kChildProcessKilled = 4,
  kMaxValue = kChildProcessKilled
};

PrefetchStatus ForkAndPrefetch(bool ordered_only) {
  if (!IsOrderingSane()) {
    LOG(WARNING) << "Incorrect code ordering";
    return PrefetchStatus::kWrongOrdering;
  }

  // Looking for ranges is done before the fork, to avoid syscalls and/or
  // memory allocations in the forked process. The child process inherits the
  // lock state of its parent thread. It cannot rely on being able to acquire
  // any lock (unless special care is taken in a pre-fork handler), including
  // being able to call malloc().
  //
  // Always prefetch the ordered section first, as it's reached early during
  // startup, and not necessarily located at the beginning of .text.
  std::vector<std::pair<size_t, size_t>> ranges = {GetOrderedTextRange()};
  if (!ordered_only)
    ranges.push_back(GetTextRange());

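  // Prefetching is done from a forked child: .text is a file-backed mapping,
  // so pages faulted in by the child also populate the shared page cache that
  // the parent maps, the child can run at background priority, and a crash
  // while touching pages only kills the child (reported as
  // kChildProcessCrashed).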
  pid_t pid = fork();
  if (pid == 0) {
    setpriority(PRIO_PROCESS, 0, kBackgroundPriority);
    // _exit() doesn't call the atexit() handlers.
    for (const auto& range : ranges) {
      Prefetch(range.first, range.second);
    }
    _exit(EXIT_SUCCESS);
  } else {
    if (pid < 0) {
      return PrefetchStatus::kForkFailed;
    }
    int status;
    const pid_t result = HANDLE_EINTR(waitpid(pid, &status, 0));
    if (result == pid) {
      if (WIFEXITED(status))
        return PrefetchStatus::kSuccess;
      if (WIFSIGNALED(status)) {
        int signal = WTERMSIG(status);
        switch (signal) {
          case SIGSEGV:
          case SIGBUS:
            return PrefetchStatus::kChildProcessCrashed;
          case SIGKILL:
          case SIGTERM:
          default:
            return PrefetchStatus::kChildProcessKilled;
        }
      }
    }
    // Should not happen. Per man waitpid(2), errors are:
    // - EINTR: handled.
    // - ECHILD if the process doesn't have an unwaited-for child with this PID.
    // - EINVAL.
    return PrefetchStatus::kChildProcessKilled;
  }
}

}  // namespace

// static
void NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary(bool ordered_only) {
#if BUILDFLAG(ORDERFILE_INSTRUMENTATION)
  // Avoid forking with orderfile instrumentation because the child process
  // would create a dump as well.
  return;
#endif

  PrefetchStatus status = ForkAndPrefetch(ordered_only);
  UMA_HISTOGRAM_BOOLEAN("LibraryLoader.PrefetchStatus",
                        status == PrefetchStatus::kSuccess);
  UMA_HISTOGRAM_ENUMERATION("LibraryLoader.PrefetchDetailedStatus", status);
  if (status != PrefetchStatus::kSuccess) {
    LOG(WARNING) << "Cannot prefetch the library. status = "
                 << static_cast<int>(status);
  }
}

// static
int NativeLibraryPrefetcher::PercentageOfResidentCode(size_t start,
                                                      size_t end) {
  size_t total_pages = 0;
  size_t resident_pages = 0;

  std::vector<unsigned char> residency;
  bool ok = Mincore(start, end, &residency);
  if (!ok)
    return -1;
  total_pages += residency.size();
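  // mincore() reports residency in the least significant bit of each byte,
  // hence the mask with 1 when counting resident pages.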
  resident_pages += std::count_if(residency.begin(), residency.end(),
                                  [](unsigned char x) { return x & 1; });
  if (total_pages == 0)
    return -1;
  return static_cast<int>((100 * resident_pages) / total_pages);
}

// static
int NativeLibraryPrefetcher::PercentageOfResidentNativeLibraryCode() {
  if (!IsOrderingSane()) {
    LOG(WARNING) << "Incorrect code ordering";
    return -1;
  }
  const auto& range = GetTextRange();
  return PercentageOfResidentCode(range.first, range.second);
}

// static
void NativeLibraryPrefetcher::PeriodicallyCollectResidency() {
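  // kPageSize is hard-coded to 4096; the CHECK below ensures it matches the
  // actual system page size before relying on it.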
  CHECK_EQ(static_cast<long>(kPageSize), sysconf(_SC_PAGESIZE));

  const auto& range = GetTextRange();
  auto data = std::make_unique<std::vector<TimestampAndResidency>>();
  for (int i = 0; i < 60; ++i) {
    if (!CollectResidency(range.first, range.second, data.get()))
      return;
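    // 2e5 microseconds = 200 ms between samples; 60 iterations cover roughly
    // 12 seconds of startup.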
    usleep(2e5);
  }
  DumpResidency(range.first, range.second, std::move(data));
}

// static
void NativeLibraryPrefetcher::MadviseForOrderfile() {
  CHECK(IsOrderingSane());
  LOG(WARNING) << "Performing experimental madvise from orderfile information";
  // First MADV_RANDOM on all of text, then turn the ordered text range back to
  // normal. The ordered range may be placed anywhere within .text.
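  // MADV_RANDOM disables readahead for the mapping, so faulting in a rarely
  // used page brings in only that page; MADV_NORMAL restores the default
  // readahead for the hot, ordered range.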
  MadviseOnRange(GetTextRange(), MADV_RANDOM);
  MadviseOnRange(GetOrderedTextRange(), MADV_NORMAL);
}

}  // namespace android
}  // namespace base
#endif  // BUILDFLAG(SUPPORTS_CODE_ORDERING)