/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#include <map>
#include <memory>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
#include "backtrace/BacktraceMap.h"
#include "cutils/ashmem.h"

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/memory_tool.h"
#include "globals.h"
#include "utils.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

using android::base::StringPrintf;
using android::base::unique_fd;

template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap =
    std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;

using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;

// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::mutex* MemMap::mem_maps_lock_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in the 32-bit address range for 64-bit architectures that do not support
// MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To not interfere with the image position, take the image's address and only place the mapping
// below it. Current formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = 1111111111111110000
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// ----------------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// ----------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest (usually 12) bits are not used, as the result is page-aligned.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask the input data.
  return (input & mask) + LOW_MEM_START;
}
#endif
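
// Worked example of the computation above (a sketch; the concrete ART_BASE_ADDRESS is
// build-dependent, 0x70000000 is only assumed here for illustration, with kPageSize == 4096):
//
//   CLZ(0x70000000) = 1
//   mask_ones       = (1 << (31 - 1)) - 1      = 0x3fffffff
//   mask            = 0x3fffffff & ~0xfff      = 0x3ffff000
//
// So for input = 0x123456789abcdef0:
//   (input & mask) + LOW_MEM_START = 0x1abcd000 + 0x10000 = 0x1abdd000,
// i.e. a page-aligned start below the (assumed) image base with 18 bits of entropy.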

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the gMaps variable or the /proc/self/maps entries.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (auto& pair : *gMaps) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }

  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by the caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try to generate a full error
    // message with the overlapping mapping. There's no guarantee
    // that there will be an overlap, though, since
    // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   set, even if there is no overlap, and
    // - there might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os <<  StringPrintf("Failed to mmap at expected address, mapped at "
                        "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                        actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED, the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  use_ashmem = use_ashmem && !kIsTargetLinux;
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android (either host or assuming a Linux target), ashmem is faked using
      // files in /tmp. Ensure that such files won't fail due to ulimit restrictions. If they
      // would, fall back to a regular mmap.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  unique_fd fd;

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));

    if (fd.get() == -1) {
      // We failed to create the ashmem region. Print a warning, but continue
      // anyway by creating a true anonymous mmap with an fd of -1. It is
      // better to use an unlabelled anonymous map than to fail to create a
      // map at all.
      PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
    } else {
      // We succeeded in creating the ashmem region. Use the created ashmem
      // region as backing for the mmap.
      flags &= ~MAP_ANONYMOUS;
    }
  }

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                    "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}
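
// Usage sketch for MapAnonymous (illustrative only; the region name and size below are
// hypothetical, and use_ashmem is left at its default as declared in mem_map.h):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("example-scratch",
//                                                    /* expected_ptr */ nullptr,
//                                                    16 * kPageSize,
//                                                    PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false,
//                                                    /* reuse */ false,
//                                                    &error_msg));
//   CHECK(map != nullptr) << error_msg;
//
// Passing a null expected_ptr lets the kernel pick the address; byte_count is rounded up to a
// whole number of pages for the underlying mapping.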

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);
    DCHECK(error_msg != nullptr);
    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
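
  // Worked example of the alignment arithmetic (numbers are hypothetical, assuming a 4KB
  // page size): for start = 0x1234 and byte_count = 0x2000,
  //   page_offset             = 0x1234 % 0x1000                 = 0x234
  //   page_aligned_offset     = 0x1234 - 0x234                  = 0x1000
  //   page_aligned_byte_count = RoundUp(0x2000 + 0x234, 0x1000) = 0x3000
  // so we map three pages starting at file offset 0x1000 and hand out begin_ = actual + 0x234.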

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from gMaps.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  bool found = false;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      gMaps->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to gMaps.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  use_ashmem = use_ashmem && !kIsTargetLinux;
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  unique_fd fd;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
    flags = MAP_PRIVATE | MAP_FIXED;
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect, as there's no way to keep other
  // threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
                                                    tail_base_size,
                                                    tail_prot,
                                                    flags,
                                                    fd.get(),
                                                    0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
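
// Usage sketch for the reserve-and-divide pattern (illustrative only; names and sizes are
// hypothetical, and use_ashmem is left at its default as declared in mem_map.h):
//
//   std::string error_msg;
//   // Reserve 4 pages up front...
//   MemMap* reservation = MemMap::MapAnonymous("example-reservation", nullptr, 4 * kPageSize,
//                                              PROT_READ | PROT_WRITE, /* low_4gb */ false,
//                                              /* reuse */ false, &error_msg);
//   CHECK(reservation != nullptr) << error_msg;
//   // ...then split off the last 2 pages as an independently protected tail map.
//   MemMap* tail = reservation->RemapAtEnd(reservation->Begin() + 2 * kPageSize,
//                                          "example-tail", PROT_READ, &error_msg);
//   CHECK(tail != nullptr) << error_msg;
//
// After the split, 'reservation' shrinks to the first 2 pages and 'tail' owns the rest; both
// are registered in gMaps and are unmapped independently by their destructors.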

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // the address to msync(), as it only accepts a page-aligned base address, and exclude the
    // higher-end noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}
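
// A minimal usage sketch (hypothetical): a typical pattern is to map a region writable, fill
// it, and then use Protect() to drop write access once the contents are finalized:
//
//   CHECK(map->Protect(PROT_READ)) << "failed to seal example map";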

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *gMaps;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(address), end = gMaps->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  if (mem_maps_lock_ != nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    return;
  }
  mem_maps_lock_ = new std::mutex();
  // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DCHECK(gMaps == nullptr);
  gMaps = new Maps;
}

void MemMap::Shutdown() {
  if (mem_maps_lock_ == nullptr) {
    // If MemMap::Shutdown is called more than once, there is no effect.
    return;
  }
  {
    // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    delete gMaps;
    gMaps = nullptr;
  }
  delete mem_maps_lock_;
  mem_maps_lock_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
                                            int prot,
                                            int flags,
                                            int fd,
                                            off_t offset) {
#if USE_ART_LOW_4G_ALLOCATOR
  void* actual = MAP_FAILED;

  bool first_run = true;

  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
    // Use gMaps as an optimization to skip over large maps.
    // Find the first map whose address is > ptr.
    auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
    if (it != gMaps->begin()) {
      auto before_it = it;
      --before_it;
      // Start at the end of the map before the upper bound.
      ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
      CHECK_ALIGNED(ptr, kPageSize);
    }
    while (it != gMaps->end()) {
      // How much space do we have until the next map?
      size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
      // If the space may be sufficient, break out of the loop.
      if (delta >= length) {
        break;
      }
      // Otherwise, skip to the end of the map.
      ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
      CHECK_ALIGNED(ptr, kPageSize);
      ++it;
    }

    // Try to see if we get lucky with this address since none of the ART maps overlap.
    actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
    if (actual != MAP_FAILED) {
      next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
      return actual;
    }

    if (4U * GB - ptr < length) {
      // Not enough memory until 4GB.
      if (first_run) {
        // Try another time from the bottom.
        ptr = LOW_MEM_START - kPageSize;
        first_run = false;
        continue;
      } else {
        // Second try failed.
        break;
      }
    }

    uintptr_t tail_ptr;

    // Check that the pages are free.
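    // The msync() probe below distinguishes free pages from mapped ones: msync() on an
    // unmapped page fails with ENOMEM, while success means something is already mapped there.
    // The probe is inherently racy (another thread could grab the range before the mmap in
    // TryMemMapLow4GB), so a "free" result is a hint rather than a guarantee.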
    bool safe = true;
    for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
      if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
        safe = false;
        break;
      } else {
        DCHECK_EQ(errno, ENOMEM);
      }
    }

    next_mem_pos_ = tail_ptr;  // Update early, as we break out when we find and map a region.

    if (safe) {
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        return actual;
      }
    } else {
      // Skip over the last page.
      ptr = tail_ptr;
    }
  }

  if (actual == MAP_FAILED) {
    LOG(ERROR) << "Could not find contiguous low-memory space.";
    errno = ENOMEM;
  }
  return actual;
#else
  UNUSED(length, prot, flags, fd, offset);
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
#endif
}

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range must fit
  // entirely below 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us.
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64.
  if (low_4gb && addr == nullptr) {
    // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
    // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
    // the allocator will scan all of the low 4GB twice, and still fail. This is *very* slow.
    //
    // To avoid the issue, always map non-executable first, and mprotect if necessary.
    const int orig_prot = prot;
    const int prot_non_exec = prot & ~PROT_EXEC;
    actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);

    if (actual == MAP_FAILED) {
      return MAP_FAILED;
    }

    // See if we need to remap with the executable bit now.
    if (orig_prot != prot_non_exec) {
      if (mprotect(actual, length, orig_prot) != 0) {
        PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
        munmap(actual, length);
        errno = ENOMEM;
        return MAP_FAILED;
      }
    }
    return actual;
  }

  actual = mmap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing
  // away the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

void ZeroAndReleasePages(void* address, size_t length) {
  if (length == 0) {
    return;
  }
  uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
  uint8_t* const mem_end = mem_begin + length;
  uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
  uint8_t* const page_end = AlignDown(mem_end, kPageSize);
  if (!kMadviseZeroes || page_begin >= page_end) {
    // No possible area to madvise.
    std::fill(mem_begin, mem_end, 0);
  } else {
    // Spans one or more pages.
    DCHECK_LE(mem_begin, page_begin);
    DCHECK_LE(page_begin, page_end);
    DCHECK_LE(page_end, mem_end);
    std::fill(mem_begin, page_begin, 0);
    CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
    std::fill(page_end, mem_end, 0);
  }
}
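
// Worked example for ZeroAndReleasePages (hypothetical addresses, assuming 4KB pages): for
// address = 0x10000800 and length = 0x2000,
//   page_begin = AlignUp(0x10000800)   = 0x10001000
//   page_end   = AlignDown(0x10002800) = 0x10002000
// so the partial ranges [0x10000800, 0x10001000) and [0x10002000, 0x10002800) are zeroed with
// std::fill, while the whole page [0x10001000, 0x10002000) is released via MADV_DONTNEED,
// which also reads back as zero afterwards for anonymous memory (hence kMadviseZeroes).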

void MemMap::AlignBy(size_t size) {
  CHECK_EQ(begin_, base_begin_) << "Unsupported";
  CHECK_EQ(size_, base_size_) << "Unsupported";
  CHECK_GT(size, static_cast<size_t>(kPageSize));
  CHECK_ALIGNED(size, kPageSize);
  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
      IsAlignedParam(base_size_, size)) {
    // Already aligned.
    return;
  }
  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
  uint8_t* base_end = base_begin + base_size_;
  uint8_t* aligned_base_begin = AlignUp(base_begin, size);
  uint8_t* aligned_base_end = AlignDown(base_end, size);
  CHECK_LE(base_begin, aligned_base_begin);
  CHECK_LE(aligned_base_end, base_end);
  size_t aligned_base_size = aligned_base_end - aligned_base_begin;
  CHECK_LT(aligned_base_begin, aligned_base_end)
      << "base_begin = " << reinterpret_cast<void*>(base_begin)
      << " base_end = " << reinterpret_cast<void*>(base_end);
  CHECK_GE(aligned_base_size, size);
  // Unmap the unaligned parts.
  if (base_begin < aligned_base_begin) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
    CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
        << "base_begin=" << reinterpret_cast<void*>(base_begin)
        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
  }
  if (aligned_base_end < base_end) {
    MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
    CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
        << "base_end=" << reinterpret_cast<void*>(base_end)
        << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
  }
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  base_begin_ = aligned_base_begin;
  base_size_ = aligned_base_size;
  begin_ = aligned_base_begin;
  size_ = aligned_base_size;
  DCHECK(gMaps != nullptr);
  if (base_begin < aligned_base_begin) {
    auto it = gMaps->find(base_begin);
    CHECK(it != gMaps->end()) << "MemMap not found";
    gMaps->erase(it);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}
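
// Worked example for AlignBy (hypothetical addresses): aligning the map
// [0x10003000, 0x10603000) to size = 0x100000 (1MB) trims it to
//   aligned_base_begin = AlignUp(0x10003000, 0x100000)   = 0x10100000
//   aligned_base_end   = AlignDown(0x10603000, 0x100000) = 0x10600000
// munmapping [0x10003000, 0x10100000) and [0x10600000, 0x10603000), and re-keying the gMaps
// entry to the new base address.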

}  // namespace art