1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mem_map.h"
18 
19 #include <inttypes.h>
20 #include <stdlib.h>
21 #if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
22 #include <sys/resource.h>
23 #endif
24 
25 #if defined(__linux__)
26 #include <sys/prctl.h>
27 #endif
28 
29 #include <map>
30 #include <memory>
31 #include <sstream>
32 
33 #include "allocator.h"
34 #include "android-base/stringprintf.h"
35 #include "android-base/unique_fd.h"
36 #include "bit_utils.h"
37 #include "globals.h"
38 #include "logging.h"  // For VLOG_IS_ON.
39 #include "memory_tool.h"
40 #include "mman.h"  // For the PROT_* and MAP_* constants.
41 #include "utils.h"
42 
43 #ifndef MAP_ANONYMOUS
44 #define MAP_ANONYMOUS MAP_ANON
45 #endif
46 
47 namespace art {
48 
49 using android::base::StringPrintf;
50 using android::base::unique_fd;
51 
52 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
53 using AllocationTrackingMultiMap =
54     std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
55 
56 using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
57 
58 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
59 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
60 
61 // A map containing unique strings used for identifying anonymous mappings.
62 static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());
63 
64 // Retrieve iterator to a `gMaps` entry that is known to exist.
65 Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
66   DCHECK(map.IsValid());
67   DCHECK(gMaps != nullptr);
68   for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
69        it != end && it->first == map.BaseBegin();
70        ++it) {
71     if (it->second == &map) {
72       return it;
73     }
74   }
75   LOG(FATAL) << "MemMap not found";
76   UNREACHABLE();
77 }
78 
79 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
80   os << "MemMap:" << std::endl;
81   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
82     void* base = it->first;
83     MemMap* map = it->second;
84     CHECK_EQ(base, map->BaseBegin());
85     os << *map << std::endl;
86   }
87   return os;
88 }
89 
90 std::mutex* MemMap::mem_maps_lock_ = nullptr;
91 #ifdef ART_PAGE_SIZE_AGNOSTIC
92 size_t MemMap::page_size_ = 0;
93 #endif
94 
95 #if USE_ART_LOW_4G_ALLOCATOR
96 // Handling mem_map in the 32-bit address range for 64-bit architectures that do not support MAP_32BIT.
97 
98 // The regular start of memory allocations. The first 64KB is protected by SELinux.
99 static constexpr uintptr_t LOW_MEM_START = 64 * KB;
100 
101 // Generate random starting position.
102 // To not interfere with image position, take the image's address and only place it below. Current
103 // formula (sketch):
104 //
105 // ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
106 // ----------------------------------------
107 //                    = 0000111111111111111
108 // & ~(page_size - 1) =~0000000000000001111
109 // ----------------------------------------
110 // mask               = 0000111111111110000
111 // & random data      = YYYYYYYYYYYYYYYYYYY
112 // -----------------------------------
113 // tmp                = 0000YYYYYYYYYYY0000
114 // + LOW_MEM_START    = 0000000000001000000
115 // --------------------------------------
116 // start
117 //
118 // arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
119 // do not have Bionic, simply start with LOW_MEM_START.
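// Worked example (hypothetical values, assuming ART_BASE_ADDRESS == 0x70000000 and 4 KiB pages):
//   leading_zeros = CLZ(0x70000000) = 1, so mask_ones = (1 << 30) - 1 = 0x3fffffff
//   mask          = 0x3fffffff & ~0xfff = 0x3ffff000
//   input & mask  = 0x123456789a & 0x3ffff000 = 0x34567000
//   start         = 0x34567000 + LOW_MEM_START (0x10000) = 0x34577000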
120 
121 // Function is standalone so it can be tested somewhat in mem_map_test.cc.
122 #ifdef __BIONIC__
123 uintptr_t CreateStartPos(uint64_t input, size_t page_size) {
124   CHECK_NE(0, ART_BASE_ADDRESS);
125 
126   // Start with all bits below highest bit in ART_BASE_ADDRESS.
127   constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
128   constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;
129 
130   // The lowest (usually 12) bits are not used, as the position is page-aligned.
131   const uintptr_t mask = mask_ones & ~(page_size - 1);
132 
133   // Mask input data.
134   return (input & mask) + LOW_MEM_START;
135 }
136 #endif
137 
138 static uintptr_t GenerateNextMemPos(size_t page_size) {
139 #ifdef __BIONIC__
140   uint64_t random_data;
141   arc4random_buf(&random_data, sizeof(random_data));
142   return CreateStartPos(random_data, page_size);
143 #else
144   UNUSED(page_size);
145   // No arc4random on host, see above.
146   return LOW_MEM_START;
147 #endif
148 }
149 
150 uintptr_t MemMap::next_mem_pos_;
151 #endif
152 
153 // Return true if the address range is contained in a single memory map by either reading
154 // the gMaps variable or the /proc/self/maps entry.
155 bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
156   uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
157   uintptr_t end = begin + size;
158 
159   {
160     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
161     for (auto& pair : *gMaps) {
162       MemMap* const map = pair.second;
163       if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
164           end <= reinterpret_cast<uintptr_t>(map->End())) {
165         return true;
166       }
167     }
168   }
169 
170   if (error_msg != nullptr) {
171     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
172     *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
173                               "any existing map. See process maps in the log.", begin, end);
174   }
175   return false;
176 }
177 
178 // CheckMapRequest to validate a non-MAP_FAILED mmap result based on
179 // the expected value, calling munmap if validation fails, giving the
180 // reason in error_msg.
181 //
182 // If the expected_ptr is null, nothing is checked beyond the fact
183 // that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
184 // non-null, we check that actual_ptr == expected_ptr,
185 // and if not, report in error_msg what the conflict mapping was if
186 // found, or a generic error in other cases.
187 bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
188                             std::string* error_msg) {
189   // Handled first by caller for more specific error messages.
190   CHECK(actual_ptr != MAP_FAILED);
191 
192   if (expected_ptr == nullptr) {
193     return true;
194   }
195 
196   uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
197   uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
198 
199   if (expected_ptr == actual_ptr) {
200     return true;
201   }
202 
203   // We asked for an address but didn't get what we wanted, all paths below here should fail.
204   int result = TargetMUnmap(actual_ptr, byte_count);
205   if (result == -1) {
206     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
207   }
208 
209   if (error_msg != nullptr) {
210     // We call this here so that we can try and generate a full error
211     // message with the overlapping mapping. There's no guarantee that
212     // there will be an overlap though, since
213     // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
214     //   true, even if there is no overlap
215     // - There might have been an overlap at the point of mmap, but the
216     //   overlapping region has since been unmapped.
217 
218     // Tell the client the mappings that were in place at the time.
219     if (kIsDebugBuild) {
220       PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
221     }
222 
223     std::ostringstream os;
224     os <<  StringPrintf("Failed to mmap at expected address, mapped at "
225                         "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
226                         actual, expected);
227     *error_msg = os.str();
228   }
229   return false;
230 }
231 
232 bool MemMap::CheckReservation(uint8_t* expected_ptr,
233                               size_t byte_count,
234                               const char* name,
235                               const MemMap& reservation,
236                               /*out*/std::string* error_msg) {
237   if (!reservation.IsValid()) {
238     *error_msg = StringPrintf("Invalid reservation for %s", name);
239     return false;
240   }
241   DCHECK_ALIGNED_PARAM(reservation.Begin(), GetPageSize());
242   if (reservation.Begin() != expected_ptr) {
243     *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
244                               name,
245                               reservation.Begin(),
246                               expected_ptr);
247     return false;
248   }
249   if (byte_count > reservation.Size()) {
250     *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
251                               byte_count,
252                               reservation.Size());
253     return false;
254   }
255   return true;
256 }
257 
258 
259 #if USE_ART_LOW_4G_ALLOCATOR
260 void* MemMap::TryMemMapLow4GB(void* ptr,
261                                     size_t page_aligned_byte_count,
262                                     int prot,
263                                     int flags,
264                                     int fd,
265                                     off_t offset) {
266   void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
267   if (actual != MAP_FAILED) {
268     // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
269     // 4GB. If this is the case, unmap and retry.
270     if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
271       TargetMUnmap(actual, page_aligned_byte_count);
272       actual = MAP_FAILED;
273     }
274   }
275   return actual;
276 }
277 #endif
278 
279 void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
280   // Debug naming is only used for Android target builds. For Linux targets,
281   // we'll still call prctl but it won't do anything until we upstream the prctl.
282   if (kIsTargetFuchsia || !kIsTargetBuild) {
283     return;
284   }
285 
286   // lock as std::map is not thread-safe
287   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
288 
289   std::string debug_friendly_name("dalvik-");
290   debug_friendly_name += name;
291   auto it = debugStrMap.find(debug_friendly_name);
292 
293   if (it == debugStrMap.end()) {
294     it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
295   }
296 
297   DCHECK(it != debugStrMap.end());
298 #if defined(PR_SET_VMA) && defined(__linux__)
299   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
300 #else
301   // Prevent variable unused compiler errors.
302   UNUSED(map_ptr, size);
303 #endif
304 }
305 
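// A minimal usage sketch (illustrative only, not part of the original file):
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("example-space",
//                                     /*addr=*/nullptr,
//                                     /*byte_count=*/16 * KB,
//                                     PROT_READ | PROT_WRITE,
//                                     /*low_4gb=*/false,
//                                     /*reuse=*/false,
//                                     /*reservation=*/nullptr,
//                                     &error_msg,
//                                     /*use_debug_name=*/true);
//   CHECK(map.IsValid()) << error_msg;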
306 MemMap MemMap::MapAnonymous(const char* name,
307                             uint8_t* addr,
308                             size_t byte_count,
309                             int prot,
310                             bool low_4gb,
311                             bool reuse,
312                             /*inout*/MemMap* reservation,
313                             /*out*/std::string* error_msg,
314                             bool use_debug_name) {
315 #ifndef __LP64__
316   UNUSED(low_4gb);
317 #endif
318   if (byte_count == 0) {
319     *error_msg = "Empty MemMap requested.";
320     return Invalid();
321   }
322   size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());
323 
324   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
325   if (reuse) {
326     // reuse means it is okay that it overlaps an existing page mapping.
327     // Only use this if you actually made the page reservation yourself.
328     CHECK(addr != nullptr);
329     DCHECK(reservation == nullptr);
330 
331     DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
332     flags |= MAP_FIXED;
333   } else if (reservation != nullptr) {
334     CHECK(addr != nullptr);
335     if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
336       return MemMap::Invalid();
337     }
338     flags |= MAP_FIXED;
339   }
340 
341   unique_fd fd;
342 
343   // We need to store and potentially set an error number for pretty printing of errors
344   int saved_errno = 0;
345 
346   void* actual = nullptr;
347 
348 #if defined(__linux__)
349   // Recent kernels have a bug where the address hint might be ignored.
350   // See https://lore.kernel.org/all/20241115215256.578125-1-kaleshsingh@google.com/
351   // We use MAP_FIXED_NOREPLACE to tell the kernel it must allocate at the address or fail.
352   // If the fixed-address allocation fails, we fall back to the default path (random address).
353   // Therefore, a non-null 'addr' still behaves as hint-only as far as the ART API is concerned.
354   if ((flags & MAP_FIXED) == 0 && addr != nullptr && IsKernelVersionAtLeast(4, 17)) {
355     actual = MapInternal(
356         addr, page_aligned_byte_count, prot, flags | MAP_FIXED_NOREPLACE, fd.get(), 0, low_4gb);
357   }
358 #endif  // __linux__
359 
360   if (actual == nullptr || actual == MAP_FAILED) {
361     actual = MapInternal(addr, page_aligned_byte_count, prot, flags, fd.get(), 0, low_4gb);
362   }
363   saved_errno = errno;
364 
365   if (actual == MAP_FAILED) {
366     if (error_msg != nullptr) {
367       PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
368       *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
369                                     "See process maps in the log.",
370                                 addr,
371                                 page_aligned_byte_count,
372                                 prot,
373                                 flags,
374                                 fd.get(),
375                                 strerror(saved_errno));
376     }
377     return Invalid();
378   }
379   if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
380     return Invalid();
381   }
382 
383   if (use_debug_name) {
384     SetDebugName(actual, name, page_aligned_byte_count);
385   }
386 
387   if (reservation != nullptr) {
388     // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
389     DCHECK_EQ(actual, reservation->Begin());
390     reservation->ReleaseReservedMemory(byte_count);
391   }
392   return MemMap(name,
393                 reinterpret_cast<uint8_t*>(actual),
394                 byte_count,
395                 actual,
396                 page_aligned_byte_count,
397                 prot,
398                 reuse);
399 }
400 
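// Like MapAnonymous() with a null address hint, but the returned mapping starts at a multiple of
// `alignment`, which must be a power of two strictly greater than the page size.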
401 MemMap MemMap::MapAnonymousAligned(const char* name,
402                                    size_t byte_count,
403                                    int prot,
404                                    bool low_4gb,
405                                    size_t alignment,
406                                    /*out=*/std::string* error_msg) {
407   DCHECK(IsPowerOfTwo(alignment));
408   DCHECK_GT(alignment, GetPageSize());
409 
410   // Allocate extra 'alignment - GetPageSize()' bytes so that the mapping can be aligned.
411   MemMap ret = MapAnonymous(name,
412                             /*addr=*/nullptr,
413                             // AlignBy requires the size to be page-aligned, so
414                             // we round it up here. It is corrected afterwards with
415                             // SetSize after AlignBy.
416                             RoundUp(byte_count, GetPageSize()) + alignment - GetPageSize(),
417                             prot,
418                             low_4gb,
419                             /*reuse=*/false,
420                             /*reservation=*/nullptr,
421                             error_msg);
422   if (LIKELY(ret.IsValid())) {
423     ret.AlignBy(alignment, /*align_both_ends=*/false);
424     ret.SetSize(byte_count);
425     DCHECK_EQ(ret.Size(), byte_count);
426     DCHECK_ALIGNED_PARAM(ret.Begin(), alignment);
427   }
428   return ret;
429 }
430 
431 MemMap MemMap::MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count) {
432   if (byte_count == 0) {
433     return Invalid();
434   }
435   const size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());
436   return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
437 }
438 
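// Signed distance in bytes between two pointers, i.e. a - b.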
439 template<typename A, typename B>
440 static ptrdiff_t PointerDiff(A* a, B* b) {
441   return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
442 }
443 
444 bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
445 #if !HAVE_MREMAP_SYSCALL
446   UNUSED(source);
447   *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
448   return false;
449 #else  // !HAVE_MREMAP_SYSCALL
450   CHECK(source != nullptr);
451   CHECK(source->IsValid());
452   if (!MemMap::kCanReplaceMapping) {
453     *error = "Unable to perform atomic replace due to runtime environment!";
454     return false;
455   }
456   // Neither mapping can be a 'reuse' mapping.
457   if (source->reuse_ || reuse_) {
458     *error = "One or both mappings is not a real mmap!";
459     return false;
460   }
461   // TODO Support redzones.
462   if (source->redzone_size_ != 0 || redzone_size_ != 0) {
463     *error = "source and dest have different redzone sizes";
464     return false;
465   }
466   // Make sure they have the same offset from the actual mmap'd address
467   if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
468     *error =
469         "source starts at a different offset from the mmap. Cannot atomically replace mappings";
470     return false;
471   }
472   // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's like
473   // memcpy but the check is explicit and actually done).
474   if (source->BaseBegin() > BaseBegin() &&
475       reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
476       reinterpret_cast<uint8_t*>(source->BaseBegin())) {
477     *error = "destination memory pages overlap with source memory pages";
478     return false;
479   }
480   // Change the protection to match the new location.
481   int old_prot = source->GetProtect();
482   if (!source->Protect(GetProtect())) {
483     *error = "Could not change protections for source to those required for dest.";
484     return false;
485   }
486 
487   // Do the mremap.
488   void* res = mremap(/*old_address*/source->BaseBegin(),
489                      /*old_size*/source->BaseSize(),
490                      /*new_size*/source->BaseSize(),
491                      /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
492                      /*new_address*/BaseBegin());
493   if (res == MAP_FAILED) {
494     int saved_errno = errno;
495     // Wasn't able to move mapping. Change the protection of source back to the original one and
496     // return.
497     source->Protect(old_prot);
498     *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
499     return false;
500   }
501   CHECK(res == BaseBegin());
502 
503   // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
504   // them later.
505   size_t new_base_size = std::max(source->base_size_, base_size_);
506 
507   // Invalidate *source, don't unmap it though since it is already gone.
508   size_t source_size = source->size_;
509   source->Invalidate();
510 
511   size_ = source_size;
512   base_size_ = new_base_size;
513   // Reduce base_size if needed (this will unmap the extra pages).
514   SetSize(source_size);
515 
516   return true;
517 #endif  // !HAVE_MREMAP_SYSCALL
518 }
519 
520 MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
521                                 size_t byte_count,
522                                 int prot,
523                                 int flags,
524                                 int fd,
525                                 off_t start,
526                                 bool low_4gb,
527                                 const char* filename,
528                                 bool reuse,
529                                 /*inout*/MemMap* reservation,
530                                 /*out*/std::string* error_msg) {
531   CHECK_NE(0, prot);
532   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
533 
534   // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
535   // reservation, i.e. we expect this mapping to be contained within an existing map.
536   if (reuse && expected_ptr != nullptr) {
537     // reuse means it is okay that it overlaps an existing page mapping.
538     // Only use this if you actually made the page reservation yourself.
539     DCHECK(reservation == nullptr);
540     DCHECK(error_msg != nullptr);
541     DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
542         << ((error_msg != nullptr) ? *error_msg : std::string());
543     flags |= MAP_FIXED;
544   } else if (reservation != nullptr) {
545     DCHECK(error_msg != nullptr);
546     if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
547       return Invalid();
548     }
549     flags |= MAP_FIXED;
550   } else {
551     CHECK_EQ(0, flags & MAP_FIXED);
552     // Don't bother checking for an overlapping region here. We'll
553     // check this if required after the fact inside CheckMapRequest.
554   }
555 
556   if (byte_count == 0) {
557     *error_msg = "Empty MemMap requested";
558     return Invalid();
559   }
560   // Adjust the file offset ('start') to be page-aligned as required by mmap.
561   int page_offset = start % GetPageSize();
562   off_t page_aligned_offset = start - page_offset;
563   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
564   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, GetPageSize());
565   // The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
566   // not necessarily to virtual memory. mmap will page align 'expected' for us.
567   uint8_t* page_aligned_expected =
568       (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
569 
570   size_t redzone_size = 0;
571   if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
572     redzone_size = GetPageSize();
573     page_aligned_byte_count += redzone_size;
574   }
575 
576   uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
577                                                            page_aligned_byte_count,
578                                                            prot,
579                                                            flags,
580                                                            fd,
581                                                            page_aligned_offset,
582                                                            low_4gb));
583   if (actual == MAP_FAILED) {
584     if (error_msg != nullptr) {
585       auto saved_errno = errno;
586 
587       if (kIsDebugBuild || VLOG_IS_ON(oat)) {
588         PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
589       }
590 
591       *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
592                                 ") of file '%s' failed: %s. See process maps in the log.",
593                                 page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
594                                 static_cast<int64_t>(page_aligned_offset), filename,
595                                 strerror(saved_errno));
596     }
597     return Invalid();
598   }
599   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
600     return Invalid();
601   }
602   if (redzone_size != 0) {
603     const uint8_t *real_start = actual + page_offset;
604     const uint8_t *real_end = actual + page_offset + byte_count;
605     const uint8_t *mapping_end = actual + page_aligned_byte_count;
606 
607     MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
608     MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
609     page_aligned_byte_count -= redzone_size;
610   }
611 
612   if (reservation != nullptr) {
613     // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
614     DCHECK_EQ(actual, reservation->Begin());
615     reservation->ReleaseReservedMemory(byte_count);
616   }
617   return MemMap(filename,
618                 actual + page_offset,
619                 byte_count,
620                 actual,
621                 page_aligned_byte_count,
622                 prot,
623                 reuse,
624                 redzone_size);
625 }
626 
627 MemMap::MemMap(MemMap&& other) noexcept
628     : MemMap() {
629   swap(other);
630 }
631 
632 MemMap::~MemMap() {
633   Reset();
634 }
635 
636 void MemMap::DoReset() {
637   DCHECK(IsValid());
638   size_t real_base_size = base_size_;
639   // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
640   // before it is returned to the system.
641   if (redzone_size_ != 0) {
642     // Add redzone_size_ back to base_size or it will cause an mmap leak.
643     real_base_size += redzone_size_;
644     MEMORY_TOOL_MAKE_UNDEFINED(
645         reinterpret_cast<char*>(base_begin_) + real_base_size - redzone_size_,
646         redzone_size_);
647   }
648 
649   if (!reuse_) {
650     MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
651     if (!already_unmapped_) {
652       int result = TargetMUnmap(base_begin_, real_base_size);
653       if (result == -1) {
654         PLOG(FATAL) << "munmap failed";
655       }
656     }
657   }
658 
659   Invalidate();
660 }
661 
662 void MemMap::ResetInForkedProcess() {
663   // This should be called on a map that has MADV_DONTFORK.
664   // The kernel has already unmapped this.
665   already_unmapped_ = true;
666   Reset();
667 }
668 
669 void MemMap::Invalidate() {
670   DCHECK(IsValid());
671 
672   // Remove it from gMaps.
673   // TODO(b/307704260) Move MemMap::Init MemMap::Shutdown out of Runtime init/shutdown.
674   if (mem_maps_lock_ != nullptr) {  // Runtime was shutdown.
675     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
676     auto it = GetGMapsEntry(*this);
677     gMaps->erase(it);
678   }
679 
680   // Mark it as invalid.
681   base_size_ = 0u;
682   DCHECK(!IsValid());
683 }
684 
685 void MemMap::swap(MemMap& other) {
686   if (IsValid() || other.IsValid()) {
687     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
688     DCHECK(gMaps != nullptr);
689     auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
690     auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
691     if (IsValid()) {
692       DCHECK(this_it != gMaps->end());
693       DCHECK_EQ(this_it->second, this);
694       this_it->second = &other;
695     }
696     if (other.IsValid()) {
697       DCHECK(other_it != gMaps->end());
698       DCHECK_EQ(other_it->second, &other);
699       other_it->second = this;
700     }
701     // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
702     // with the `gMaps` key when other threads try to use `gMaps`.
703     SwapMembers(other);
704   } else {
705     SwapMembers(other);
706   }
707 }
708 
709 void MemMap::SwapMembers(MemMap& other) {
710   name_.swap(other.name_);
711   std::swap(begin_, other.begin_);
712   std::swap(size_, other.size_);
713   std::swap(base_begin_, other.base_begin_);
714   std::swap(base_size_, other.base_size_);
715   std::swap(prot_, other.prot_);
716   std::swap(reuse_, other.reuse_);
717   std::swap(already_unmapped_, other.already_unmapped_);
718   std::swap(redzone_size_, other.redzone_size_);
719 }
720 
721 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
722                size_t base_size, int prot, bool reuse, size_t redzone_size)
723     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
724       prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
725   if (size_ == 0) {
726     CHECK(begin_ == nullptr);
727     CHECK(base_begin_ == nullptr);
728     CHECK_EQ(base_size_, 0U);
729   } else {
730     CHECK(begin_ != nullptr);
731     CHECK(base_begin_ != nullptr);
732     CHECK_NE(base_size_, 0U);
733 
734     // Add it to gMaps.
735     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
736     DCHECK(gMaps != nullptr);
737     gMaps->insert(std::make_pair(base_begin_, this));
738   }
739 }
740 
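// Splits off the tail of this mapping starting at `new_end`: the tail is remapped as a fresh
// anonymous mapping with protection `tail_prot` and returned, while this mapping is shrunk to
// end at `new_end`.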
741 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
742                           const char* tail_name,
743                           int tail_prot,
744                           std::string* error_msg,
745                           bool use_debug_name) {
746   return RemapAtEnd(new_end,
747                     tail_name,
748                     tail_prot,
749                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
750                     /* fd= */ -1,
751                     /* offset= */ 0,
752                     error_msg,
753                     use_debug_name);
754 }
755 
756 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
757                           const char* tail_name,
758                           int tail_prot,
759                           int flags,
760                           int fd,
761                           off_t offset,
762                           std::string* error_msg,
763                           bool use_debug_name) {
764   DCHECK_GE(new_end, Begin());
765   DCHECK_LE(new_end, End());
766   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
767   DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
768   DCHECK_ALIGNED_PARAM(base_begin_, GetPageSize());
769   DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, GetPageSize());
770   DCHECK_ALIGNED_PARAM(new_end, GetPageSize());
771   uint8_t* old_end = begin_ + size_;
772   uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
773   uint8_t* new_base_end = new_end;
774   DCHECK_LE(new_base_end, old_base_end);
775   if (new_base_end == old_base_end) {
776     return Invalid();
777   }
778   size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
779   size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
780   DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
781   size_t tail_size = old_end - new_end;
782   uint8_t* tail_base_begin = new_base_end;
783   size_t tail_base_size = old_base_end - new_base_end;
784   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
785   DCHECK_ALIGNED_PARAM(tail_base_size, GetPageSize());
786 
787   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
788   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
789   // removes old mappings for the overlapping region. This makes the operation atomic
790   // and prevents other threads from racing to allocate memory in the requested region.
791   uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
792                                                           tail_base_size,
793                                                           tail_prot,
794                                                           flags,
795                                                           fd,
796                                                           offset));
797   if (actual == MAP_FAILED) {
798     *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed: %s. See process "
799                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
800                               fd, strerror(errno));
801     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
802     return Invalid();
803   }
804   // Update *this.
805   if (new_base_size == 0u) {
806     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
807     auto it = GetGMapsEntry(*this);
808     gMaps->erase(it);
809   }
810 
811   if (use_debug_name) {
812     SetDebugName(actual, tail_name, tail_base_size);
813   }
814 
815   size_ = new_size;
816   base_size_ = new_base_size;
817   // Return the new mapping.
818   return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
819 }
820 
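// Carves the first `byte_count` bytes out of this reservation and returns them as a new MemMap;
// the remainder (if any) stays behind in this reservation (see ReleaseReservedMemory()).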
821 MemMap MemMap::TakeReservedMemory(size_t byte_count, bool reuse) {
822   uint8_t* begin = Begin();
823   ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
824   size_t base_size = RoundUp(byte_count, GetPageSize());
825   return MemMap(name_, begin, byte_count, begin, base_size, prot_, reuse);
826 }
827 
828 void MemMap::ReleaseReservedMemory(size_t byte_count) {
829   // Check the reservation mapping.
830   DCHECK(IsValid());
831   DCHECK(!reuse_);
832   DCHECK(!already_unmapped_);
833   DCHECK_EQ(redzone_size_, 0u);
834   DCHECK_EQ(begin_, base_begin_);
835   DCHECK_EQ(size_, base_size_);
836   DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
837   DCHECK_ALIGNED_PARAM(size_, GetPageSize());
838 
839   // Check and round up the `byte_count`.
840   DCHECK_NE(byte_count, 0u);
841   DCHECK_LE(byte_count, size_);
842   byte_count = RoundUp(byte_count, GetPageSize());
843 
844   if (byte_count == size_) {
845     Invalidate();
846   } else {
847     // Shrink the reservation MemMap and update its `gMaps` entry.
848     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
849     auto it = GetGMapsEntry(*this);
850     auto node = gMaps->extract(it);
851     begin_ += byte_count;
852     size_ -= byte_count;
853     base_begin_ = begin_;
854     base_size_ = size_;
855     node.key() = base_begin_;
856     gMaps->insert(std::move(node));
857   }
858 }
859 
860 void MemMap::FillWithZero(bool release_eagerly) {
861   if (base_begin_ != nullptr && base_size_ != 0) {
862     ZeroMemory(base_begin_, base_size_, release_eagerly);
863   }
864 }
865 
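// Asks the kernel (Linux only) not to copy these pages into child processes on fork via
// MADV_DONTFORK; returns the madvise() result, or -1 where unsupported.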
866 int MemMap::MadviseDontFork() {
867 #if defined(__linux__)
868   if (base_begin_ != nullptr || base_size_ != 0) {
869     return madvise(base_begin_, base_size_, MADV_DONTFORK);
870   }
871 #endif
872   return -1;
873 }
874 
875 bool MemMap::Sync() {
876 #ifdef _WIN32
877   // TODO: add FlushViewOfFile support.
878   PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
879   return false;
880 #else
881   // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
882   // protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind
883   // only accepts page-aligned base address, and excludes the higher-end noaccess protection
884   // from the msync range. b/27552451.
885   return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
886 #endif
887 }
888 
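// Changes the protection of the underlying pages with mprotect() and records the new protection
// in prot_ on success; an empty mapping only records the value.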
889 bool MemMap::Protect(int prot) {
890   if (base_begin_ == nullptr && base_size_ == 0) {
891     prot_ = prot;
892     return true;
893   }
894 
895 #ifndef _WIN32
896   if (mprotect(base_begin_, base_size_, prot) == 0) {
897     prot_ = prot;
898     return true;
899   }
900 #endif
901 
902   PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
903               << prot << ") failed";
904   return false;
905 }
906 
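// Returns true if the registered mappings form a contiguous chain from begin_map to end_map,
// i.e. there is no unmapped gap between them.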
907 bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
908   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
909   CHECK(begin_map.IsValid());
910   CHECK(end_map.IsValid());
911   CHECK(HasMemMap(begin_map));
912   CHECK(HasMemMap(end_map));
913   CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
914   MemMap* map = &begin_map;
915   while (map->BaseBegin() != end_map.BaseBegin()) {
916     MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
917     if (next_map == nullptr) {
918       // Found a gap.
919       return false;
920     }
921     map = next_map;
922   }
923   return true;
924 }
925 
926 void MemMap::DumpMaps(std::ostream& os, bool terse) {
927   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
928   DumpMapsLocked(os, terse);
929 }
930 
931 void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
932   const auto& mem_maps = *gMaps;
933   if (!terse) {
934     os << mem_maps;
935     return;
936   }
937 
938   // Terse output example:
939   //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
940   //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
941   // The details:
942   //   "+0x20P" means 0x20 pages taken by a single mapping,
943   //   "~0x11dP" means a gap of 0x11d pages,
944   //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
945   os << "MemMap:" << std::endl;
946   for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
947     MemMap* map = it->second;
948     void* base = it->first;
949     CHECK_EQ(base, map->BaseBegin());
950     os << "[MemMap: " << base;
951     ++it;
952     // Merge consecutive maps with the same protect flags and name.
953     constexpr size_t kMaxGaps = 9;
954     size_t num_gaps = 0;
955     size_t num = 1u;
956     size_t size = map->BaseSize();
957     CHECK_ALIGNED_PARAM(size, GetPageSize());
958     void* end = map->BaseEnd();
959     while (it != maps_end &&
960         it->second->GetProtect() == map->GetProtect() &&
961         it->second->GetName() == map->GetName() &&
962         (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
963       if (it->second->BaseBegin() != end) {
964         ++num_gaps;
965         os << "+0x" << std::hex << (size / GetPageSize()) << "P";
966         if (num != 1u) {
967           os << "(" << std::dec << num << ")";
968         }
969         size_t gap =
970             reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
971         CHECK_ALIGNED_PARAM(gap, GetPageSize());
972         os << "~0x" << std::hex << (gap / GetPageSize()) << "P";
973         num = 0u;
974         size = 0u;
975       }
976       CHECK_ALIGNED_PARAM(it->second->BaseSize(), GetPageSize());
977       ++num;
978       size += it->second->BaseSize();
979       end = it->second->BaseEnd();
980       ++it;
981     }
982     os << "+0x" << std::hex << (size / GetPageSize()) << "P";
983     if (num != 1u) {
984       os << "(" << std::dec << num << ")";
985     }
986     os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
987   }
988 }
989 
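// Returns true if `map` is registered in gMaps under its base address.
// The caller must hold mem_maps_lock_.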
990 bool MemMap::HasMemMap(MemMap& map) {
991   void* base_begin = map.BaseBegin();
992   for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
993        it != end && it->first == base_begin; ++it) {
994     if (it->second == &map) {
995       return true;
996     }
997   }
998   return false;
999 }
1000 
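// Among the mappings registered at exactly `address`, returns the one with the largest base size,
// or nullptr if there is none. The caller must hold mem_maps_lock_.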
1001 MemMap* MemMap::GetLargestMemMapAt(void* address) {
1002   size_t largest_size = 0;
1003   MemMap* largest_map = nullptr;
1004   DCHECK(gMaps != nullptr);
1005   for (auto it = gMaps->lower_bound(address), end = gMaps->end();
1006        it != end && it->first == address; ++it) {
1007     MemMap* map = it->second;
1008     CHECK(map != nullptr);
1009     if (largest_size < map->BaseSize()) {
1010       largest_size = map->BaseSize();
1011       largest_map = map;
1012     }
1013   }
1014   return largest_map;
1015 }
1016 
1017 void MemMap::Init() {
1018   if (mem_maps_lock_ != nullptr) {
1019     // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
1020     return;
1021   }
1022 
1023   mem_maps_lock_ = new std::mutex();
1024   // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
1025   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1026 #ifdef ART_PAGE_SIZE_AGNOSTIC
1027   page_size_ = GetPageSizeSlow();
1028 #endif
1029   CHECK_GE(GetPageSize(), kMinPageSize);
1030   CHECK_LE(GetPageSize(), kMaxPageSize);
1031 #if USE_ART_LOW_4G_ALLOCATOR
1032   // Initialize linear scan to random position.
1033   CHECK_EQ(next_mem_pos_, 0u);
1034   next_mem_pos_ = GenerateNextMemPos(GetPageSize());
1035 #endif
1036   DCHECK(gMaps == nullptr);
1037   gMaps = new Maps;
1038 
1039   TargetMMapInit();
1040 }
1041 
1042 bool MemMap::IsInitialized() { return mem_maps_lock_ != nullptr; }
1043 
1044 void MemMap::Shutdown() {
1045   if (mem_maps_lock_ == nullptr) {
1046     // If MemMap::Shutdown is called more than once, there is no effect.
1047     return;
1048   }
1049   {
1050     // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
1051     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1052     DCHECK(gMaps != nullptr);
1053     delete gMaps;
1054     gMaps = nullptr;
1055   }
1056 #if USE_ART_LOW_4G_ALLOCATOR
1057   next_mem_pos_ = 0u;
1058 #endif
1059   delete mem_maps_lock_;
1060   mem_maps_lock_ = nullptr;
1061 }
1062 
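// Shrinks the mapping to `new_size` bytes and unmaps any trailing pages that are no longer
// needed; growing the mapping is not supported.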
1063 void MemMap::SetSize(size_t new_size) {
1064   CHECK_LE(new_size, size_);
1065   size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
1066                                  GetPageSize());
1067   if (new_base_size == base_size_) {
1068     size_ = new_size;
1069     return;
1070   }
1071   CHECK_LT(new_base_size, base_size_);
1072   MEMORY_TOOL_MAKE_UNDEFINED(
1073       reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
1074                               new_base_size),
1075       base_size_ - new_base_size);
1076   CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
1077                         reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
1078                         base_size_ - new_base_size), 0)
1079                         << new_base_size << " " << base_size_;
1080   base_size_ = new_base_size;
1081   size_ = new_size;
1082 }
1083 
1084 void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
1085                                             int prot,
1086                                             int flags,
1087                                             int fd,
1088                                             off_t offset) {
1089 #if USE_ART_LOW_4G_ALLOCATOR
1090   void* actual = MAP_FAILED;
1091 
1092   bool first_run = true;
1093 
1094   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1095   for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += GetPageSize()) {
1096     // Use gMaps as an optimization to skip over large maps.
1097     // Find the first map which is address > ptr.
1098     auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
1099     if (it != gMaps->begin()) {
1100       auto before_it = it;
1101       --before_it;
1102       // Start at the end of the map before the upper bound.
1103       ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
1104       CHECK_ALIGNED_PARAM(ptr, GetPageSize());
1105     }
1106     while (it != gMaps->end()) {
1107       // How much space do we have until the next map?
1108       size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
1109       // If the space may be sufficient, break out of the loop.
1110       if (delta >= length) {
1111         break;
1112       }
1113       // Otherwise, skip to the end of the map.
1114       ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
1115       CHECK_ALIGNED_PARAM(ptr, GetPageSize());
1116       ++it;
1117     }
1118 
1119     // Try to see if we get lucky with this address since none of the ART maps overlap.
1120     actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
1121     if (actual != MAP_FAILED) {
1122       next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
1123       return actual;
1124     }
1125 
1126     if (4U * GB - ptr < length) {
1127       // Not enough memory until 4GB.
1128       if (first_run) {
1129         // Try another time from the bottom.
1130         ptr = LOW_MEM_START - GetPageSize();
1131         first_run = false;
1132         continue;
1133       } else {
1134         // Second try failed.
1135         break;
1136       }
1137     }
1138 
1139     uintptr_t tail_ptr;
1140 
1141     // Check pages are free.
1142     bool safe = true;
1143     for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += GetPageSize()) {
1144       if (msync(reinterpret_cast<void*>(tail_ptr), GetPageSize(), 0) == 0) {
1145         safe = false;
1146         break;
1147       } else {
1148         DCHECK_EQ(errno, ENOMEM);
1149       }
1150     }
1151 
1152     next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region
1153 
1154     if (safe == true) {
1155       actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
1156       if (actual != MAP_FAILED) {
1157         return actual;
1158       }
1159     } else {
1160       // Skip over last page.
1161       ptr = tail_ptr;
1162     }
1163   }
1164 
1165   if (actual == MAP_FAILED) {
1166     LOG(ERROR) << "Could not find contiguous low-memory space.";
1167     errno = ENOMEM;
1168   }
1169   return actual;
1170 #else
1171   UNUSED(length, prot, flags, fd, offset);
1172   LOG(FATAL) << "Unreachable";
1173   UNREACHABLE();
1174 #endif
1175 }
1176 
1177 void* MemMap::MapInternal(void* addr,
1178                           size_t length,
1179                           int prot,
1180                           int flags,
1181                           int fd,
1182                           off_t offset,
1183                           bool low_4gb) {
1184 #ifdef __LP64__
1185   // When requesting low_4g memory and having an expectation, the requested range should fit into
1186   // 4GB.
1187   if (low_4gb && (
1188       // Start out of bounds.
1189       (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
1190       // End out of bounds. For simplicity, this will fail for the last page of memory.
1191       ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
1192     LOG(ERROR) << "The requested address space (" << addr << ", "
1193                << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
1194                << ") cannot fit in low_4gb";
1195     return MAP_FAILED;
1196   }
1197 #else
1198   UNUSED(low_4gb);
1199 #endif
1200   DCHECK_ALIGNED_PARAM(length, GetPageSize());
1201   // TODO:
1202   // A page allocator would be a useful abstraction here, as
1203   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
1204   void* actual = MAP_FAILED;
1205 #if USE_ART_LOW_4G_ALLOCATOR
1206   // MAP_32BIT only available on x86_64.
1207   if (low_4gb && addr == nullptr) {
1208     // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
1209     // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
1210     // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
1211     //
1212     // To avoid the issue, always map non-executable first, and mprotect if necessary.
1213     const int orig_prot = prot;
1214     const int prot_non_exec = prot & ~PROT_EXEC;
1215     actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);
1216 
1217     if (actual == MAP_FAILED) {
1218       return MAP_FAILED;
1219     }
1220 
1221     // See if we need to remap with the executable bit now.
1222     if (orig_prot != prot_non_exec) {
1223       if (mprotect(actual, length, orig_prot) != 0) {
1224         PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
1225         TargetMUnmap(actual, length);
1226         errno = ENOMEM;
1227         return MAP_FAILED;
1228       }
1229     }
1230     return actual;
1231   }
1232 
1233   actual = TargetMMap(addr, length, prot, flags, fd, offset);
1234 #else
1235 #if defined(__LP64__)
1236   if (low_4gb && addr == nullptr) {
1237     flags |= MAP_32BIT;
1238   }
1239 #endif
1240   actual = TargetMMap(addr, length, prot, flags, fd, offset);
1241 #endif
1242   return actual;
1243 }
1244 
1245 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
1246   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
1247                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
1248                      mem_map.GetName().c_str());
1249   return os;
1250 }
1251 
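// Touches the first byte of every page in the mapping to verify that it is actually readable;
// a fault here indicates that the protection is not what we expect.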
1252 void MemMap::TryReadable() {
1253   if (base_begin_ == nullptr && base_size_ == 0) {
1254     return;
1255   }
1256   CHECK_NE(prot_ & PROT_READ, 0);
1257   volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
1258   volatile uint8_t* end = begin + base_size_;
1259   DCHECK(IsAlignedParam(begin, GetPageSize()));
1260   DCHECK(IsAlignedParam(end, GetPageSize()));
1261   // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
1262   // reads.
1263   for (volatile uint8_t* ptr = begin; ptr < end; ptr += GetPageSize()) {
1264     // This read could fault if protection wasn't set correctly.
1265     uint8_t value = *ptr;
1266     UNUSED(value);
1267   }
1268 }
1269 
1270 static void inline RawClearMemory(uint8_t* begin, uint8_t* end) {
1271   std::fill(begin, end, 0);
1272 }
1273 
1274 #if defined(__linux__)
1275 static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident, size_t page_size) {
1276   DCHECK(IsAlignedParam(page_begin, page_size));
1277   DCHECK(IsAlignedParam(page_begin + size, page_size));
1278   if (resident) {
1279     RawClearMemory(page_begin, page_begin + size);
1280     // Note we check madvise return value against -1, as it seems old kernels
1281     // can return 1.
1282 #ifdef MADV_FREE
1283     bool res = madvise(page_begin, size, MADV_FREE);
1284     CHECK_NE(res, -1) << "madvise failed";
1285 #endif  // MADV_FREE
1286   } else {
1287     bool res = madvise(page_begin, size, MADV_DONTNEED);
1288     CHECK_NE(res, -1) << "madvise failed";
1289   }
1290 }
1291 #endif  // __linux__
1292 
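// Zeroes [address, address + length). The unaligned head and tail are cleared directly; whole
// pages in between are released via madvise(): MADV_DONTNEED when releasing eagerly (or when
// mincore() is unavailable or fails), otherwise resident pages are cleared and marked MADV_FREE
// while non-resident pages get MADV_DONTNEED.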
1293 void ZeroMemory(void* address, size_t length, bool release_eagerly) {
1294   if (length == 0) {
1295     return;
1296   }
1297   uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
1298   uint8_t* const mem_end = mem_begin + length;
1299   uint8_t* const page_begin = AlignUp(mem_begin, MemMap::GetPageSize());
1300   uint8_t* const page_end = AlignDown(mem_end, MemMap::GetPageSize());
1301   if (!kMadviseZeroes || page_begin >= page_end) {
1302     // No possible area to madvise.
1303     RawClearMemory(mem_begin, mem_end);
1304     return;
1305   }
1306   // Spans one or more pages.
1307   DCHECK_LE(mem_begin, page_begin);
1308   DCHECK_LE(page_begin, page_end);
1309   DCHECK_LE(page_end, mem_end);
1310 #ifdef _WIN32
1311   UNUSED(release_eagerly);
1312   LOG(WARNING) << "ZeroMemory does not madvise on Windows.";
1313   RawClearMemory(mem_begin, mem_end);
1314 #else
1315   RawClearMemory(mem_begin, page_begin);
1316   RawClearMemory(page_end, mem_end);
1317 // mincore() is linux-specific syscall.
1318 #if defined(__linux__)
1319   if (!release_eagerly) {
1320     size_t vec_len = (page_end - page_begin) / MemMap::GetPageSize();
1321     std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
1322     if (mincore(page_begin, page_end - page_begin, vec.get()) == 0) {
1323       uint8_t* current_page = page_begin;
1324       size_t current_size = MemMap::GetPageSize();
1325       uint32_t old_state = vec[0] & 0x1;
1326       for (size_t i = 1; i < vec_len; ++i) {
1327         uint32_t new_state = vec[i] & 0x1;
1328         if (old_state == new_state) {
1329           current_size += MemMap::GetPageSize();
1330         } else {
1331           ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
1332           current_page = current_page + current_size;
1333           current_size = MemMap::GetPageSize();
1334           old_state = new_state;
1335         }
1336       }
1337       ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
1338       return;
1339     }
1340     static bool logged_about_mincore = false;
1341     if (!logged_about_mincore) {
1342       PLOG(WARNING) << "mincore failed, falling back to madvise MADV_DONTNEED";
1343       logged_about_mincore = true;
1344     }
1345     // mincore failed, fall through to MADV_DONTNEED.
1346   }
1347 #else
1348   UNUSED(release_eagerly);
1349 #endif  // __linux__
1350   bool res = madvise(page_begin, page_end - page_begin, MADV_DONTNEED);
1351   CHECK_NE(res, -1) << "madvise failed";
1352 #endif  // _WIN32
1353 }
1354 
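// Trims the front of the mapping (and the back too if `align_both_ends` is set) so that it is
// aligned to `alignment`, unmapping the trimmed pages and re-keying the gMaps entry when the
// base address changes.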
1355 void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
1356   CHECK_EQ(begin_, base_begin_) << "Unsupported";
1357   CHECK_EQ(size_, base_size_) << "Unsupported";
1358   CHECK_GT(alignment, static_cast<size_t>(GetPageSize()));
1359   CHECK_ALIGNED_PARAM(alignment, GetPageSize());
1360   CHECK(!reuse_);
1361   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
1362       (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
1363     // Already aligned.
1364     return;
1365   }
1366   uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
1367   uint8_t* aligned_base_begin = AlignUp(base_begin, alignment);
1368   CHECK_LE(base_begin, aligned_base_begin);
1369   if (base_begin < aligned_base_begin) {
1370     MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
1371     CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
1372         << "base_begin=" << reinterpret_cast<void*>(base_begin)
1373         << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
1374   }
1375   uint8_t* base_end = base_begin + base_size_;
1376   size_t aligned_base_size;
1377   if (align_both_ends) {
1378     uint8_t* aligned_base_end = AlignDown(base_end, alignment);
1379     CHECK_LE(aligned_base_end, base_end);
1380     CHECK_LT(aligned_base_begin, aligned_base_end)
1381         << "base_begin = " << reinterpret_cast<void*>(base_begin)
1382         << " base_end = " << reinterpret_cast<void*>(base_end);
1383     aligned_base_size = aligned_base_end - aligned_base_begin;
1384     CHECK_GE(aligned_base_size, alignment);
1385     if (aligned_base_end < base_end) {
1386       MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
1387       CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
1388           << "base_end=" << reinterpret_cast<void*>(base_end)
1389           << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
1390     }
1391   } else {
1392     CHECK_LT(aligned_base_begin, base_end)
1393         << "base_begin = " << reinterpret_cast<void*>(base_begin);
1394     aligned_base_size = base_end - aligned_base_begin;
1395   }
1396   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1397   if (base_begin < aligned_base_begin) {
1398     auto it = GetGMapsEntry(*this);
1399     auto node = gMaps->extract(it);
1400     node.key() = aligned_base_begin;
1401     gMaps->insert(std::move(node));
1402   }
1403   base_begin_ = aligned_base_begin;
1404   base_size_ = aligned_base_size;
1405   begin_ = aligned_base_begin;
1406   size_ = aligned_base_size;
1407   DCHECK(gMaps != nullptr);
1408 }
1409 
1410 }  // namespace art
1411