/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <stdlib.h>
#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
#include <sys/resource.h>
#endif

#if defined(__linux__)
#include <sys/prctl.h>
#endif

#include <map>
#include <memory>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"

#include "allocator.h"
#include "bit_utils.h"
#include "globals.h"
#include "logging.h"  // For VLOG_IS_ON.
#include "memory_tool.h"
#include "mman.h"  // For the PROT_* and MAP_* constants.
#include "utils.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

using android::base::StringPrintf;
using android::base::unique_fd;

template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap =
    std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;

using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;

// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;

// A map containing unique strings used for identifying anonymous mappings.
static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());

// Retrieve iterator to a `gMaps` entry that is known to exist.
Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
  DCHECK(map.IsValid());
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
       it != end && it->first == map.BaseBegin();
       ++it) {
    if (it->second == &map) {
      return it;
    }
  }
  LOG(FATAL) << "MemMap not found";
  UNREACHABLE();
}

std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::mutex* MemMap::mem_maps_lock_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.
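//
// A worked example (illustrative values, not the actual configuration): with
// ART_BASE_ADDRESS = 0x70000000 and 4 KiB pages, CLZ(0x70000000) = 1, so
// mask_ones = (1 << 30) - 1 = 0x3fffffff and mask = 0x3ffff000. An input of
// 0x123456789a then yields start = (0x123456789a & 0x3ffff000) + 0x10000
//                                = 0x34567000 + 0x10000 = 0x34577000.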

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the gMaps variable or the /proc/self/maps entry.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (auto& pair : *gMaps) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                             std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = TargetMUnmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try and generate a full error
    // message with the overlapping mapping. There's no guarantee that
    // there will be an overlap, though, since
    // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   set, so it may place the mapping elsewhere even when nothing overlaps.
    // - There might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.

    // Tell the client the mappings that were in place at the time.
    if (kIsDebugBuild) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    }

    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    *error_msg = os.str();
  }
  return false;
}

bool MemMap::CheckReservation(uint8_t* expected_ptr,
                              size_t byte_count,
                              const char* name,
                              const MemMap& reservation,
                              /*out*/std::string* error_msg) {
  if (!reservation.IsValid()) {
    *error_msg = StringPrintf("Invalid reservation for %s", name);
    return false;
  }
  DCHECK_ALIGNED(reservation.Begin(), kPageSize);
  if (reservation.Begin() != expected_ptr) {
    *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
                              name,
                              reservation.Begin(),
                              expected_ptr);
    return false;
  }
  if (byte_count > reservation.Size()) {
    *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
                              byte_count,
                              reservation.Size());
    return false;
  }
  return true;
}


#if USE_ART_LOW_4G_ALLOCATOR
void* MemMap::TryMemMapLow4GB(void* ptr,
                              size_t page_aligned_byte_count,
                              int prot,
                              int flags,
                              int fd,
                              off_t offset) {
  void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      TargetMUnmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
  // Debug naming is only used for Android target builds. For Linux targets,
  // we'll still call prctl but it won't do anything until we upstream the prctl.
  if (kIsTargetFuchsia || !kIsTargetBuild) {
    return;
  }

  // Lock as std::map is not thread-safe.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);

  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += name;
  auto it = debugStrMap.find(debug_friendly_name);

  if (it == debugStrMap.end()) {
    it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
  }

  DCHECK(it != debugStrMap.end());
#if defined(PR_SET_VMA) && defined(__linux__)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
#else
  // Prevent variable unused compiler errors.
  UNUSED(map_ptr, size);
#endif
}

MemMap MemMap::MapAnonymous(const char* name,
                            uint8_t* addr,
                            size_t byte_count,
                            int prot,
                            bool low_4gb,
                            bool reuse,
                            /*inout*/MemMap* reservation,
                            /*out*/std::string* error_msg,
                            bool use_debug_name) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    *error_msg = "Empty MemMap requested.";
    return Invalid();
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(addr != nullptr);
    DCHECK(reservation == nullptr);

    DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else if (reservation != nullptr) {
    CHECK(addr != nullptr);
    if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
      return MemMap::Invalid();
    }
    flags |= MAP_FIXED;
  }

  unique_fd fd;

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(addr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                "See process maps in the log.",
                                addr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return Invalid();
  }
  if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
    return Invalid();
  }

  if (use_debug_name) {
    SetDebugName(actual, name, page_aligned_byte_count);
  }

  if (reservation != nullptr) {
    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
    DCHECK_EQ(actual, reservation->Begin());
    reservation->ReleaseReservedMemory(byte_count);
  }
  return MemMap(name,
                reinterpret_cast<uint8_t*>(actual),
                byte_count,
                actual,
                page_aligned_byte_count,
                prot,
                reuse);
}
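
// A minimal usage sketch of the above (hypothetical caller code): map 16 KiB of
// readable/writable anonymous memory anywhere in the address space.
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("my-region",
//                                     /*addr=*/ nullptr,
//                                     16 * KB,
//                                     PROT_READ | PROT_WRITE,
//                                     /*low_4gb=*/ false,
//                                     /*reuse=*/ false,
//                                     /*reservation=*/ nullptr,
//                                     &error_msg);
//   CHECK(map.IsValid()) << error_msg;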

MemMap MemMap::MapAnonymousAligned(const char* name,
                                   size_t byte_count,
                                   int prot,
                                   bool low_4gb,
                                   size_t alignment,
                                   /*out=*/std::string* error_msg) {
  DCHECK(IsPowerOfTwo(alignment));
  DCHECK_GT(alignment, kPageSize);
  // Allocate extra 'alignment - kPageSize' bytes so that the mapping can be aligned.
  MemMap ret = MapAnonymous(name,
                            /*addr=*/nullptr,
                            byte_count + alignment - kPageSize,
                            prot,
                            low_4gb,
                            /*reuse=*/false,
                            /*reservation=*/nullptr,
                            error_msg);
  if (LIKELY(ret.IsValid())) {
    ret.AlignBy(alignment, /*align_both_ends=*/false);
    ret.SetSize(byte_count);
    DCHECK_EQ(ret.Size(), byte_count);
    DCHECK_ALIGNED_PARAM(ret.Begin(), alignment);
  }
  return ret;
}

MemMap MemMap::MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return Invalid();
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
}

template<typename A, typename B>
static ptrdiff_t PointerDiff(A* a, B* b) {
  return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
}

bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
#if !HAVE_MREMAP_SYSCALL
  UNUSED(source);
  *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
  return false;
#else  // !HAVE_MREMAP_SYSCALL
  CHECK(source != nullptr);
  CHECK(source->IsValid());
  if (!MemMap::kCanReplaceMapping) {
    *error = "Unable to perform atomic replace due to runtime environment!";
    return false;
  }
  // Neither mapping may be a 'reuse' mapping.
  if (source->reuse_ || reuse_) {
    *error = "One or both mappings is not a real mmap!";
    return false;
  }
  // TODO Support redzones.
  if (source->redzone_size_ != 0 || redzone_size_ != 0) {
    *error = "source and dest have different redzone sizes";
    return false;
  }
  // Make sure they have the same offset from the actual mmap'd address.
  if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
    *error =
        "source starts at a different offset from the mmap. Cannot atomically replace mappings";
    return false;
  }
  // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's
  // like memcpy but the check is explicit and actually done).
  if (source->BaseBegin() > BaseBegin() &&
      reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
      reinterpret_cast<uint8_t*>(source->BaseBegin())) {
    *error = "destination memory pages overlap with source memory pages";
    return false;
  }
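  // For illustration (hypothetical addresses): with the destination at 0x10000 and
  // the source at 0x12000, both 0x4000 bytes long, the remapped destination range
  // [0x10000, 0x14000) would overlap the source range [0x12000, 0x16000), so the
  // check above rejects the replace.
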
  // Change the protection to match the new location.
  int old_prot = source->GetProtect();
  if (!source->Protect(GetProtect())) {
    *error = "Could not change protections for source to those required for dest.";
    return false;
  }

  // Do the mremap.
  void* res = mremap(/*old_address*/source->BaseBegin(),
                     /*old_size*/source->BaseSize(),
                     /*new_size*/source->BaseSize(),
                     /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
                     /*new_address*/BaseBegin());
  if (res == MAP_FAILED) {
    int saved_errno = errno;
    // Wasn't able to move mapping. Change the protection of source back to the original one and
    // return.
    source->Protect(old_prot);
    *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
    return false;
  }
  CHECK(res == BaseBegin());

  // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will
  // unmap them later.
  size_t new_base_size = std::max(source->base_size_, base_size_);

  // Invalidate *source, don't unmap it though since it is already gone.
  size_t source_size = source->size_;
  source->Invalidate();

  size_ = source_size;
  base_size_ = new_base_size;
  // Reduce base_size if needed (this will unmap the extra pages).
  SetSize(source_size);

  return true;
#endif  // !HAVE_MREMAP_SYSCALL
}

MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                size_t byte_count,
                                int prot,
                                int flags,
                                int fd,
                                off_t start,
                                bool low_4gb,
                                const char* filename,
                                bool reuse,
                                /*inout*/MemMap* reservation,
                                /*out*/std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
  // reservation, i.e. we expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);
    DCHECK(reservation == nullptr);
    DCHECK(error_msg != nullptr);
    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else if (reservation != nullptr) {
    DCHECK(error_msg != nullptr);
    if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
      return Invalid();
    }
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    *error_msg = "Empty MemMap requested";
    return Invalid();
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
  // but not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
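
  // A worked example of the adjustment above (illustrative numbers): with 4 KiB
  // pages, start = 0x1234 and byte_count = 0x100 give page_offset = 0x234,
  // page_aligned_offset = 0x1000, and page_aligned_byte_count =
  // RoundUp(0x100 + 0x234, 0x1000) = 0x1000, i.e. a single page is mapped and the
  // caller's data begins 0x234 bytes into it.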

  size_t redzone_size = 0;
  if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return Invalid();
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return Invalid();
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  if (reservation != nullptr) {
    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
    DCHECK_EQ(actual, reservation->Begin());
    reservation->ReleaseReservedMemory(byte_count);
  }
  return MemMap(filename,
                actual + page_offset,
                byte_count,
                actual,
                page_aligned_byte_count,
                prot,
                reuse,
                redzone_size);
}

MemMap::MemMap(MemMap&& other) noexcept
    : MemMap() {
  swap(other);
}

MemMap::~MemMap() {
  Reset();
}

void MemMap::DoReset() {
  DCHECK(IsValid());
  size_t real_base_size = base_size_;
  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    // Add redzone_size_ back to base_size or it will cause an mmap leak.
    real_base_size += redzone_size_;
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + real_base_size - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    if (!already_unmapped_) {
      int result = TargetMUnmap(base_begin_, real_base_size);
      if (result == -1) {
        PLOG(FATAL) << "munmap failed";
      }
    }
  }

  Invalidate();
}

void MemMap::ResetInForkedProcess() {
  // This should be called on a map that has MADV_DONTFORK.
  // The kernel has already unmapped this.
  already_unmapped_ = true;
  Reset();
}

void MemMap::Invalidate() {
  DCHECK(IsValid());

  // Remove it from gMaps.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  auto it = GetGMapsEntry(*this);
  gMaps->erase(it);

  // Mark it as invalid.
  base_size_ = 0u;
  DCHECK(!IsValid());
}

void MemMap::swap(MemMap& other) {
  if (IsValid() || other.IsValid()) {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
    auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
    if (IsValid()) {
      DCHECK(this_it != gMaps->end());
      DCHECK_EQ(this_it->second, this);
      this_it->second = &other;
    }
    if (other.IsValid()) {
      DCHECK(other_it != gMaps->end());
      DCHECK_EQ(other_it->second, &other);
      other_it->second = this;
    }
    // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
    // with the `gMaps` key when other threads try to use `gMaps`.
    SwapMembers(other);
  } else {
    SwapMembers(other);
  }
}

void MemMap::SwapMembers(MemMap& other) {
  name_.swap(other.name_);
  std::swap(begin_, other.begin_);
  std::swap(size_, other.size_);
  std::swap(base_begin_, other.base_begin_);
  std::swap(base_size_, other.base_size_);
  std::swap(prot_, other.prot_);
  std::swap(reuse_, other.reuse_);
  std::swap(already_unmapped_, other.already_unmapped_);
  std::swap(redzone_size_, other.redzone_size_);
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to gMaps.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

MemMap MemMap::RemapAtEnd(uint8_t* new_end,
                          const char* tail_name,
                          int tail_prot,
                          std::string* error_msg,
                          bool use_debug_name) {
  return RemapAtEnd(new_end,
                    tail_name,
                    tail_prot,
                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
                    /* fd= */ -1,
                    /* offset= */ 0,
                    error_msg,
                    use_debug_name);
}
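
// Usage sketch for the overload above (hypothetical caller code): split off the
// page-aligned upper half of an existing mapping as its own MemMap.
//
//   uint8_t* new_end = map.Begin() + map.Size() / 2;  // Must be page-aligned.
//   std::string error_msg;
//   MemMap tail = map.RemapAtEnd(new_end, "tail", PROT_READ, &error_msg);
//   CHECK(tail.IsValid()) << error_msg;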

MemMap MemMap::RemapAtEnd(uint8_t* new_end,
                          const char* tail_name,
                          int tail_prot,
                          int flags,
                          int fd,
                          off_t offset,
                          std::string* error_msg,
                          bool use_debug_name) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return Invalid();
  }
  size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
  size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
  // removes old mappings for the overlapping region. This makes the operation atomic
  // and prevents other threads from racing to allocate memory in the requested region.
  uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
                                                          tail_base_size,
                                                          tail_prot,
                                                          flags,
                                                          fd,
                                                          offset));
  if (actual == MAP_FAILED) {
    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed: %s. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd, strerror(errno));
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    return Invalid();
  }
  // Update *this.
  if (new_base_size == 0u) {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    auto it = GetGMapsEntry(*this);
    gMaps->erase(it);
  }

  if (use_debug_name) {
    SetDebugName(actual, tail_name, tail_base_size);
  }

  size_ = new_size;
  base_size_ = new_base_size;
  // Return the new mapping.
  return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

MemMap MemMap::TakeReservedMemory(size_t byte_count, bool reuse) {
  uint8_t* begin = Begin();
  ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
  size_t base_size = RoundUp(byte_count, kPageSize);
  return MemMap(name_, begin, byte_count, begin, base_size, prot_, reuse);
}
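
// Usage sketch (hypothetical caller code): the reserve-and-divide pattern that the
// `gMaps` multimap exists to support. Reserve a large region up front, then carve
// off the leading part as its own mapping.
//
//   std::string error_msg;
//   MemMap reservation = MemMap::MapAnonymous("reservation",
//                                             /*addr=*/ nullptr,
//                                             2 * MB,
//                                             PROT_NONE,
//                                             /*low_4gb=*/ false,
//                                             /*reuse=*/ false,
//                                             /*reservation=*/ nullptr,
//                                             &error_msg);
//   CHECK(reservation.IsValid()) << error_msg;
//   MemMap first_half = reservation.TakeReservedMemory(1 * MB, /*reuse=*/ false);
//   // `reservation` now covers only the remaining upper 1 MB.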

void MemMap::ReleaseReservedMemory(size_t byte_count) {
  // Check the reservation mapping.
  DCHECK(IsValid());
  DCHECK(!reuse_);
  DCHECK(!already_unmapped_);
  DCHECK_EQ(redzone_size_, 0u);
  DCHECK_EQ(begin_, base_begin_);
  DCHECK_EQ(size_, base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(size_, kPageSize);

  // Check and round up the `byte_count`.
  DCHECK_NE(byte_count, 0u);
  DCHECK_LE(byte_count, size_);
  byte_count = RoundUp(byte_count, kPageSize);

  if (byte_count == size_) {
    Invalidate();
  } else {
    // Shrink the reservation MemMap and update its `gMaps` entry.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    auto it = GetGMapsEntry(*this);
    auto node = gMaps->extract(it);
    begin_ += byte_count;
    size_ -= byte_count;
    base_begin_ = begin_;
    base_size_ = size_;
    node.key() = base_begin_;
    gMaps->insert(std::move(node));
  }
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
#ifdef _WIN32
    // It is benign not to madvise away the pages here.
    PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
#else
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
#endif
  }
}

int MemMap::MadviseDontFork() {
#if defined(__linux__)
  if (base_begin_ != nullptr || base_size_ != 0) {
    return madvise(base_begin_, base_size_, MADV_DONTFORK);
  }
#endif
  return -1;
}

bool MemMap::Sync() {
#ifdef _WIN32
  // TODO: add FlushViewOfFile support.
  PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
  return false;
#else
  // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
  // protection before passing the range to msync() when `redzone_size_` was non-zero, as
  // Valgrind only accepts page-aligned base addresses, and we excluded the higher-end noaccess
  // protection from the msync range. b/27552451.
  return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
#endif
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

#ifndef _WIN32
  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }
#endif

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}
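
// Usage sketch (hypothetical caller code): drop write access once a region has been
// fully initialized.
//
//   CHECK(map.Protect(PROT_READ)) << "Failed to make mapping read-only";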

bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  CHECK(begin_map.IsValid());
  CHECK(end_map.IsValid());
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
  MemMap* map = &begin_map;
  while (map->BaseBegin() != end_map.BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *gMaps;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
           it->second->GetProtect() == map->GetProtect() &&
           it->second->GetName() == map->GetName() &&
           (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap& map) {
  void* base_begin = map.BaseBegin();
  for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == &map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(address), end = gMaps->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  if (mem_maps_lock_ != nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    return;
  }
  mem_maps_lock_ = new std::mutex();
  // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DCHECK(gMaps == nullptr);
  gMaps = new Maps;

  TargetMMapInit();
}

bool MemMap::IsInitialized() { return mem_maps_lock_ != nullptr; }

void MemMap::Shutdown() {
  if (mem_maps_lock_ == nullptr) {
    // If MemMap::Shutdown is called more than once, there is no effect.
    return;
  }
  {
    // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    delete gMaps;
    gMaps = nullptr;
  }
  delete mem_maps_lock_;
  mem_maps_lock_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  CHECK_LE(new_size, size_);
  size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
                                 kPageSize);
  if (new_base_size == base_size_) {
    size_ = new_size;
    return;
  }
  CHECK_LT(new_base_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
      base_size_ - new_base_size);
  CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
                            reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
                        base_size_ - new_base_size), 0)
      << new_base_size << " " << base_size_;
  base_size_ = new_base_size;
  size_ = new_size;
}

void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
                                            int prot,
                                            int flags,
                                            int fd,
                                            off_t offset) {
#if USE_ART_LOW_4G_ALLOCATOR
  void* actual = MAP_FAILED;

  bool first_run = true;

  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
    // Use gMaps as an optimization to skip over large maps.
    // Find the first map which is address > ptr.
    auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
    if (it != gMaps->begin()) {
      auto before_it = it;
      --before_it;
      // Start at the end of the map before the upper bound.
      ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
      CHECK_ALIGNED(ptr, kPageSize);
    }
    while (it != gMaps->end()) {
      // How much space do we have until the next map?
      size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
      // If the space may be sufficient, break out of the loop.
      if (delta >= length) {
        break;
      }
      // Otherwise, skip to the end of the map.
      ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
      CHECK_ALIGNED(ptr, kPageSize);
      ++it;
    }

    // Try to see if we get lucky with this address since none of the ART maps overlap.
    actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
    if (actual != MAP_FAILED) {
      next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
      return actual;
    }

    if (4U * GB - ptr < length) {
      // Not enough memory until 4GB.
      if (first_run) {
        // Try another time from the bottom.
        ptr = LOW_MEM_START - kPageSize;
        first_run = false;
        continue;
      } else {
        // Second try failed.
        break;
      }
    }

    uintptr_t tail_ptr;

    // Check pages are free. msync() fails with ENOMEM for unmapped pages, so a
    // successful call means the page is already in use.
    bool safe = true;
    for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
      if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
        safe = false;
        break;
      } else {
        DCHECK_EQ(errno, ENOMEM);
      }
    }

    next_mem_pos_ = tail_ptr;  // Update early, as we break out when we found and mapped a region.

    if (safe) {
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        return actual;
      }
    } else {
      // Skip over last page.
      ptr = tail_ptr;
    }
  }

  if (actual == MAP_FAILED) {
    LOG(ERROR) << "Could not find contiguous low-memory space.";
    errno = ENOMEM;
  }
  return actual;
#else
  UNUSED(length, prot, flags, fd, offset);
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
#endif
}

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
    // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
    // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
    //
    // To avoid the issue, always map non-executable first, and mprotect if necessary.
    const int orig_prot = prot;
    const int prot_non_exec = prot & ~PROT_EXEC;
    actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);

    if (actual == MAP_FAILED) {
      return MAP_FAILED;
    }

    // See if we need to remap with the executable bit now.
    if (orig_prot != prot_non_exec) {
      if (mprotect(actual, length, orig_prot) != 0) {
        PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
        TargetMUnmap(actual, length);
        errno = ENOMEM;
        return MAP_FAILED;
      }
    }
    return actual;
  }

  actual = TargetMMap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = TargetMMap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

void ZeroAndReleasePages(void* address, size_t length) {
  if (length == 0) {
    return;
  }
  uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
  uint8_t* const mem_end = mem_begin + length;
  uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
  uint8_t* const page_end = AlignDown(mem_end, kPageSize);
  if (!kMadviseZeroes || page_begin >= page_end) {
    // No possible area to madvise.
    std::fill(mem_begin, mem_end, 0);
  } else {
    // Spans one or more pages.
    DCHECK_LE(mem_begin, page_begin);
    DCHECK_LE(page_begin, page_end);
    DCHECK_LE(page_end, mem_end);
    std::fill(mem_begin, page_begin, 0);
#ifdef _WIN32
    LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
#else
    CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
#endif
    std::fill(page_end, mem_end, 0);
  }
}
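
// A worked example of the split above (illustrative numbers): with 4 KiB pages,
// address = 0x1100 and length = 0x2000 give mem_end = 0x3100, page_begin = 0x2000
// and page_end = 0x3000. The partial ranges [0x1100, 0x2000) and [0x3000, 0x3100)
// are zeroed with std::fill, while the fully covered page [0x2000, 0x3000) is
// released via madvise(MADV_DONTNEED), which reads back as zeros for anonymous
// memory (the kMadviseZeroes guard).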

void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
  CHECK_EQ(begin_, base_begin_) << "Unsupported";
  CHECK_EQ(size_, base_size_) << "Unsupported";
  CHECK_GT(alignment, static_cast<size_t>(kPageSize));
  CHECK_ALIGNED(alignment, kPageSize);
  CHECK(!reuse_);
  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
      (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
    // Already aligned.
    return;
  }
  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
  uint8_t* aligned_base_begin = AlignUp(base_begin, alignment);
  CHECK_LE(base_begin, aligned_base_begin);
  if (base_begin < aligned_base_begin) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
    CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
        << "base_begin=" << reinterpret_cast<void*>(base_begin)
        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
  }
  uint8_t* base_end = base_begin + base_size_;
  size_t aligned_base_size;
  if (align_both_ends) {
    uint8_t* aligned_base_end = AlignDown(base_end, alignment);
    CHECK_LE(aligned_base_end, base_end);
    CHECK_LT(aligned_base_begin, aligned_base_end)
        << "base_begin = " << reinterpret_cast<void*>(base_begin)
        << " base_end = " << reinterpret_cast<void*>(base_end);
    aligned_base_size = aligned_base_end - aligned_base_begin;
    CHECK_GE(aligned_base_size, alignment);
    if (aligned_base_end < base_end) {
      MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
      CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
          << "base_end=" << reinterpret_cast<void*>(base_end)
          << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
    }
  } else {
    CHECK_LT(aligned_base_begin, base_end)
        << "base_begin = " << reinterpret_cast<void*>(base_begin);
    aligned_base_size = base_end - aligned_base_begin;
  }
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  if (base_begin < aligned_base_begin) {
    auto it = GetGMapsEntry(*this);
    auto node = gMaps->extract(it);
    node.key() = aligned_base_begin;
    gMaps->insert(std::move(node));
  }
  base_begin_ = aligned_base_begin;
  base_size_ = aligned_base_size;
  begin_ = aligned_base_begin;
  size_ = aligned_base_size;
  DCHECK(gMaps != nullptr);
}

}  // namespace art