/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <stdlib.h>
#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
#include <sys/resource.h>
#endif

#if defined(__linux__)
#include <sys/prctl.h>
#endif

#include <map>
#include <memory>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"

#include "allocator.h"
#include "bit_utils.h"
#include "globals.h"
#include "logging.h"  // For VLOG_IS_ON.
#include "memory_tool.h"
#include "mman.h"  // For the PROT_* and MAP_* constants.
#include "utils.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

using android::base::StringPrintf;
using android::base::unique_fd;

template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap =
    std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;

using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;

// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g., ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;

// A map containing unique strings used for identifying anonymous mappings.
static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());

// Retrieve iterator to a `gMaps` entry that is known to exist.
Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
  DCHECK(map.IsValid());
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
       it != end && it->first == map.BaseBegin();
       ++it) {
    if (it->second == &map) {
      return it;
    }
  }
  LOG(FATAL) << "MemMap not found";
  UNREACHABLE();
}

std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::mutex* MemMap::mem_maps_lock_ = nullptr;
#ifdef ART_PAGE_SIZE_AGNOSTIC
size_t MemMap::page_size_ = 0;
#endif

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in the 32-bit address range for 64-bit architectures that do not support
// MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(page_size - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.
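//
// Worked example (illustrative; assumes the typical ART_BASE_ADDRESS of
// 0x70000000 and a 4 KiB page size): CLZ(0x70000000) == 1, so
// mask_ones == (1 << 30) - 1 == 0x3fffffff and mask == 0x3ffff000. For random
// input 0x123456789abcdef0, (input & mask) == 0x1abcd000, and adding
// LOW_MEM_START (0x10000) yields a start position of 0x1abdd000.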

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input, size_t page_size) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  const uintptr_t mask = mask_ones & ~(page_size - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos(size_t page_size) {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data, page_size);
#else
  UNUSED(page_size);
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

uintptr_t MemMap::next_mem_pos_;
#endif

// Return true if the address range is contained in a single memory map by either reading
// the gMaps variable or the /proc/self/maps entry.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (auto& pair : *gMaps) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the
// expected value, calling munmap if validation fails and giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                             std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = TargetMUnmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try and generate a full error
    // message with the overlapping mapping. There's no guarantee that
    // there will be an overlap though, since
    // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   set, even if there is no overlap
    // - There might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.

    // Tell the client the mappings that were in place at the time.
    if (kIsDebugBuild) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    }

    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    *error_msg = os.str();
  }
  return false;
}

bool MemMap::CheckReservation(uint8_t* expected_ptr,
                              size_t byte_count,
                              const char* name,
                              const MemMap& reservation,
                              /*out*/std::string* error_msg) {
  if (!reservation.IsValid()) {
    *error_msg = StringPrintf("Invalid reservation for %s", name);
    return false;
  }
  DCHECK_ALIGNED_PARAM(reservation.Begin(), GetPageSize());
  if (reservation.Begin() != expected_ptr) {
    *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
                              name,
                              reservation.Begin(),
                              expected_ptr);
    return false;
  }
  if (byte_count > reservation.Size()) {
    *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
                              byte_count,
                              reservation.Size());
    return false;
  }
  return true;
}


#if USE_ART_LOW_4G_ALLOCATOR
void* MemMap::TryMemMapLow4GB(void* ptr,
                              size_t page_aligned_byte_count,
                              int prot,
                              int flags,
                              int fd,
                              off_t offset) {
  void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      TargetMUnmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

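// With PR_SET_VMA_ANON_NAME, the name given here shows up in /proc/self/maps
// as "[anon:dalvik-<name>]", which makes anonymous ART mappings attributable
// in memory profiles. The names are interned in `debugStrMap` so each string
// has a stable address for the life of the process; older Android kernels
// keep a reference to the user-supplied string rather than copying it.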
void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
  // Debug naming is only used for Android target builds. For Linux targets,
  // we'll still call prctl but it won't do anything until the prctl is supported upstream.
  if (kIsTargetFuchsia || !kIsTargetBuild) {
    return;
  }

  // Lock as std::map is not thread-safe.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);

  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += name;
  auto it = debugStrMap.find(debug_friendly_name);

  if (it == debugStrMap.end()) {
    it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
  }

  DCHECK(it != debugStrMap.end());
#if defined(PR_SET_VMA) && defined(__linux__)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
#else
  // Prevent variable unused compiler errors.
  UNUSED(map_ptr, size);
#endif
}

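// Example (illustrative only; assumes the default arguments declared in
// mem_map.h): mapping one zero-initialized, read/write page anywhere in the
// address space:
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("example",
//                                     /*addr=*/nullptr,
//                                     /*byte_count=*/MemMap::GetPageSize(),
//                                     PROT_READ | PROT_WRITE,
//                                     /*low_4gb=*/false,
//                                     /*reuse=*/false,
//                                     /*reservation=*/nullptr,
//                                     &error_msg);
//   CHECK(map.IsValid()) << error_msg;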
MemMap MemMap::MapAnonymous(const char* name,
                            uint8_t* addr,
                            size_t byte_count,
                            int prot,
                            bool low_4gb,
                            bool reuse,
                            /*inout*/MemMap* reservation,
                            /*out*/std::string* error_msg,
                            bool use_debug_name) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    *error_msg = "Empty MemMap requested.";
    return Invalid();
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(addr != nullptr);
    DCHECK(reservation == nullptr);

    DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else if (reservation != nullptr) {
    CHECK(addr != nullptr);
    if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
      return MemMap::Invalid();
    }
    flags |= MAP_FIXED;
  }

  unique_fd fd;

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(addr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                "See process maps in the log.",
                                addr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return Invalid();
  }
  if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
    return Invalid();
  }

  if (use_debug_name) {
    SetDebugName(actual, name, page_aligned_byte_count);
  }

  if (reservation != nullptr) {
    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
    DCHECK_EQ(actual, reservation->Begin());
    reservation->ReleaseReservedMemory(byte_count);
  }
  return MemMap(name,
                reinterpret_cast<uint8_t*>(actual),
                byte_count,
                actual,
                page_aligned_byte_count,
                prot,
                reuse);
}

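// The over-allocation below guarantees an aligned sub-range exists. Example
// (illustrative): with 4 KiB pages, requesting byte_count = 8 KiB at
// alignment = 16 KiB maps 8 KiB + 16 KiB - 4 KiB = 20 KiB; any such region
// contains a 16 KiB-aligned address with at least 8 KiB after it. AlignBy()
// then trims the unaligned head and SetSize() trims the tail.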
MemMap MemMap::MapAnonymousAligned(const char* name,
                                   size_t byte_count,
                                   int prot,
                                   bool low_4gb,
                                   size_t alignment,
                                   /*out=*/std::string* error_msg) {
  DCHECK(IsPowerOfTwo(alignment));
  DCHECK_GT(alignment, GetPageSize());

  // Allocate extra 'alignment - GetPageSize()' bytes so that the mapping can be aligned.
  MemMap ret = MapAnonymous(name,
                            /*addr=*/nullptr,
                            // AlignBy requires the size to be page-aligned, so
                            // rounding it here. It is corrected afterwards with
                            // SetSize after AlignBy.
                            RoundUp(byte_count, GetPageSize()) + alignment - GetPageSize(),
                            prot,
                            low_4gb,
                            /*reuse=*/false,
                            /*reservation=*/nullptr,
                            error_msg);
  if (LIKELY(ret.IsValid())) {
    ret.AlignBy(alignment, /*align_both_ends=*/false);
    ret.SetSize(byte_count);
    DCHECK_EQ(ret.Size(), byte_count);
    DCHECK_ALIGNED_PARAM(ret.Begin(), alignment);
  }
  return ret;
}

MemMap MemMap::MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return Invalid();
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());
  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
}

template<typename A, typename B>
static ptrdiff_t PointerDiff(A* a, B* b) {
  return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
}

bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
#if !HAVE_MREMAP_SYSCALL
  UNUSED(source);
  *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
  return false;
#else  // !HAVE_MREMAP_SYSCALL
  CHECK(source != nullptr);
  CHECK(source->IsValid());
  if (!MemMap::kCanReplaceMapping) {
    *error = "Unable to perform atomic replace due to runtime environment!";
    return false;
  }
  // Neither mapping may be a 'reuse' mapping.
  if (source->reuse_ || reuse_) {
    *error = "One or both mappings is not a real mmap!";
    return false;
  }
  // TODO Support redzones.
  if (source->redzone_size_ != 0 || redzone_size_ != 0) {
    *error = "source and dest have different redzone sizes";
    return false;
  }
  // Make sure they have the same offset from the actual mmap'd address.
  if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
    *error =
        "source starts at a different offset from the mmap. Cannot atomically replace mappings";
    return false;
  }
  // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's
  // like memcpy but the check is explicit and actually done).
  if (source->BaseBegin() > BaseBegin() &&
      reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
          reinterpret_cast<uint8_t*>(source->BaseBegin())) {
    *error = "destination memory pages overlap with source memory pages";
    return false;
  }
  // Change the protection to match the new location.
  int old_prot = source->GetProtect();
  if (!source->Protect(GetProtect())) {
    *error = "Could not change protections for source to those required for dest.";
    return false;
  }

  // Do the mremap.
  void* res = mremap(/*old_address*/source->BaseBegin(),
                     /*old_size*/source->BaseSize(),
                     /*new_size*/source->BaseSize(),
                     /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
                     /*new_address*/BaseBegin());
  if (res == MAP_FAILED) {
    int saved_errno = errno;
    // Wasn't able to move mapping. Change the protection of source back to the original one and
    // return.
    source->Protect(old_prot);
    *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
    return false;
  }
  CHECK(res == BaseBegin());

  // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will
  // unmap them later.
  size_t new_base_size = std::max(source->base_size_, base_size_);

  // Invalidate *source, don't unmap it though since it is already gone.
  size_t source_size = source->size_;
  source->Invalidate();

  size_ = source_size;
  base_size_ = new_base_size;
  // Reduce base_size if needed (this will unmap the extra pages).
  SetSize(source_size);

  return true;
#endif  // !HAVE_MREMAP_SYSCALL
}

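// Example (illustrative): mapping the first 4 KiB of an open file read-only,
// letting the kernel pick the address:
//
//   std::string error_msg;
//   MemMap map = MemMap::MapFileAtAddress(/*expected_ptr=*/nullptr,
//                                         /*byte_count=*/4 * KB,
//                                         PROT_READ,
//                                         MAP_PRIVATE,
//                                         fd,
//                                         /*start=*/0,
//                                         /*low_4gb=*/false,
//                                         /*filename=*/"example.file",
//                                         /*reuse=*/false,
//                                         /*reservation=*/nullptr,
//                                         &error_msg);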
MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                size_t byte_count,
                                int prot,
                                int flags,
                                int fd,
                                off_t start,
                                bool low_4gb,
                                const char* filename,
                                bool reuse,
                                /*inout*/MemMap* reservation,
                                /*out*/std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
  // reservation, i.e. we expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);
    DCHECK(reservation == nullptr);
    DCHECK(error_msg != nullptr);
    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else if (reservation != nullptr) {
    DCHECK(error_msg != nullptr);
    if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
      return Invalid();
    }
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    *error_msg = "Empty MemMap requested";
    return Invalid();
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % GetPageSize();
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, GetPageSize());
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
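  // Example of the arithmetic above (illustrative): with 4 KiB pages,
  // start = 0x1234 and byte_count = 10 KiB give page_offset = 0x234,
  // page_aligned_offset = 0x1000 and page_aligned_byte_count = 12 KiB; the
  // caller's data then lives at 'actual + page_offset' within the mapping.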

  size_t redzone_size = 0;
  if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = GetPageSize();
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return Invalid();
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return Invalid();
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  if (reservation != nullptr) {
    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
    DCHECK_EQ(actual, reservation->Begin());
    reservation->ReleaseReservedMemory(byte_count);
  }
  return MemMap(filename,
                actual + page_offset,
                byte_count,
                actual,
                page_aligned_byte_count,
                prot,
                reuse,
                redzone_size);
}

MemMap::MemMap(MemMap&& other) noexcept
    : MemMap() {
  swap(other);
}

MemMap::~MemMap() {
  Reset();
}

void MemMap::DoReset() {
  DCHECK(IsValid());
  size_t real_base_size = base_size_;
  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    // Add redzone_size_ back to base_size or it will cause an mmap leak.
    real_base_size += redzone_size_;
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + real_base_size - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    if (!already_unmapped_) {
      int result = TargetMUnmap(base_begin_, real_base_size);
      if (result == -1) {
        PLOG(FATAL) << "munmap failed";
      }
    }
  }

  Invalidate();
}

void MemMap::ResetInForkedProcess() {
  // This should be called on a map that has MADV_DONTFORK.
  // The kernel has already unmapped this.
  already_unmapped_ = true;
  Reset();
}

void MemMap::Invalidate() {
  DCHECK(IsValid());

  // Remove it from gMaps.
  // TODO(b/307704260) Move MemMap::Init and MemMap::Shutdown out of Runtime init/shutdown.
  if (mem_maps_lock_ != nullptr) {  // Skip if the runtime was already shut down.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    auto it = GetGMapsEntry(*this);
    gMaps->erase(it);
  }

  // Mark it as invalid.
  base_size_ = 0u;
  DCHECK(!IsValid());
}

void MemMap::swap(MemMap& other) {
  if (IsValid() || other.IsValid()) {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
    auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
    if (IsValid()) {
      DCHECK(this_it != gMaps->end());
      DCHECK_EQ(this_it->second, this);
      this_it->second = &other;
    }
    if (other.IsValid()) {
      DCHECK(other_it != gMaps->end());
      DCHECK_EQ(other_it->second, &other);
      other_it->second = this;
    }
    // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
    // with the `gMaps` key when other threads try to use `gMaps`.
    SwapMembers(other);
  } else {
    SwapMembers(other);
  }
}

void MemMap::SwapMembers(MemMap& other) {
  name_.swap(other.name_);
  std::swap(begin_, other.begin_);
  std::swap(size_, other.size_);
  std::swap(base_begin_, other.base_begin_);
  std::swap(base_size_, other.base_size_);
  std::swap(prot_, other.prot_);
  std::swap(reuse_, other.reuse_);
  std::swap(already_unmapped_, other.already_unmapped_);
  std::swap(redzone_size_, other.redzone_size_);
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to gMaps.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

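// Example (illustrative; assumes the default arguments declared in mem_map.h):
// splitting an existing mapping in two. After
//
//   uint8_t* mid = map.Begin() + map.Size() / 2;  // must be page-aligned
//   MemMap tail = map.RemapAtEnd(mid, "tail", PROT_READ, &error_msg);
//
// 'map' covers [Begin(), mid) and 'tail' covers [mid, old End()).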
MemMap MemMap::RemapAtEnd(uint8_t* new_end,
                          const char* tail_name,
                          int tail_prot,
                          std::string* error_msg,
                          bool use_debug_name) {
  return RemapAtEnd(new_end,
                    tail_name,
                    tail_prot,
                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
                    /* fd= */ -1,
                    /* offset= */ 0,
                    error_msg,
                    use_debug_name);
}

MemMap MemMap::RemapAtEnd(uint8_t* new_end,
                          const char* tail_name,
                          int tail_prot,
                          int flags,
                          int fd,
                          off_t offset,
                          std::string* error_msg,
                          bool use_debug_name) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
  DCHECK_ALIGNED_PARAM(base_begin_, GetPageSize());
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, GetPageSize());
  DCHECK_ALIGNED_PARAM(new_end, GetPageSize());
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return Invalid();
  }
  size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
  size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED_PARAM(tail_base_size, GetPageSize());

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
  // removes old mappings for the overlapping region. This makes the operation atomic
  // and prevents other threads from racing to allocate memory in the requested region.
  uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
                                                          tail_base_size,
                                                          tail_prot,
                                                          flags,
                                                          fd,
                                                          offset));
  if (actual == MAP_FAILED) {
    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed: %s. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd, strerror(errno));
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    return Invalid();
  }
  // Update *this.
  if (new_base_size == 0u) {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    auto it = GetGMapsEntry(*this);
    gMaps->erase(it);
  }

  if (use_debug_name) {
    SetDebugName(actual, tail_name, tail_base_size);
  }

  size_ = new_size;
  base_size_ = new_base_size;
  // Return the new mapping.
  return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

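// Used for the reserve-and-divide pattern mentioned at `gMaps` above: a caller
// makes one large reservation up front, then repeatedly carves page-rounded
// chunks off its start. Example (illustrative):
//
//   MemMap first = reservation.TakeReservedMemory(first_size);
//   // 'reservation' now begins RoundUp(first_size, GetPageSize()) bytes later.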
MemMap MemMap::TakeReservedMemory(size_t byte_count, bool reuse) {
  uint8_t* begin = Begin();
  ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
  size_t base_size = RoundUp(byte_count, GetPageSize());
  return MemMap(name_, begin, byte_count, begin, base_size, prot_, reuse);
}

void MemMap::ReleaseReservedMemory(size_t byte_count) {
  // Check the reservation mapping.
  DCHECK(IsValid());
  DCHECK(!reuse_);
  DCHECK(!already_unmapped_);
  DCHECK_EQ(redzone_size_, 0u);
  DCHECK_EQ(begin_, base_begin_);
  DCHECK_EQ(size_, base_size_);
  DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
  DCHECK_ALIGNED_PARAM(size_, GetPageSize());

  // Check and round up the `byte_count`.
  DCHECK_NE(byte_count, 0u);
  DCHECK_LE(byte_count, size_);
  byte_count = RoundUp(byte_count, GetPageSize());

  if (byte_count == size_) {
    Invalidate();
  } else {
    // Shrink the reservation MemMap and update its `gMaps` entry.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    auto it = GetGMapsEntry(*this);
    auto node = gMaps->extract(it);
    begin_ += byte_count;
    size_ -= byte_count;
    base_begin_ = begin_;
    base_size_ = size_;
    node.key() = base_begin_;
    gMaps->insert(std::move(node));
  }
}

void MemMap::FillWithZero(bool release_eagerly) {
  if (base_begin_ != nullptr && base_size_ != 0) {
    ZeroMemory(base_begin_, base_size_, release_eagerly);
  }
}

int MemMap::MadviseDontFork() {
#if defined(__linux__)
  if (base_begin_ != nullptr || base_size_ != 0) {
    return madvise(base_begin_, base_size_, MADV_DONTFORK);
  }
#endif
  return -1;
}

bool MemMap::Sync() {
#ifdef _WIN32
  // TODO: add FlushViewOfFile support.
  PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
  return false;
#else
  // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
  // protection before passing the range to msync() when `redzone_size_` was non-zero, as
  // Valgrind only accepts a page-aligned base address, and excluded the higher-end noaccess
  // protection from the msync range. b/27552451.
  return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
#endif
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

#ifndef _WIN32
  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }
#endif

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  CHECK(begin_map.IsValid());
  CHECK(end_map.IsValid());
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
  MemMap* map = &begin_map;
  while (map->BaseBegin() != end_map.BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *gMaps;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED_PARAM(size, GetPageSize());
    void* end = map->BaseEnd();
    while (it != maps_end &&
           it->second->GetProtect() == map->GetProtect() &&
           it->second->GetName() == map->GetName() &&
           (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / GetPageSize()) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) -
                reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED_PARAM(gap, GetPageSize());
        os << "~0x" << std::hex << (gap / GetPageSize()) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED_PARAM(it->second->BaseSize(), GetPageSize());
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / GetPageSize()) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap& map) {
  void* base_begin = map.BaseBegin();
  for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == &map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(address), end = gMaps->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  if (mem_maps_lock_ != nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    return;
  }

  mem_maps_lock_ = new std::mutex();
  // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
#ifdef ART_PAGE_SIZE_AGNOSTIC
  page_size_ = GetPageSizeSlow();
#endif
  CHECK_GE(GetPageSize(), kMinPageSize);
  CHECK_LE(GetPageSize(), kMaxPageSize);
#if USE_ART_LOW_4G_ALLOCATOR
  // Initialize linear scan to random position.
  CHECK_EQ(next_mem_pos_, 0u);
  next_mem_pos_ = GenerateNextMemPos(GetPageSize());
#endif
  DCHECK(gMaps == nullptr);
  gMaps = new Maps;

  TargetMMapInit();
}

bool MemMap::IsInitialized() { return mem_maps_lock_ != nullptr; }

void MemMap::Shutdown() {
  if (mem_maps_lock_ == nullptr) {
    // If MemMap::Shutdown is called more than once, there is no effect.
    return;
  }
  {
    // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    delete gMaps;
    gMaps = nullptr;
  }
#if USE_ART_LOW_4G_ALLOCATOR
  next_mem_pos_ = 0u;
#endif
  delete mem_maps_lock_;
  mem_maps_lock_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  CHECK_LE(new_size, size_);
  size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
                                 GetPageSize());
  if (new_base_size == base_size_) {
    size_ = new_size;
    return;
  }
  CHECK_LT(new_base_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
      base_size_ - new_base_size);
  CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
                            reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
                        base_size_ - new_base_size), 0)
      << new_base_size << " " << base_size_;
  base_size_ = new_base_size;
  size_ = new_size;
}

void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
                                            int prot,
                                            int flags,
                                            int fd,
                                            off_t offset) {
#if USE_ART_LOW_4G_ALLOCATOR
  void* actual = MAP_FAILED;

  bool first_run = true;

  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += GetPageSize()) {
    // Use gMaps as an optimization to skip over large maps.
    // Find the first map which is address > ptr.
    auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
    if (it != gMaps->begin()) {
      auto before_it = it;
      --before_it;
      // Start at the end of the map before the upper bound.
      ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
      CHECK_ALIGNED_PARAM(ptr, GetPageSize());
    }
    while (it != gMaps->end()) {
      // How much space do we have until the next map?
      size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
      // If the space may be sufficient, break out of the loop.
      if (delta >= length) {
        break;
      }
      // Otherwise, skip to the end of the map.
      ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
      CHECK_ALIGNED_PARAM(ptr, GetPageSize());
      ++it;
    }

    // Try to see if we get lucky with this address since none of the ART maps overlap.
    actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
    if (actual != MAP_FAILED) {
      next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
      return actual;
    }

    if (4U * GB - ptr < length) {
      // Not enough memory until 4GB.
      if (first_run) {
        // Try another time from the bottom.
        ptr = LOW_MEM_START - GetPageSize();
        first_run = false;
        continue;
      } else {
        // Second try failed.
        break;
      }
    }

    uintptr_t tail_ptr;

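    // Probe each page with msync(): msync() succeeds only on mapped pages and
    // fails with ENOMEM on unmapped ones, so a successful call means the
    // candidate region is already occupied and is not safe to use.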
    // Check pages are free.
    bool safe = true;
    for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += GetPageSize()) {
      if (msync(reinterpret_cast<void*>(tail_ptr), GetPageSize(), 0) == 0) {
        safe = false;
        break;
      } else {
        DCHECK_EQ(errno, ENOMEM);
      }
    }

    next_mem_pos_ = tail_ptr;  // Update early, as we break out when we find and map a region.

    if (safe) {
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        return actual;
      }
    } else {
      // Skip over last page.
      ptr = tail_ptr;
    }
  }

  if (actual == MAP_FAILED) {
    LOG(ERROR) << "Could not find contiguous low-memory space.";
    errno = ENOMEM;
  }
  return actual;
#else
  UNUSED(length, prot, flags, fd, offset);
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
#endif
}

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED_PARAM(length, GetPageSize());
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
    // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
    // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
    //
    // To avoid the issue, always map non-executable first, and mprotect if necessary.
    const int orig_prot = prot;
    const int prot_non_exec = prot & ~PROT_EXEC;
    actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);

    if (actual == MAP_FAILED) {
      return MAP_FAILED;
    }

    // See if we need to remap with the executable bit now.
    if (orig_prot != prot_non_exec) {
      if (mprotect(actual, length, orig_prot) != 0) {
        PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
        TargetMUnmap(actual, length);
        errno = ENOMEM;
        return MAP_FAILED;
      }
    }
    return actual;
  }

  actual = TargetMMap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = TargetMMap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAlignedParam(begin, GetPageSize()));
  DCHECK(IsAlignedParam(end, GetPageSize()));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += GetPageSize()) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

static inline void RawClearMemory(uint8_t* begin, uint8_t* end) {
  std::fill(begin, end, 0);
}

#if defined(__linux__)
static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident, size_t page_size) {
  DCHECK(IsAlignedParam(page_begin, page_size));
  DCHECK(IsAlignedParam(page_begin + size, page_size));
  if (resident) {
    RawClearMemory(page_begin, page_begin + size);
    // Note we check the madvise return value against -1, as it seems old kernels
    // can return 1.
#ifdef MADV_FREE
    int res = madvise(page_begin, size, MADV_FREE);
    CHECK_NE(res, -1) << "madvise failed";
#endif  // MADV_FREE
  } else {
    int res = madvise(page_begin, size, MADV_DONTNEED);
    CHECK_NE(res, -1) << "madvise failed";
  }
}
#endif  // __linux__

void ZeroMemory(void* address, size_t length, bool release_eagerly) {
  if (length == 0) {
    return;
  }
  uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
  uint8_t* const mem_end = mem_begin + length;
  uint8_t* const page_begin = AlignUp(mem_begin, MemMap::GetPageSize());
  uint8_t* const page_end = AlignDown(mem_end, MemMap::GetPageSize());
  if (!kMadviseZeroes || page_begin >= page_end) {
    // No possible area to madvise.
    RawClearMemory(mem_begin, mem_end);
    return;
  }
  // Spans one or more pages.
  DCHECK_LE(mem_begin, page_begin);
  DCHECK_LE(page_begin, page_end);
  DCHECK_LE(page_end, mem_end);
#ifdef _WIN32
  UNUSED(release_eagerly);
  LOG(WARNING) << "ZeroMemory does not madvise on Windows.";
  RawClearMemory(mem_begin, mem_end);
#else
  RawClearMemory(mem_begin, page_begin);
  RawClearMemory(page_end, mem_end);
  // mincore() is a Linux-specific syscall.
#if defined(__linux__)
  if (!release_eagerly) {
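    // Lazy path: ask the kernel via mincore() which pages are resident, then
    // batch maximal runs of pages with the same residency state. Resident runs
    // are zeroed and marked MADV_FREE (reclaimable, but cheap to reuse); for
    // anonymous memory, non-resident runs get MADV_DONTNEED and will read back
    // as zero anyway.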
    size_t vec_len = (page_end - page_begin) / MemMap::GetPageSize();
    std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
    if (mincore(page_begin, page_end - page_begin, vec.get()) == 0) {
      uint8_t* current_page = page_begin;
      size_t current_size = MemMap::GetPageSize();
      uint32_t old_state = vec[0] & 0x1;
      for (size_t i = 1; i < vec_len; ++i) {
        uint32_t new_state = vec[i] & 0x1;
        if (old_state == new_state) {
          current_size += MemMap::GetPageSize();
        } else {
          ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
          current_page = current_page + current_size;
          current_size = MemMap::GetPageSize();
          old_state = new_state;
        }
      }
      ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
      return;
    }
    static bool logged_about_mincore = false;
    if (!logged_about_mincore) {
      PLOG(WARNING) << "mincore failed, falling back to madvise MADV_DONTNEED";
      logged_about_mincore = true;
    }
    // mincore failed, fall through to MADV_DONTNEED.
  }
#else
  UNUSED(release_eagerly);
#endif  // __linux__
  int res = madvise(page_begin, page_end - page_begin, MADV_DONTNEED);
  CHECK_NE(res, -1) << "madvise failed";
#endif  // _WIN32
}

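// Example (illustrative): with 4 KiB pages and alignment = 0x10000, a map at
// base 0x12000 of size 0x25000 has its head [0x12000, 0x20000) unmapped; with
// align_both_ends, the tail [0x30000, 0x37000) is unmapped too, leaving the
// aligned region [0x20000, 0x30000).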
void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
  CHECK_EQ(begin_, base_begin_) << "Unsupported";
  CHECK_EQ(size_, base_size_) << "Unsupported";
  CHECK_GT(alignment, static_cast<size_t>(GetPageSize()));
  CHECK_ALIGNED_PARAM(alignment, GetPageSize());
  CHECK(!reuse_);
  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
      (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
    // Already aligned.
    return;
  }
  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
  uint8_t* aligned_base_begin = AlignUp(base_begin, alignment);
  CHECK_LE(base_begin, aligned_base_begin);
  if (base_begin < aligned_base_begin) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
    CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
        << "base_begin=" << reinterpret_cast<void*>(base_begin)
        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
  }
  uint8_t* base_end = base_begin + base_size_;
  size_t aligned_base_size;
  if (align_both_ends) {
    uint8_t* aligned_base_end = AlignDown(base_end, alignment);
    CHECK_LE(aligned_base_end, base_end);
    CHECK_LT(aligned_base_begin, aligned_base_end)
        << "base_begin = " << reinterpret_cast<void*>(base_begin)
        << " base_end = " << reinterpret_cast<void*>(base_end);
    aligned_base_size = aligned_base_end - aligned_base_begin;
    CHECK_GE(aligned_base_size, alignment);
    if (aligned_base_end < base_end) {
      MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
      CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
          << "base_end=" << reinterpret_cast<void*>(base_end)
          << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
    }
  } else {
    CHECK_LT(aligned_base_begin, base_end)
        << "base_begin = " << reinterpret_cast<void*>(base_begin);
    aligned_base_size = base_end - aligned_base_begin;
  }
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  if (base_begin < aligned_base_begin) {
    auto it = GetGMapsEntry(*this);
    auto node = gMaps->extract(it);
    node.key() = aligned_base_begin;
    gMaps->insert(std::move(node));
  }
  base_begin_ = aligned_base_begin;
  base_size_ = aligned_base_size;
  begin_ = aligned_base_begin;
  size_ = aligned_base_size;
  DCHECK(gMaps != nullptr);
}

}  // namespace art