1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
18 #define ART_LIBARTBASE_BASE_MEM_MAP_H_
19
20 #include <stddef.h>
21 #include <sys/types.h>
22
23 #include <map>
24 #include <mutex>
25 #include <string>
26
27 #include "android-base/thread_annotations.h"
28 #include "bit_utils.h"
29 #include "globals.h"
30 #include "macros.h"
31
32 #ifndef __BIONIC__
33 #ifndef MAP_FIXED_NOREPLACE
34 #define MAP_FIXED_NOREPLACE 0x100000
35 #endif
36 #endif // __BIONIC__
37
38 namespace art {
39
40 #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(_WINDOWS_)
41 #define USE_ART_LOW_4G_ALLOCATOR 1
42 #else
43 #define USE_ART_LOW_4G_ALLOCATOR 0
44 #endif
45
46 #ifdef __linux__
47 static constexpr bool kMadviseZeroes = true;
48 #define HAVE_MREMAP_SYSCALL true
49 #else
50 static constexpr bool kMadviseZeroes = false;
51 // We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
52 // present.
53 #define HAVE_MREMAP_SYSCALL false
54 #endif
55
56 // Used to keep track of mmap segments.
57 //
58 // On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
59 // for free pages. For security, the start of this scan should be randomized. This requires a
60 // dynamic initializer.
61 // For this to work, it is paramount that there are no other static initializers that access MemMap.
62 // Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  // True iff ReplaceWith() is usable on this platform, i.e. where the mremap
  // syscall exists (see HAVE_MREMAP_SYSCALL above).
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  // Movable but not copyable (declaring the move operations suppresses the
  // implicit copy operations); moving transfers ownership of the mapped pages.
  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  // Releases the mapping (if valid), returning this MemMap to the invalid state.
  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  // A MemMap is valid iff it describes a non-empty mapping (base_size_ != 0).
  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);

  // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             /*inout*/MemMap* reservation,
                             /*out*/std::string* error_msg,
                             bool use_debug_name = true);

  // Request an aligned anonymous region, where the alignment must be higher
  // than the runtime gPageSize. We can't directly ask for a MAP_SHARED
  // (anonymous or otherwise) mapping to be aligned as in that case file offset
  // is involved and could make the starting offset to be out of sync with
  // another mapping of the same file.
  static MemMap MapAnonymousAligned(const char* name,
                                    size_t byte_count,
                                    int prot,
                                    bool low_4gb,
                                    size_t alignment,
                                    /*out=*/std::string* error_msg);

  // Convenience overload: anonymous mapping with no requested address, no reuse
  // and no reservation.
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        /*reservation=*/ nullptr,
                        error_msg);
  }
  // Convenience overload: anonymous mapping placed at the start of `reservation`
  // (when non-null), transferring ownership of those pages to the new mapping.
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             MemMap* reservation,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        reservation,
                        error_msg);
  }

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets. The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            /*reuse=*/ false,
                            /*reservation=*/ nullptr,
                            error_msg);
  }

  // As above, but additionally allows requesting a 'reuse' mapping
  // (see MapFileAtAddress for the semantics of `reuse`).
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        bool reuse,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            reuse,
                            /*reservation=*/ nullptr,
                            error_msg);
  }

  // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
  // This helps improve performance of the fail case since reading and printing /proc/maps takes
  // several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 const char* filename,
                                 bool reuse,
                                 /*inout*/MemMap* reservation,
                                 /*out*/std::string* error_msg);

  // Debug-friendly name of the mapping (see SetDebugName()).
  const std::string& GetName() const {
    return name_;
  }

  // Flush a file-backed mapping's modifications to the file.
  // NOTE(review): exact semantics (sync flags, behavior on anonymous maps) live in the
  // implementation — consult mem_map.cc before relying on them.
  bool Sync();

  // Change the protection of the mapping to `prot`. Returns true on success.
  bool Protect(int prot);

  // Zero the mapped pages. When `release_eagerly` is true the implementation may also
  // return the backing pages to the OS immediately.
  void FillWithZero(bool release_eagerly);
  // Convenience wrapper: zero the mapping and eagerly release the backing pages.
  void MadviseDontNeedAndZero() {
    FillWithZero(/* release_eagerly= */ true);
  }
  // Advise the kernel not to propagate this mapping to forked children
  // (MADV_DONTFORK-style; see implementation for the meaning of the return value).
  int MadviseDontFork();

  // Current protection (PROT_*) of the mapping.
  int GetProtect() const {
    return prot_;
  }

  // Start of the data. May be changed by AlignBy.
  uint8_t* Begin() const {
    return begin_;
  }

  // Length of the data in bytes.
  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  // One past the last data byte.
  uint8_t* End() const {
    return Begin() + Size();
  }

  // Page-aligned base address of the underlying mapping. May be changed by AlignBy.
  void* BaseBegin() const {
    return base_begin_;
  }

  // Length of the underlying mapping in bytes; 0 for an invalid MemMap.
  size_t BaseSize() const {
    return base_size_;
  }

  // One past the last byte of the underlying mapping.
  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  // Whether `addr` lies within the data range [Begin(), End()).
  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Unmap the pages of a file at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    int tail_flags,
                    int fd,
                    off_t offset,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Take ownership of pages at the beginning of the mapping. The mapping must be an
  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
  // exceed the size of this reservation.
  //
  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
  // with size set to the passed `byte_count`. If 'reuse' is true then the caller
  // is responsible for unmapping the taken pages.
  MemMap TakeReservedMemory(size_t byte_count, bool reuse = false);

  // Check that the tracked mappings between `begin_map` and `end_map` leave no gap.
  // NOTE(review): exact adjacency criterion is defined in the implementation.
  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  // Dump the table of tracked mappings to `os`; `terse` requests a compact form.
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
  static bool IsInitialized();

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned part at the lower end and if 'align_both_ends' is
  // true, then the higher end as well.
  void AlignBy(size_t alignment, bool align_both_ends = true);

  // For annotation reasons only: lets callers assert the mem_maps_lock_ capability to the
  // thread-safety analysis. Always returns null; never dereference the result.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

  // Reset in a forked process the MemMap whose memory has been madvised MADV_DONTFORK
  // in the parent process.
  void ResetInForkedProcess();

  // 'redzone_size_ == 0' indicates that we are not using memory-tool on this mapping.
  size_t GetRedzoneSize() const { return redzone_size_; }

#ifdef ART_PAGE_SIZE_AGNOSTIC
  // Page size is discovered at runtime on page-size-agnostic builds; the DCHECK
  // guards against use before page_size_ has been set up.
  static inline size_t GetPageSize() {
    DCHECK_NE(page_size_, 0u);
    return page_size_;
  }
#else
  // Page size is a compile-time constant on this configuration.
  static constexpr size_t GetPageSize() {
    return GetPageSizeSlow();
  }
#endif

 private:
  // Internal constructor used by the factory functions above; records the mapping's
  // bounds, protection, redzone and ownership ('reuse') state.
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  // Slow path of Reset(); only called when IsValid().
  void DoReset();
  // Marks this MemMap as invalid without releasing the pages.
  void Invalidate();
  // Exchanges all data members with `other` (used by swap()).
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  // Linear-scan allocator used on 64-bit targets without MAP_32BIT
  // (see USE_ART_LOW_4G_ALLOCATOR above).
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Release memory owned by a reservation mapping.
  void ReleaseReservedMemory(size_t byte_count);

  // Validate the result of a map request against the requested address. A static member
  // function so it can access the real munmap to clean up on mismatch; writes a
  // diagnostic to `error_msg` and returns false on failure.
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  // Validate that `reservation` can satisfy a request for `byte_count` bytes at
  // `expected_ptr`; writes a diagnostic to `error_msg` and returns false on failure.
  static bool CheckReservation(uint8_t* expected_ptr,
                               size_t byte_count,
                               const char* name,
                               const MemMap& reservation,
                               /*out*/std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;  // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;  // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
  int prot_ = 0;  // Protection of the map.

  // When reuse_ is true, this is a view of a mapping on which
  // we do not take ownership and are not responsible for
  // unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  // Size of the memory-tool redzone; 0 when no memory tool is in use (see GetRedzoneSize()).
  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;  // Next memory location to check for low_4g extent.

  // Attempt a single low-4GB placement at `ptr`; helper of the low-4G allocator.
  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  // Platform-specific mmap/munmap hooks, implemented in the target-specific sources.
  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  // Guards the global bookkeeping of mappings; see GetMemMapsLock() for the
  // thread-safety-annotation helper.
  static std::mutex* mem_maps_lock_;

#ifdef ART_PAGE_SIZE_AGNOSTIC
  // Runtime-discovered page size; presumably set during Init() — see implementation.
  static size_t page_size_;
#endif

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};
479
swap(MemMap & lhs,MemMap & rhs)480 inline void swap(MemMap& lhs, MemMap& rhs) {
481 lhs.swap(rhs);
482 }
483
484 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
485
486 // Zero and maybe release memory if possible, no requirements on alignments.
487 void ZeroMemory(void* address, size_t length, bool release_eagerly);
ZeroAndReleaseMemory(void * address,size_t length)488 inline void ZeroAndReleaseMemory(void* address, size_t length) {
489 ZeroMemory(address, length, /* release_eagerly= */ true);
490 }
491
492 } // namespace art
493
494 #endif // ART_LIBARTBASE_BASE_MEM_MAP_H_
495