• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
18 #define ART_LIBARTBASE_BASE_MEM_MAP_H_
19 
20 #include <stddef.h>
21 #include <sys/types.h>
22 
23 #include <map>
24 #include <mutex>
25 #include <string>
26 
27 #include "android-base/thread_annotations.h"
28 #include "bit_utils.h"
29 #include "globals.h"
30 #include "macros.h"
31 
32 namespace art {
33 
#if defined(__LP64__) && !defined(__Fuchsia__) && \
    (defined(__aarch64__) || defined(__riscv) || defined(__APPLE__))
// 64-bit platforms without a MAP_32BIT-style kernel facility: ART emulates
// low-4GB allocation itself with a linear scan (see MemMap implementation).
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
// On Linux, madvise(MADV_DONTNEED) zero-fills anonymous private pages, and
// the mremap(2) syscall is available for MemMap::ReplaceWith.
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
// present.
#define HAVE_MREMAP_SYSCALL false
#endif
53 
54 // Used to keep track of mmap segments.
55 //
56 // On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
57 // for free pages. For security, the start of this scan should be randomized. This requires a
58 // dynamic initializer.
59 // For this to work, it is paramount that there are no other static initializers that access MemMap.
60 // Otherwise, calls might see uninitialized values.
61 class MemMap {
62  public:
63   static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
64 
65   // Creates an invalid mapping.
MemMap()66   MemMap() {}
67 
68   // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
Invalid()69   static MemMap Invalid() {
70     return MemMap();
71   }
72 
73   MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
74   MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
75     Reset();
76     swap(other);
77     return *this;
78   }
79 
80   // Releases the memory mapping.
81   ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
82 
83   // Swap two MemMaps.
84   void swap(MemMap& other);
85 
Reset()86   void Reset() {
87     if (IsValid()) {
88       DoReset();
89     }
90   }
91 
IsValid()92   bool IsValid() const {
93     return base_size_ != 0u;
94   }
95 
96   // Replace the data in this memmmap with the data in the memmap pointed to by source. The caller
97   // relinquishes ownership of the source mmap.
98   //
99   // For the call to be successful:
100   //   * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
101   //     [source->Begin(), source->End()].
102   //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
103   //     with them.
104   //   * kCanReplaceMapping must be true.
105   //   * Neither source nor dest may use manual redzones.
106   //   * Both source and dest must have the same offset from the nearest page boundary.
107   //   * mremap must succeed when called on the mappings.
108   //
109   // If this call succeeds it will return true and:
110   //   * Invalidate *source
111   //   * The protection of this will remain the same.
112   //   * The size of this will be the size of the source
113   //   * The data in this will be the data from source.
114   //
115   // If this call fails it will return false and make no changes to *source or this. The ownership
116   // of the source mmap is returned to the caller.
117   bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
118 
119   // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
120   static void SetDebugName(void* map_ptr, const char* name, size_t size);
121 
122   // Request an anonymous region of length 'byte_count' and a requested base address.
123   // Use null as the requested base address if you don't care.
124   //
125   // `reuse` allows re-mapping an address range from an existing mapping which retains the
126   // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
127   // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
128   //
129   // The word "anonymous" in this context means "not backed by a file". The supplied
130   // 'name' will be used -- on systems that support it -- to give the mapping
131   // a name.
132   //
133   // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
134   static MemMap MapAnonymous(const char* name,
135                              uint8_t* addr,
136                              size_t byte_count,
137                              int prot,
138                              bool low_4gb,
139                              bool reuse,
140                              /*inout*/MemMap* reservation,
141                              /*out*/std::string* error_msg,
142                              bool use_debug_name = true);
143 
144   // Request an aligned anonymous region, where the alignment must be higher
145   // than the runtime gPageSize. We can't directly ask for a MAP_SHARED
146   // (anonymous or otherwise) mapping to be aligned as in that case file offset
147   // is involved and could make the starting offset to be out of sync with
148   // another mapping of the same file.
149   static MemMap MapAnonymousAligned(const char* name,
150                                     size_t byte_count,
151                                     int prot,
152                                     bool low_4gb,
153                                     size_t alignment,
154                                     /*out=*/std::string* error_msg);
155 
MapAnonymous(const char * name,size_t byte_count,int prot,bool low_4gb,std::string * error_msg)156   static MemMap MapAnonymous(const char* name,
157                              size_t byte_count,
158                              int prot,
159                              bool low_4gb,
160                              /*out*/std::string* error_msg) {
161     return MapAnonymous(name,
162                         /*addr=*/ nullptr,
163                         byte_count,
164                         prot,
165                         low_4gb,
166                         /*reuse=*/ false,
167                         /*reservation=*/ nullptr,
168                         error_msg);
169   }
MapAnonymous(const char * name,size_t byte_count,int prot,bool low_4gb,MemMap * reservation,std::string * error_msg)170   static MemMap MapAnonymous(const char* name,
171                              size_t byte_count,
172                              int prot,
173                              bool low_4gb,
174                              MemMap* reservation,
175                              /*out*/std::string* error_msg) {
176     return MapAnonymous(name,
177                         /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
178                         byte_count,
179                         prot,
180                         low_4gb,
181                         /*reuse=*/ false,
182                         reservation,
183                         error_msg);
184   }
185 
186   // Create placeholder for a region allocated by direct call to mmap.
187   // This is useful when we do not have control over the code calling mmap,
188   // but when we still want to keep track of it in the list.
189   // The region is not considered to be owned and will not be unmmaped.
190   static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);
191 
192   // Map part of a file, taking care of non-page aligned offsets. The
193   // "start" offset is absolute, not relative.
194   //
195   // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
MapFile(size_t byte_count,int prot,int flags,int fd,off_t start,bool low_4gb,const char * filename,std::string * error_msg)196   static MemMap MapFile(size_t byte_count,
197                         int prot,
198                         int flags,
199                         int fd,
200                         off_t start,
201                         bool low_4gb,
202                         const char* filename,
203                         std::string* error_msg) {
204     return MapFileAtAddress(nullptr,
205                             byte_count,
206                             prot,
207                             flags,
208                             fd,
209                             start,
210                             /*low_4gb=*/ low_4gb,
211                             filename,
212                             /*reuse=*/ false,
213                             /*reservation=*/ nullptr,
214                             error_msg);
215   }
216 
217   // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
218   // not relative. This version allows requesting a specific address for the base of the mapping.
219   //
220   // `reuse` allows re-mapping an address range from an existing mapping which retains the
221   // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
222   // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
223   //
224   // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
225   // This helps improve performance of the fail case since reading and printing /proc/maps takes
226   // several milliseconds in the worst case.
227   //
228   // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
229   static MemMap MapFileAtAddress(uint8_t* addr,
230                                  size_t byte_count,
231                                  int prot,
232                                  int flags,
233                                  int fd,
234                                  off_t start,
235                                  bool low_4gb,
236                                  const char* filename,
237                                  bool reuse,
238                                  /*inout*/MemMap* reservation,
239                                  /*out*/std::string* error_msg);
240 
GetName()241   const std::string& GetName() const {
242     return name_;
243   }
244 
245   bool Sync();
246 
247   bool Protect(int prot);
248 
249   void FillWithZero(bool release_eagerly);
MadviseDontNeedAndZero()250   void MadviseDontNeedAndZero() {
251     FillWithZero(/* release_eagerly= */ true);
252   }
253   int MadviseDontFork();
254 
GetProtect()255   int GetProtect() const {
256     return prot_;
257   }
258 
Begin()259   uint8_t* Begin() const {
260     return begin_;
261   }
262 
Size()263   size_t Size() const {
264     return size_;
265   }
266 
267   // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
268   void SetSize(size_t new_size);
269 
End()270   uint8_t* End() const {
271     return Begin() + Size();
272   }
273 
BaseBegin()274   void* BaseBegin() const {
275     return base_begin_;
276   }
277 
BaseSize()278   size_t BaseSize() const {
279     return base_size_;
280   }
281 
BaseEnd()282   void* BaseEnd() const {
283     return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
284   }
285 
HasAddress(const void * addr)286   bool HasAddress(const void* addr) const {
287     return Begin() <= addr && addr < End();
288   }
289 
290   // Unmap the pages at end and remap them to create another memory map.
291   MemMap RemapAtEnd(uint8_t* new_end,
292                     const char* tail_name,
293                     int tail_prot,
294                     std::string* error_msg,
295                     bool use_debug_name = true);
296 
297   // Unmap the pages of a file at end and remap them to create another memory map.
298   MemMap RemapAtEnd(uint8_t* new_end,
299                     const char* tail_name,
300                     int tail_prot,
301                     int tail_flags,
302                     int fd,
303                     off_t offset,
304                     std::string* error_msg,
305                     bool use_debug_name = true);
306 
307   // Take ownership of pages at the beginning of the mapping. The mapping must be an
308   // anonymous reservation mapping, owning entire pages. The `byte_count` must not
309   // exceed the size of this reservation.
310   //
311   // Returns a mapping owning `byte_count` bytes rounded up to entire pages
312   // with size set to the passed `byte_count`. If 'reuse' is true then the caller
313   // is responsible for unmapping the taken pages.
314   MemMap TakeReservedMemory(size_t byte_count, bool reuse = false);
315 
316   static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
317       REQUIRES(!MemMap::mem_maps_lock_);
318   static void DumpMaps(std::ostream& os, bool terse = false)
319       REQUIRES(!MemMap::mem_maps_lock_);
320 
321   // Init and Shutdown are NOT thread safe.
322   // Both may be called multiple times and MemMap objects may be created any
323   // time after the first call to Init and before the first call to Shutodwn.
324   static void Init() REQUIRES(!MemMap::mem_maps_lock_);
325   static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
326   static bool IsInitialized();
327 
328   // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
329   // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
330   // intermittently.
331   void TryReadable();
332 
333   // Align the map by unmapping the unaligned part at the lower end and if 'align_both_ends' is
334   // true, then the higher end as well.
335   void AlignBy(size_t alignment, bool align_both_ends = true);
336 
337   // For annotation reasons.
GetMemMapsLock()338   static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
339     return nullptr;
340   }
341 
342   // Reset in a forked process the MemMap whose memory has been madvised MADV_DONTFORK
343   // in the parent process.
344   void ResetInForkedProcess();
345 
346   // 'redzone_size_ == 0' indicates that we are not using memory-tool on this mapping.
GetRedzoneSize()347   size_t GetRedzoneSize() const { return redzone_size_; }
348 
349 #ifdef ART_PAGE_SIZE_AGNOSTIC
GetPageSize()350   static inline size_t GetPageSize() {
351     DCHECK_NE(page_size_, 0u);
352     return page_size_;
353   }
354 #else
GetPageSize()355   static constexpr size_t GetPageSize() {
356     return GetPageSizeSlow();
357   }
358 #endif
359 
360  private:
361   MemMap(const std::string& name,
362          uint8_t* begin,
363          size_t size,
364          void* base_begin,
365          size_t base_size,
366          int prot,
367          bool reuse,
368          size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
369 
370   void DoReset();
371   void Invalidate();
372   void SwapMembers(MemMap& other);
373 
374   static void DumpMapsLocked(std::ostream& os, bool terse)
375       REQUIRES(MemMap::mem_maps_lock_);
376   static bool HasMemMap(MemMap& map)
377       REQUIRES(MemMap::mem_maps_lock_);
378   static MemMap* GetLargestMemMapAt(void* address)
379       REQUIRES(MemMap::mem_maps_lock_);
380   static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
381       REQUIRES(!MemMap::mem_maps_lock_);
382 
383   // Internal version of mmap that supports low 4gb emulation.
384   static void* MapInternal(void* addr,
385                            size_t length,
386                            int prot,
387                            int flags,
388                            int fd,
389                            off_t offset,
390                            bool low_4gb)
391       REQUIRES(!MemMap::mem_maps_lock_);
392   static void* MapInternalArtLow4GBAllocator(size_t length,
393                                              int prot,
394                                              int flags,
395                                              int fd,
396                                              off_t offset)
397       REQUIRES(!MemMap::mem_maps_lock_);
398 
399   // Release memory owned by a reservation mapping.
400   void ReleaseReservedMemory(size_t byte_count);
401 
402   // member function to access real_munmap
403   static bool CheckMapRequest(uint8_t* expected_ptr,
404                               void* actual_ptr,
405                               size_t byte_count,
406                               std::string* error_msg);
407 
408   static bool CheckReservation(uint8_t* expected_ptr,
409                                size_t byte_count,
410                                const char* name,
411                                const MemMap& reservation,
412                                /*out*/std::string* error_msg);
413 
414   std::string name_;
415   uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
416   size_t size_ = 0u;            // Length of data.
417 
418   void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
419   size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
420   int prot_ = 0;                // Protection of the map.
421 
422   // When reuse_ is true, this is just a view of an existing mapping
423   // and we do not take ownership and are not responsible for
424   // unmapping.
425   bool reuse_ = false;
426 
427   // When already_unmapped_ is true the destructor will not call munmap.
428   bool already_unmapped_ = false;
429 
430   size_t redzone_size_ = 0u;
431 
432 #if USE_ART_LOW_4G_ALLOCATOR
433   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
434 
435   static void* TryMemMapLow4GB(void* ptr,
436                                size_t page_aligned_byte_count,
437                                int prot,
438                                int flags,
439                                int fd,
440                                off_t offset);
441 #endif
442 
443   static void TargetMMapInit();
444   static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
445   static int TargetMUnmap(void* start, size_t len);
446 
447   static std::mutex* mem_maps_lock_;
448 
449 #ifdef ART_PAGE_SIZE_AGNOSTIC
450   static size_t page_size_;
451 #endif
452 
453   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
454 };
455 
swap(MemMap & lhs,MemMap & rhs)456 inline void swap(MemMap& lhs, MemMap& rhs) {
457   lhs.swap(rhs);
458 }
459 
460 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
461 
462 // Zero and maybe release memory if possible, no requirements on alignments.
463 void ZeroMemory(void* address, size_t length, bool release_eagerly);
ZeroAndReleaseMemory(void * address,size_t length)464 inline void ZeroAndReleaseMemory(void* address, size_t length) {
465   ZeroMemory(address, length, /* release_eagerly= */ true);
466 }
467 
468 }  // namespace art
469 
470 #endif  // ART_LIBARTBASE_BASE_MEM_MAP_H_
471