/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
#define ART_LIBARTBASE_BASE_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"
#include "macros.h"

namespace art {

#if defined(__LP64__) && !defined(__Fuchsia__) && \
    (defined(__aarch64__) || defined(__riscv) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We can never perform MemMap::ReplaceWith on non-Linux hosts since the mremap syscall is
// not present.
#define HAVE_MREMAP_SYSCALL false
#endif

// Used to keep track of mmap segments.
//
// On 64-bit systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source.
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source.
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
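
  // Example use of ReplaceWith() (illustrative sketch only, not taken from ART code;
  // `dest` and `src` are placeholder names for two valid, non-reused MemMaps that
  // satisfy the preconditions listed above):
  //
  //   std::string error;
  //   if (dest.ReplaceWith(&src, &error)) {
  //     // `src` is now invalid; `dest` holds the data previously in `src`.
  //   } else {
  //     // Neither mapping was changed; `error` describes the failure.
  //   }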

  // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             /*inout*/MemMap* reservation,
                             /*out*/std::string* error_msg,
                             bool use_debug_name = true);

  // Request an aligned anonymous region. We can't directly ask for a MAP_SHARED (anonymous or
  // otherwise) mapping to be aligned as in that case a file offset is involved and could cause
  // the starting offset to be out of sync with another mapping of the same file.
  static MemMap MapAnonymousAligned(const char* name,
                                    size_t byte_count,
                                    int prot,
                                    bool low_4gb,
                                    size_t alignment,
                                    /*out=*/std::string* error_msg);

  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        /*reservation=*/ nullptr,
                        error_msg);
  }
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             MemMap* reservation,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        reservation,
                        error_msg);
  }
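
  // Example use of the simple MapAnonymous() overload above (illustrative sketch only;
  // the name and size are placeholders, and the PROT_* constants come from <sys/mman.h>,
  // which callers are assumed to include):
  //
  //   std::string error_msg;
  //   MemMap map = MemMap::MapAnonymous("example-region",
  //                                     /*byte_count=*/ 4096,
  //                                     PROT_READ | PROT_WRITE,
  //                                     /*low_4gb=*/ false,
  //                                     &error_msg);
  //   if (!map.IsValid()) {
  //     // Allocation failed; `error_msg` describes why.
  //   }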

  // Create a placeholder for a region allocated by a direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets. The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            /*reuse=*/ false,
                            /*reservation=*/ nullptr,
                            error_msg);
  }
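
  // Example use of MapFile() (illustrative sketch only; `fd` and `file_size` are
  // placeholders, and the PROT_* / MAP_* constants come from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap map = MemMap::MapFile(file_size,
  //                                PROT_READ,
  //                                MAP_PRIVATE,
  //                                fd,
  //                                /*start=*/ 0,
  //                                /*low_4gb=*/ false,
  //                                /*filename=*/ "example-file",
  //                                &error_msg);
  //   if (!map.IsValid()) {
  //     // Mapping failed; `error_msg` describes why.
  //   }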

  // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
  // This helps improve performance of the fail case since reading and printing /proc/maps takes
  // several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 const char* filename,
                                 bool reuse,
                                 /*inout*/MemMap* reservation,
                                 /*out*/std::string* error_msg);
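
  // Example use of MapFileAtAddress() over the start of an existing reservation
  // (illustrative sketch only; `reservation`, `fd`, and `length` are placeholders,
  // and the PROT_* / MAP_* constants come from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap map = MemMap::MapFileAtAddress(reservation.Begin(),
  //                                         length,
  //                                         PROT_READ,
  //                                         MAP_PRIVATE,
  //                                         fd,
  //                                         /*start=*/ 0,
  //                                         /*low_4gb=*/ false,
  //                                         /*filename=*/ "example-file",
  //                                         /*reuse=*/ false,
  //                                         &reservation,
  //                                         &error_msg);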

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();
  int MadviseDontFork();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at the end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Unmap the pages of a file at the end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    int tail_flags,
                    int fd,
                    off_t offset,
                    std::string* error_msg,
                    bool use_debug_name = true);
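
  // Example use of the anonymous RemapAtEnd() overload above (illustrative sketch
  // only; `map` and `new_end` are placeholders, with `new_end` assumed to be a
  // page-aligned address inside the mapping):
  //
  //   std::string error_msg;
  //   MemMap tail = map.RemapAtEnd(new_end, "example-tail", PROT_READ, &error_msg);
  //   // On success, `map` now ends at `new_end` and `tail` owns the remainder.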

  // Take ownership of pages at the beginning of the mapping. The mapping must be an
  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
  // exceed the size of this reservation.
  //
  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
  // with size set to the passed `byte_count`. If 'reuse' is true then the caller
  // is responsible for unmapping the taken pages.
  MemMap TakeReservedMemory(size_t byte_count, bool reuse = false);
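
  // Example of carving pages off the front of a reservation with TakeReservedMemory()
  // (illustrative sketch only; the names and sizes are placeholders, and the use of a
  // PROT_NONE anonymous mapping as the reservation is an assumption, not ART code):
  //
  //   std::string error_msg;
  //   MemMap reservation = MemMap::MapAnonymous("example-reservation",
  //                                             /*byte_count=*/ 64 * 4096,
  //                                             PROT_NONE,
  //                                             /*low_4gb=*/ false,
  //                                             &error_msg);
  //   MemMap chunk = reservation.TakeReservedMemory(/*byte_count=*/ 4096);
  //   // `chunk` now owns the first page; `reservation` covers what remains.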

  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
  static bool IsInitialized();
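
  // Typical lifecycle sketch (illustrative only, not taken from ART code):
  //
  //   MemMap::Init();      // before the first MemMap is created
  //   // ... create and use MemMap objects ...
  //   MemMap::Shutdown();  // after the last MemMap has been destroyed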

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned part at the lower end and, if 'align_both_ends' is
  // true, the higher end as well.
  void AlignBy(size_t alignment, bool align_both_ends = true);

  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

  // In a forked process, reset a MemMap whose memory has been madvised MADV_DONTFORK
  // in the parent process.
  void ResetInForkedProcess();

  // 'redzone_size_ == 0' indicates that we are not using memory-tool on this mapping.
  size_t GetRedzoneSize() const { return redzone_size_; }

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  void DoReset();
  void Invalidate();
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Release memory owned by a reservation mapping.
  void ReleaseReservedMemory(size_t byte_count);

  // Member function to access real_munmap.
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  static bool CheckReservation(uint8_t* expected_ptr,
                               size_t byte_count,
                               const char* name,
                               const MemMap& reservation,
                               /*out*/std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;            // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
  int prot_ = 0;                // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.

  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

inline void swap(MemMap& lhs, MemMap& rhs) {
  lhs.swap(rhs);
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible; there are no requirements on alignment.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_MEM_MAP_H_