/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"

namespace art {

#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#else
static constexpr bool kMadviseZeroes = false;
#endif

// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  // "reuse" allows re-mapping an address range from an existing mapping.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapAnonymous(const char* name,
                              uint8_t* addr,
                              size_t byte_count,
                              int prot,
                              bool low_4gb,
                              bool reuse,
                              std::string* error_msg,
                              bool use_ashmem = true);
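
  // Illustrative usage sketch (not part of the original header; the region name and
  // size below are placeholders):
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapAnonymous("example-region",
  //                                      /* addr */ nullptr,
  //                                      /* byte_count */ 4096,
  //                                      PROT_READ | PROT_WRITE,
  //                                      /* low_4gb */ false,
  //                                      /* reuse */ false,
  //                                      &error_msg);
  //   if (map == nullptr) {
  //     // error_msg now describes why the mapping failed.
  //   }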

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets.  The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFile(size_t byte_count,
                         int prot,
                         int flags,
                         int fd,
                         off_t start,
                         bool low_4gb,
                         const char* filename,
                         std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb*/low_4gb,
                            /*reuse*/false,
                            filename,
                            error_msg);
  }
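
  // Illustrative usage sketch (not part of the original header): mapping the first
  // 4096 bytes of an already-open file read-only. 'fd' and the file name are
  // placeholders.
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapFile(/* byte_count */ 4096,
  //                                 PROT_READ,
  //                                 MAP_PRIVATE,
  //                                 fd,
  //                                 /* start */ 0,
  //                                 /* low_4gb */ false,
  //                                 "/path/to/file",
  //                                 &error_msg);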

  // Map part of a file, taking care of non-page aligned offsets.  The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
  // the memory. If error_msg is null then we do not print /proc/maps to the log if
  // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
  // printing /proc/maps takes several milliseconds in the worst case.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFileAtAddress(uint8_t* addr,
                                  size_t byte_count,
                                  int prot,
                                  int flags,
                                  int fd,
                                  off_t start,
                                  bool low_4gb,
                                  bool reuse,
                                  const char* filename,
                                  std::string* error_msg);
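
  // Illustrative usage sketch (not part of the original header): creating a
  // non-owning view over part of an existing mapping. 'existing_begin', 'fd' and
  // the file name are placeholders.
  //
  //   std::string error_msg;
  //   MemMap* view = MemMap::MapFileAtAddress(existing_begin,
  //                                           /* byte_count */ 4096,
  //                                           PROT_READ,
  //                                           MAP_PRIVATE,
  //                                           fd,
  //                                           /* start */ 0,
  //                                           /* low_4gb */ false,
  //                                           /* reuse */ true,
  //                                           "/path/to/file",
  //                                           &error_msg);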

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at end and remap them to create another memory map.
  MemMap* RemapAtEnd(uint8_t* new_end,
                     const char* tail_name,
                     int tail_prot,
                     std::string* error_msg,
                     bool use_ashmem = true);
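
  // Illustrative usage sketch (not part of the original header): splitting an
  // existing mapping so that everything from 'new_end' onwards becomes a separate
  // MemMap. 'map' is a placeholder for a mapping at least two pages long, and 4096
  // stands in for the page size.
  //
  //   std::string error_msg;
  //   uint8_t* new_end = map->Begin() + 4096;
  //   MemMap* tail = map->RemapAtEnd(new_end, "example-tail", PROT_READ, &error_msg);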

  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
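
  // Illustrative ordering sketch (not part of the original header): MemMap
  // instances are created only between Init() and Shutdown().
  //
  //   MemMap::Init();
  //   // ... create and use MemMap instances ...
  //   MemMap::Shutdown();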

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose bug b/19894268, where mprotect intermittently appears
  // not to take effect.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap* map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  const std::string name_;
  uint8_t* begin_;  // Start of data. May be changed by AlignBy.
  size_t size_;  // Length of data.

  void* base_begin_;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (i.e. Zygote).
  int prot_;  // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  const bool reuse_;

  const size_t redzone_size_;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
#endif

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible, no requirements on alignments.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_