• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
#ifndef ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
#define ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_

#include <string>

#include "arch/instruction_set.h"
#include "base/globals.h"
#include "base/locks.h"
#include "base/mem_map.h"
#include "gc_root-inl.h"
#include "handle.h"
29 namespace art {
30 
31 namespace mirror {
32 class Object;
33 }
34 
35 namespace jit {
36 
37 class TestZygoteMemory;
38 
// Number of bytes represented by a bit in the CodeCacheBitmap. Value is reasonable for all
// architectures.
static constexpr int kJitCodeAccountingBytes = 16;
42 
43 // Helper to get the size required for emitting `number_of_roots` in the
44 // data portion of a JIT memory region.
ComputeRootTableSize(uint32_t number_of_roots)45 uint32_t inline ComputeRootTableSize(uint32_t number_of_roots) {
46   return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
47 }
48 
49 // Represents a memory region for the JIT, where code and data are stored. This class
50 // provides allocation and deallocation primitives.
51 class JitMemoryRegion {
52  public:
JitMemoryRegion()53   JitMemoryRegion()
54       : initial_capacity_(0),
55         max_capacity_(0),
56         current_capacity_(0),
57         data_end_(0),
58         exec_end_(0),
59         used_memory_for_code_(0),
60         used_memory_for_data_(0),
61         data_pages_(),
62         writable_data_pages_(),
63         exec_pages_(),
64         non_exec_pages_(),
65         data_mspace_(nullptr),
66         exec_mspace_(nullptr) {}
67 
68   bool Initialize(size_t initial_capacity,
69                   size_t max_capacity,
70                   bool rwx_memory_allowed,
71                   bool is_zygote,
72                   std::string* error_msg)
73       REQUIRES(Locks::jit_lock_);
74 
75   // Try to increase the current capacity of the code cache. Return whether we
76   // succeeded at doing so.
77   bool IncreaseCodeCacheCapacity() REQUIRES(Locks::jit_lock_);
78 
79   // Set the footprint limit of the code cache.
80   void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
81 
82   const uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
83   void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
84   const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
85   void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
86   void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
87   void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);
88 
89   // Emit header and code into the memory pointed by `reserved_code` (despite it being const).
90   // Returns pointer to copied code (within reserved_code region; after OatQuickMethodHeader).
91   const uint8_t* CommitCode(ArrayRef<const uint8_t> reserved_code,
92                             ArrayRef<const uint8_t> code,
93                             const uint8_t* stack_map,
94                             bool has_should_deoptimize_flag)
95       REQUIRES(Locks::jit_lock_);
96 
97   // Emit roots and stack map into the memory pointed by `roots_data` (despite it being const).
98   bool CommitData(ArrayRef<const uint8_t> reserved_data,
99                   const std::vector<Handle<mirror::Object>>& roots,
100                   ArrayRef<const uint8_t> stack_map)
101       REQUIRES(Locks::jit_lock_)
102       REQUIRES_SHARED(Locks::mutator_lock_);
103 
ResetWritableMappings()104   void ResetWritableMappings() REQUIRES(Locks::jit_lock_) {
105     non_exec_pages_.ResetInForkedProcess();
106     writable_data_pages_.ResetInForkedProcess();
107     // Also clear the mspaces, which, in their implementation,
108     // point to the discarded mappings.
109     exec_mspace_ = nullptr;
110     data_mspace_ = nullptr;
111   }
112 
IsValid()113   bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
114     return exec_mspace_ != nullptr || data_mspace_ != nullptr;
115   }
116 
117   template <typename T>
FillData(const T * address,size_t n,const T & t)118   void FillData(const T* address, size_t n, const T& t)  REQUIRES(Locks::jit_lock_) {
119     std::fill_n(GetWritableDataAddress(address), n, t);
120   }
121 
122   // Generic helper for writing abritrary data in the data portion of the
123   // region.
124   template <typename T>
WriteData(const T * address,const T & value)125   void WriteData(const T* address, const T& value) {
126     *GetWritableDataAddress(address) = value;
127   }
128 
HasDualCodeMapping()129   bool HasDualCodeMapping() const {
130     return non_exec_pages_.IsValid();
131   }
132 
HasDualDataMapping()133   bool HasDualDataMapping() const {
134     return writable_data_pages_.IsValid();
135   }
136 
HasCodeMapping()137   bool HasCodeMapping() const {
138     return exec_pages_.IsValid();
139   }
140 
IsInDataSpace(const void * ptr)141   bool IsInDataSpace(const void* ptr) const {
142     return data_pages_.HasAddress(ptr);
143   }
144 
IsInExecSpace(const void * ptr)145   bool IsInExecSpace(const void* ptr) const {
146     return exec_pages_.HasAddress(ptr);
147   }
148 
GetExecPages()149   const MemMap* GetExecPages() const {
150     return &exec_pages_;
151   }
152 
153   void* MoreCore(const void* mspace, intptr_t increment);
154 
OwnsSpace(const void * mspace)155   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
156     return mspace == data_mspace_ || mspace == exec_mspace_;
157   }
158 
GetCurrentCapacity()159   size_t GetCurrentCapacity() const REQUIRES(Locks::jit_lock_) {
160     return current_capacity_;
161   }
162 
GetMaxCapacity()163   size_t GetMaxCapacity() const REQUIRES(Locks::jit_lock_) {
164     return max_capacity_;
165   }
166 
GetUsedMemoryForCode()167   size_t GetUsedMemoryForCode() const REQUIRES(Locks::jit_lock_) {
168     return used_memory_for_code_;
169   }
170 
GetResidentMemoryForCode()171   size_t GetResidentMemoryForCode() const REQUIRES(Locks::jit_lock_) {
172     return exec_end_;
173   }
174 
GetUsedMemoryForData()175   size_t GetUsedMemoryForData() const REQUIRES(Locks::jit_lock_) {
176     return used_memory_for_data_;
177   }
178 
GetResidentMemoryForData()179   size_t GetResidentMemoryForData() const REQUIRES(Locks::jit_lock_) {
180     return data_end_;
181   }
182 
GetWritableDataAddress(const T * src_ptr)183   template <typename T> T* GetWritableDataAddress(const T* src_ptr) {
184     if (!HasDualDataMapping()) {
185       return const_cast<T*>(src_ptr);
186     }
187     return const_cast<T*>(TranslateAddress(src_ptr, data_pages_, writable_data_pages_));
188   }
189 
190  private:
191   template <typename T>
TranslateAddress(T * src_ptr,const MemMap & src,const MemMap & dst)192   T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
193     CHECK(src.HasAddress(src_ptr)) << reinterpret_cast<const void*>(src_ptr);
194     const uint8_t* const raw_src_ptr = reinterpret_cast<const uint8_t*>(src_ptr);
195     return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
196   }
197 
GetUpdatableCodeMapping()198   const MemMap* GetUpdatableCodeMapping() const {
199     if (HasDualCodeMapping()) {
200       return &non_exec_pages_;
201     } else if (HasCodeMapping()) {
202       return &exec_pages_;
203     } else {
204       return nullptr;
205     }
206   }
207 
GetWritableDataMapping()208   const MemMap* GetWritableDataMapping() const {
209     if (HasDualDataMapping()) {
210       return &writable_data_pages_;
211     } else {
212       return &data_pages_;
213     }
214   }
215 
GetNonWritableDataAddress(T * src_ptr)216   template <typename T> T* GetNonWritableDataAddress(T* src_ptr) {
217     if (!HasDualDataMapping()) {
218       return src_ptr;
219     }
220     return TranslateAddress(src_ptr, writable_data_pages_, data_pages_);
221   }
222 
GetExecutableAddress(T * src_ptr)223   template <typename T> T* GetExecutableAddress(T* src_ptr) {
224     if (!HasDualCodeMapping()) {
225       return src_ptr;
226     }
227     return TranslateAddress(src_ptr, non_exec_pages_, exec_pages_);
228   }
229 
GetNonExecutableAddress(T * src_ptr)230   template <typename T> T* GetNonExecutableAddress(T* src_ptr) {
231     if (!HasDualCodeMapping()) {
232       return src_ptr;
233     }
234     return TranslateAddress(src_ptr, exec_pages_, non_exec_pages_);
235   }
236 
237   static int CreateZygoteMemory(size_t capacity, std::string* error_msg);
238   static bool ProtectZygoteMemory(int fd, std::string* error_msg);
239 
240   // The initial capacity in bytes this code region starts with.
241   size_t initial_capacity_ GUARDED_BY(Locks::jit_lock_);
242 
243   // The maximum capacity in bytes this region can go to.
244   size_t max_capacity_ GUARDED_BY(Locks::jit_lock_);
245 
246   // The current capacity in bytes of the region.
247   size_t current_capacity_ GUARDED_BY(Locks::jit_lock_);
248 
249   // The current footprint in bytes of the data portion of the region.
250   size_t data_end_ GUARDED_BY(Locks::jit_lock_);
251 
252   // The current footprint in bytes of the code portion of the region.
253   size_t exec_end_ GUARDED_BY(Locks::jit_lock_);
254 
255   // The size in bytes of used memory for the code portion of the region.
256   size_t used_memory_for_code_ GUARDED_BY(Locks::jit_lock_);
257 
258   // The size in bytes of used memory for the data portion of the region.
259   size_t used_memory_for_data_ GUARDED_BY(Locks::jit_lock_);
260 
261   // Mem map which holds data (stack maps and profiling info).
262   MemMap data_pages_;
263 
264   // Mem map which holds data with writable permission. Only valid for dual view
265   // JIT when this is the writable view and data_pages_ is the readable view.
266   MemMap writable_data_pages_;
267 
268   // Mem map which holds code and has executable permission.
269   MemMap exec_pages_;
270 
271   // Mem map which holds code with non executable permission. Only valid for dual view JIT when
272   // this is the non-executable view of code used to write updates.
273   MemMap non_exec_pages_;
274 
275   // The opaque mspace for allocating data.
276   void* data_mspace_ GUARDED_BY(Locks::jit_lock_);
277 
278   // The opaque mspace for allocating code.
279   void* exec_mspace_ GUARDED_BY(Locks::jit_lock_);
280 
281   friend class ScopedCodeCacheWrite;  // For GetUpdatableCodeMapping
282   friend class TestZygoteMemory;
283 };
284 
285 }  // namespace jit
286 }  // namespace art
287 
288 #endif  // ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
289