/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_IMAGE_WRITER_H_
#define ART_COMPILER_IMAGE_WRITER_H_

#include <stdint.h>
#include <valgrind.h>

#include <algorithm>
#include <cstddef>
#include <memory>
#include <ostream>
#include <set>
#include <string>
#include <unordered_map>

#include "base/bit_utils.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
#include "lock_word.h"
#include "mem_map.h"
#include "mirror/dex_cache.h"
#include "oat_file.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"

namespace art {

// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver, uintptr_t image_begin,
              bool compile_pic)
      : compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
        image_end_(0), image_objects_offset_begin_(0), image_roots_address_(0), oat_file_(nullptr),
        oat_data_begin_(nullptr), interpreter_to_interpreter_bridge_offset_(0),
        interpreter_to_compiled_code_bridge_offset_(0), jni_dlsym_lookup_offset_(0),
        quick_generic_jni_trampoline_offset_(0),
        quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
        quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
        target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
        bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
        intern_table_bytes_(0u), dirty_methods_(0u), clean_methods_(0u) {
    CHECK_NE(image_begin, 0U);
    std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr);
  }

  ~ImageWriter() {
  }

  bool PrepareImageAddressSpace();

  bool IsImageAddressSpaceReady() const {
    return image_roots_address_ != 0u;
  }

  template <typename T>
  T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return object == nullptr ? nullptr :
        reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
  }
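  // Example: with image_begin_ == 0x70000000 and GetImageOffset(object) == 0x1000,
  // this returns reinterpret_cast<T*>(0x70001000), i.e. the address the object
  // will have once the image is mapped at image_begin_ at runtime.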

  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
      const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    auto it = dex_cache_array_starts_.find(dex_file);
    DCHECK(it != dex_cache_array_starts_.end());
    return reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
        image_begin_ + RoundUp(sizeof(ImageHeader), kObjectAlignment) + it->second + offset);
  }
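  // The layout assumption here: dex cache arrays occupy the kBinDexCacheArray
  // bin, which is packed first in the image, starting at the first
  // object-aligned offset after the ImageHeader; it->second is the
  // per-dex-file start offset within that region.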

  uint8_t* GetOatFileBegin() const;

  bool Write(const std::string& image_filename, const std::string& oat_filename,
             const std::string& oat_location)
      LOCKS_EXCLUDED(Locks::mutator_lock_);

  uintptr_t GetOatDataBegin() {
    return reinterpret_cast<uintptr_t>(oat_data_begin_);
  }

 private:
  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  enum Bin {
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are huge,
    // their dirtiness is not important for the clean/dirty separation, so we arbitrarily
    // keep them at the beginning.
    kBinDexCacheArray,            // Object arrays belonging to dex cache.
    // Likely-clean:
    kBinString,                        // [String] Almost always immutable (except for obj header).
    kBinArtMethodsManagedInitialized,  // [ArtMethod] Non-native and initialized. Unlikely to dirty.
    // Unknown mix of clean/dirty:
    kBinRegular,
    // Likely-dirty:
    // All classes get their own bins since their fields are often dirtied.
    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics.
    kBinClassInitialized,         // Class initializers have been run.
    kBinClassVerified,            // Class verified, but initializers haven't been run.
    // Add more bins here if we add more segregation code.
    // Non-mirror fields must be below.
    // ArtFields should always be clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };

  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);

  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
      LockWord::kReadBarrierStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
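  // Concretely, assuming LockWord::kReadBarrierStateSize == 1: kBinMirrorCount
  // is 7, so kBinBits == 3, kBinShift == 28, and kBinMask == 0x70000000, which
  // leaves the low 28 bits of the lock word for the bin index.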

  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32 bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support.
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

  private:
    // Must be the same size as LockWord; any larger and we would truncate the data.
    const uint32_t lockword_;
  };
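
  // Presumably BinSlot packs as lockword_ == (bin << kBinShift) | index (see
  // image_writer.cc); e.g. BinSlot(kBinRegular, 0x40) with kBinShift == 28
  // yields 0x30000040.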

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    uint8_t* dst = image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  const uint8_t* GetOatAddress(uint32_t offset) const {
    // With Quick, code is within the OatFile, as they are all in one .o ELF object.
    DCHECK_LT(offset, oat_file_->Size());
    DCHECK(oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : oat_data_begin_ + offset;
  }
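  // Note that an offset of 0 is treated as "no code" and maps to nullptr rather
  // than to oat_data_begin_ itself.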

  static bool IsArtMethodBin(Bin bin) {
    return bin == kBinArtMethodClean || bin == kBinArtMethodDirty;
  }

  // Returns true if the class was in the original requested image classes list.
  bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Debug aid that dumps the list of requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
  void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Verify that unwanted classes have been removed.
  void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CalculateObjectBinSlots(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void WalkFieldsInOrder(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void WalkFieldsCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
                         Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Get quick code for a non-resolution/imt_conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const uint8_t* GetQuickEntryPoint(ArtMethod* method)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Patches references in the OatFile to expect runtime addresses.
  void SetOatChecksumFromElfFile(File* elf_file);

  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
  size_t GetBinSizeSum(Bin up_to = kBinSize) const;

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the output image.
  uint8_t* image_begin_;

  // Offset to the free space in image_.
  size_t image_end_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // The image roots address in the image.
  uint32_t image_roots_address_;

  // Oat file with code for this image.
  OatFile* oat_file_;

  // Memory mapped for generating the image.
  std::unique_ptr<MemMap> image_;

  // Indexes and lengths for dex cache arrays (objects are inside of the image so that they don't
  // move).
  struct DexCacheArrayLocation {
    size_t offset_;
    size_t length_;
    Bin bin_type_;
  };
  SafeMap<mirror::Object*, DexCacheArrayLocation> dex_cache_array_indexes_;

  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
  // to keep track. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // The start offsets of the dex cache arrays.
  SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

  // Saved hash codes. We use these to restore lock words that temporarily held forwarding
  // addresses, and to copy the hash codes back over.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Beginning target oat address for the pointers from the output image to its oat file.
  const uint8_t* oat_data_begin_;

  // Image bitmap which lets us know where the objects inside of the image reside.
  std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

  // Offset from oat_data_begin_ to the stubs.
  uint32_t interpreter_to_interpreter_bridge_offset_;
  uint32_t interpreter_to_compiled_code_bridge_offset_;
  uint32_t jni_dlsym_lookup_offset_;
  uint32_t quick_generic_jni_trampoline_offset_;
  uint32_t quick_imt_conflict_trampoline_offset_;
  uint32_t quick_resolution_trampoline_offset_;
  uint32_t quick_to_interpreter_bridge_offset_;
  const bool compile_pic_;

  // Size of pointers on the target architecture.
  size_t target_ptr_size_;

  // Bin slot tracking for dirty object packing.
  size_t bin_slot_sizes_[kBinSize];  // Number of bytes in a bin.
  size_t bin_slot_previous_sizes_[kBinSize];  // Number of bytes in previous bins.
  size_t bin_slot_count_[kBinSize];  // Number of objects in a bin.
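
  // For intuition: an object assigned BinSlot(bin, index) should end up at image
  // offset image_objects_offset_begin_ + bin_slot_previous_sizes_[bin] + index
  // once UnbinObjectsIntoOffset has run (see image_writer.cc).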

  // Cached size of the intern table for when we allocate memory.
  size_t intern_table_bytes_;

  // ArtField and ArtMethod relocation map. These are allocated as arrays of structs, but we want
  // to have one entry per ArtField for convenience. ArtFields are placed right after the end of
  // the image objects (aka the sum of bin_slot_sizes_). ArtMethods are placed right after the
  // ArtFields.
  struct NativeObjectReloc {
    uintptr_t offset;
    Bin bin_type;
  };
  std::unordered_map<void*, NativeObjectReloc> native_object_reloc_;

  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  friend class FixupClassVisitor;
  friend class FixupRootVisitor;
  friend class FixupVisitor;
  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};

}  // namespace art

#endif  // ART_COMPILER_IMAGE_WRITER_H_