1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_IMAGE_WRITER_H_
18 #define ART_COMPILER_IMAGE_WRITER_H_
19 
20 #include <stdint.h>
21 #include "base/memory_tool.h"
22 
23 #include <cstddef>
24 #include <memory>
25 #include <set>
26 #include <stack>
27 #include <string>
28 #include <ostream>
29 
30 #include "art_method.h"
31 #include "base/bit_utils.h"
32 #include "base/dchecked_vector.h"
33 #include "base/enums.h"
34 #include "base/length_prefixed_array.h"
35 #include "base/macros.h"
36 #include "class_table.h"
37 #include "driver/compiler_driver.h"
38 #include "image.h"
39 #include "intern_table.h"
40 #include "lock_word.h"
41 #include "mem_map.h"
42 #include "mirror/dex_cache.h"
43 #include "obj_ptr.h"
44 #include "oat_file.h"
45 #include "os.h"
46 #include "safe_map.h"
47 #include "utils.h"
48 
49 namespace art {
50 namespace gc {
51 namespace accounting {
52 template <size_t kAlignment> class SpaceBitmap;
53 typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
54 }  // namespace accounting
55 namespace space {
56 class ImageSpace;
57 }  // namespace space
58 }  // namespace gc
59 
60 namespace mirror {
61 class ClassLoader;
62 }  // namespace mirror
63 
64 class ClassLoaderVisitor;
65 class ImtConflictTable;
66 
67 static constexpr int kInvalidFd = -1;
68 
69 // Write a Space built during compilation for use during execution.
70 class ImageWriter FINAL {
71  public:
72   ImageWriter(const CompilerDriver& compiler_driver,
73               uintptr_t image_begin,
74               bool compile_pic,
75               bool compile_app_image,
76               ImageHeader::StorageMode image_storage_mode,
77               const std::vector<const char*>& oat_filenames,
78               const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
79               const std::unordered_set<std::string>* dirty_image_objects);
80 
81   bool PrepareImageAddressSpace();
82 
IsImageAddressSpaceReady()83   bool IsImageAddressSpaceReady() const {
84     DCHECK(!image_infos_.empty());
85     for (const ImageInfo& image_info : image_infos_) {
86       if (image_info.image_roots_address_ == 0u) {
87         return false;
88       }
89     }
90     return true;
91   }
92 
GetClassLoader()93   ObjPtr<mirror::ClassLoader> GetClassLoader() {
94     CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
95     return compile_app_image_ ? *class_loaders_.begin() : nullptr;
96   }
97 
98   template <typename T>
GetImageAddress(T * object)99   T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
100     if (object == nullptr || IsInBootImage(object)) {
101       return object;
102     } else {
103       size_t oat_index = GetOatIndex(object);
104       const ImageInfo& image_info = GetImageInfo(oat_index);
105       return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
106     }
107   }
108 
109   ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
110 
GetOatFileOffset(size_t oat_index)111   size_t GetOatFileOffset(size_t oat_index) const {
112     return GetImageInfo(oat_index).oat_offset_;
113   }
114 
GetOatFileBegin(size_t oat_index)115   const uint8_t* GetOatFileBegin(size_t oat_index) const {
116     return GetImageInfo(oat_index).oat_file_begin_;
117   }
118 
119   // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
120   // the names in image_filenames.
121   // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
122   // the names in oat_filenames.
123   bool Write(int image_fd,
124              const std::vector<const char*>& image_filenames,
125              const std::vector<const char*>& oat_filenames)
126       REQUIRES(!Locks::mutator_lock_);
127 
GetOatDataBegin(size_t oat_index)128   uintptr_t GetOatDataBegin(size_t oat_index) {
129     return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
130   }
131 
132   // Get the index of the oat file containing the dex file.
133   //
134   // This "oat_index" is used to retrieve information about the the memory layout
135   // of the oat file and its associated image file, needed for link-time patching
136   // of references to the image or across oat files.
137   size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
138 
139   // Get the index of the oat file containing the dex file served by the dex cache.
140   size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
141       REQUIRES_SHARED(Locks::mutator_lock_);
142 
143   // Update the oat layout for the given oat file.
144   // This will make the oat_offset for the next oat file valid.
145   void UpdateOatFileLayout(size_t oat_index,
146                            size_t oat_loaded_size,
147                            size_t oat_data_offset,
148                            size_t oat_data_size);
149   // Update information about the oat header, i.e. checksum and trampoline offsets.
150   void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
151 
152  private:
153   using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;
154 
155   bool AllocMemory();
156 
157   // Mark the objects defined in this space in the given live bitmap.
158   void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);
159 
160   // Classify different kinds of bins that objects end up getting packed into during image writing.
161   // Ordered from dirtiest to cleanest (until ArtMethods).
162   enum Bin {
163     kBinKnownDirty,               // Known dirty objects from --dirty-image-objects list
164     kBinMiscDirty,                // Dex caches, object locks, etc...
165     kBinClassVerified,            // Class verified, but initializers haven't been run
166     // Unknown mix of clean/dirty:
167     kBinRegular,
168     kBinClassInitialized,         // Class initializers have been run
169     // All classes get their own bins since their fields often dirty
170     kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics
171     // Likely-clean:
172     kBinString,                        // [String] Almost always immutable (except for obj header).
173     // Add more bins here if we add more segregation code.
174     // Non mirror fields must be below.
175     // ArtFields should be always clean.
176     kBinArtField,
177     // If the class is initialized, then the ArtMethods are probably clean.
178     kBinArtMethodClean,
179     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
180     // initialized.
181     kBinArtMethodDirty,
182     // IMT (clean)
183     kBinImTable,
184     // Conflict tables (clean).
185     kBinIMTConflictTable,
186     // Runtime methods (always clean, do not have a length prefix array).
187     kBinRuntimeMethod,
188     // Dex cache arrays have a special slot for PC-relative addressing. Since they are
189     // huge, and as such their dirtiness is not important for the clean/dirty separation,
190     // we arbitrarily keep them at the end of the native data.
191     kBinDexCacheArray,            // Arrays belonging to dex cache.
192     kBinSize,
193     // Number of bins which are for mirror objects.
194     kBinMirrorCount = kBinArtField,
195   };
196   friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
197 
198   enum NativeObjectRelocationType {
199     kNativeObjectRelocationTypeArtField,
200     kNativeObjectRelocationTypeArtFieldArray,
201     kNativeObjectRelocationTypeArtMethodClean,
202     kNativeObjectRelocationTypeArtMethodArrayClean,
203     kNativeObjectRelocationTypeArtMethodDirty,
204     kNativeObjectRelocationTypeArtMethodArrayDirty,
205     kNativeObjectRelocationTypeRuntimeMethod,
206     kNativeObjectRelocationTypeIMTable,
207     kNativeObjectRelocationTypeIMTConflictTable,
208     kNativeObjectRelocationTypeDexCacheArray,
209   };
210   friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
211 
212   enum OatAddress {
213     kOatAddressInterpreterToInterpreterBridge,
214     kOatAddressInterpreterToCompiledCodeBridge,
215     kOatAddressJNIDlsymLookup,
216     kOatAddressQuickGenericJNITrampoline,
217     kOatAddressQuickIMTConflictTrampoline,
218     kOatAddressQuickResolutionTrampoline,
219     kOatAddressQuickToInterpreterBridge,
220     // Number of elements in the enum.
221     kOatAddressCount,
222   };
223   friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
224 
225   static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
226   // uint32 = typeof(lockword_)
227   // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
228   // failures due to invalid read barrier bits during object field reads.
229   static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
230   // 111000.....0
231   static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
232 
233   // We use the lock word to store the bin # and bin index of the object in the image.
234   //
235   // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
236   // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
237   struct BinSlot {
238     explicit BinSlot(uint32_t lockword);
239     BinSlot(Bin bin, uint32_t index);
240 
241     // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
242     Bin GetBin() const;
243     // The offset in bytes from the beginning of the bin. Aligned to object size.
244     uint32_t GetIndex() const;
245     // Pack into a single uint32_t, for storing into a lock word.
Uint32ValueBinSlot246     uint32_t Uint32Value() const { return lockword_; }
247     // Comparison operator for map support
248     bool operator<(const BinSlot& other) const  { return lockword_ < other.lockword_; }
249 
250   private:
251     // Must be the same size as LockWord, any larger and we would truncate the data.
252     const uint32_t lockword_;
253   };
254 
255   struct ImageInfo {
256     ImageInfo();
257     ImageInfo(ImageInfo&&) = default;
258 
259     // Create the image sections into the out sections variable, returns the size of the image
260     // excluding the bitmap.
261     size_t CreateImageSections(ImageSection* out_sections) const;
262 
263     std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
264 
265     // Target begin of this image. Notes: It is not valid to write here, this is the address
266     // of the target image, not necessarily where image_ is mapped. The address is only valid
267     // after layouting (otherwise null).
268     uint8_t* image_begin_ = nullptr;
269 
270     // Offset to the free space in image_, initially size of image header.
271     size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
272     uint32_t image_roots_address_ = 0;  // The image roots address in the image.
273     size_t image_offset_ = 0;  // Offset of this image from the start of the first image.
274 
275     // Image size is the *address space* covered by this image. As the live bitmap is aligned
276     // to the page size, the live bitmap will cover more address space than necessary. But live
277     // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
278     // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
279     // page-aligned).
280     size_t image_size_ = 0;
281 
282     // Oat data.
283     // Offset of the oat file for this image from start of oat files. This is
284     // valid when the previous oat file has been written.
285     size_t oat_offset_ = 0;
286     // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
287     const uint8_t* oat_file_begin_ = nullptr;
288     size_t oat_loaded_size_ = 0;
289     const uint8_t* oat_data_begin_ = nullptr;
290     size_t oat_size_ = 0;  // Size of the corresponding oat data.
291     // The oat header checksum, valid after UpdateOatFileHeader().
292     uint32_t oat_checksum_ = 0u;
293 
294     // Image bitmap which lets us know where the objects inside of the image reside.
295     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
296 
297     // The start offsets of the dex cache arrays.
298     SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
299 
300     // Offset from oat_data_begin_ to the stubs.
301     uint32_t oat_address_offsets_[kOatAddressCount] = {};
302 
303     // Bin slot tracking for dirty object packing.
304     size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
305     size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
306     size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.
307 
308     // Cached size of the intern table for when we allocate memory.
309     size_t intern_table_bytes_ = 0;
310 
311     // Number of image class table bytes.
312     size_t class_table_bytes_ = 0;
313 
314     // Number of object fixup bytes.
315     size_t object_fixup_bytes_ = 0;
316 
317     // Number of pointer fixup bytes.
318     size_t pointer_fixup_bytes_ = 0;
319 
320     // Intern table associated with this image for serialization.
321     std::unique_ptr<InternTable> intern_table_;
322 
323     // Class table associated with this image for serialization.
324     std::unique_ptr<ClassTable> class_table_;
325   };
326 
327   // We use the lock word to store the offset of the object in the image.
328   void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
329       REQUIRES_SHARED(Locks::mutator_lock_);
330   void SetImageOffset(mirror::Object* object, size_t offset)
331       REQUIRES_SHARED(Locks::mutator_lock_);
332   bool IsImageOffsetAssigned(mirror::Object* object) const
333       REQUIRES_SHARED(Locks::mutator_lock_);
334   size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
335   void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
336       REQUIRES_SHARED(Locks::mutator_lock_);
337 
338   void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
339   void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
340       REQUIRES_SHARED(Locks::mutator_lock_);
341   mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
342       REQUIRES_SHARED(Locks::mutator_lock_);
343   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
344       REQUIRES_SHARED(Locks::mutator_lock_);
345   bool IsImageBinSlotAssigned(mirror::Object* object) const
346       REQUIRES_SHARED(Locks::mutator_lock_);
347   BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
348 
349   void AddDexCacheArrayRelocation(void* array, size_t offset, ObjPtr<mirror::DexCache> dex_cache)
350       REQUIRES_SHARED(Locks::mutator_lock_);
351   void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
352 
GetImageAddressCallback(void * writer,mirror::Object * obj)353   static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
354       REQUIRES_SHARED(Locks::mutator_lock_) {
355     return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
356   }
357 
GetLocalAddress(mirror::Object * object)358   mirror::Object* GetLocalAddress(mirror::Object* object) const
359       REQUIRES_SHARED(Locks::mutator_lock_) {
360     size_t offset = GetImageOffset(object);
361     size_t oat_index = GetOatIndex(object);
362     const ImageInfo& image_info = GetImageInfo(oat_index);
363     uint8_t* dst = image_info.image_->Begin() + offset;
364     return reinterpret_cast<mirror::Object*>(dst);
365   }
366 
367   // Returns the address in the boot image if we are compiling the app image.
368   const uint8_t* GetOatAddress(OatAddress type) const;
369 
GetOatAddressForOffset(uint32_t offset,const ImageInfo & image_info)370   const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
371     // With Quick, code is within the OatFile, as there are all in one
372     // .o ELF object. But interpret it as signed.
373     DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
374     DCHECK(image_info.oat_data_begin_ != nullptr);
375     return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
376   }
377 
378   // Returns true if the class was in the original requested image classes list.
379   bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
380 
381   // Debug aid that list of requested image classes.
382   void DumpImageClasses();
383 
384   // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
385   void ComputeLazyFieldsForImageClasses()
386       REQUIRES_SHARED(Locks::mutator_lock_);
387 
388   // Visit all class loaders.
389   void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
390 
391   // Remove unwanted classes from various roots.
392   void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
393 
394   // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
395   void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
396                                ObjPtr<mirror::ClassLoader> class_loader)
397       REQUIRES_SHARED(Locks::mutator_lock_)
398       REQUIRES(!Locks::classlinker_classes_lock_);
399 
400   // Verify unwanted classes removed.
401   void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
402 
403   // Lays out where the image objects will be at runtime.
404   void CalculateNewObjectOffsets()
405       REQUIRES_SHARED(Locks::mutator_lock_);
406   void ProcessWorkStack(WorkStack* work_stack)
407       REQUIRES_SHARED(Locks::mutator_lock_);
408   void CreateHeader(size_t oat_index)
409       REQUIRES_SHARED(Locks::mutator_lock_);
410   mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
411       REQUIRES_SHARED(Locks::mutator_lock_);
412   void CalculateObjectBinSlots(mirror::Object* obj)
413       REQUIRES_SHARED(Locks::mutator_lock_);
414   void UnbinObjectsIntoOffset(mirror::Object* obj)
415       REQUIRES_SHARED(Locks::mutator_lock_);
416 
417   // Creates the contiguous image in memory and adjusts pointers.
418   void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
419   void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
420   void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
421   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
422       REQUIRES_SHARED(Locks::mutator_lock_);
423   void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
424   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
425       REQUIRES_SHARED(Locks::mutator_lock_);
426   void FixupClass(mirror::Class* orig, mirror::Class* copy)
427       REQUIRES_SHARED(Locks::mutator_lock_);
428   void FixupObject(mirror::Object* orig, mirror::Object* copy)
429       REQUIRES_SHARED(Locks::mutator_lock_);
430   void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
431       REQUIRES_SHARED(Locks::mutator_lock_);
432   void FixupPointerArray(mirror::Object* dst,
433                          mirror::PointerArray* arr,
434                          mirror::Class* klass,
435                          Bin array_type)
436       REQUIRES_SHARED(Locks::mutator_lock_);
437 
438   // Get quick code for non-resolution/imt_conflict/abstract method.
439   const uint8_t* GetQuickCode(ArtMethod* method,
440                               const ImageInfo& image_info,
441                               bool* quick_is_interpreted)
442       REQUIRES_SHARED(Locks::mutator_lock_);
443 
444   // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
445   size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
446 
447   // Return true if a method is likely to be dirtied at runtime.
448   bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);
449 
450   // Assign the offset for an ArtMethod.
451   void AssignMethodOffset(ArtMethod* method,
452                           NativeObjectRelocationType type,
453                           size_t oat_index)
454       REQUIRES_SHARED(Locks::mutator_lock_);
455 
456   // Return true if imt was newly inserted.
457   bool TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
458 
459   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
460   // relocation.
461   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
462       REQUIRES_SHARED(Locks::mutator_lock_);
463 
464   // Return true if klass is loaded by the boot class loader but not in the boot image.
465   bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
466 
467   // Return true if klass depends on a boot class loader non image class. We want to prune these
468   // classes since we do not want any boot class loader classes in the image. This means that
469   // we also cannot have any classes which refer to these boot class loader non image classes.
470   // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
471   // driver.
472   bool PruneAppImageClass(ObjPtr<mirror::Class> klass)
473       REQUIRES_SHARED(Locks::mutator_lock_);
474 
475   // early_exit is true if we had a cyclic dependency anywhere down the chain.
476   bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
477                                   bool* early_exit,
478                                   std::unordered_set<mirror::Object*>* visited)
479       REQUIRES_SHARED(Locks::mutator_lock_);
480 
IsMultiImage()481   bool IsMultiImage() const {
482     return image_infos_.size() > 1;
483   }
484 
485   static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
486 
487   uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
488 
489   // Location of where the object will be when the image is loaded at runtime.
490   template <typename T>
491   T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
492 
493   // Location of where the temporary copy of the object currently is.
494   template <typename T>
495   T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
496 
497   // Return true of obj is inside of the boot image space. This may only return true if we are
498   // compiling an app image.
499   bool IsInBootImage(const void* obj) const;
500 
501   // Return true if ptr is within the boot oat file.
502   bool IsInBootOatFile(const void* ptr) const;
503 
504   // Get the index of the oat file associated with the object.
505   size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
506 
507   // The oat index for shared data in multi-image and all data in single-image compilation.
GetDefaultOatIndex()508   size_t GetDefaultOatIndex() const {
509     return 0u;
510   }
511 
GetImageInfo(size_t oat_index)512   ImageInfo& GetImageInfo(size_t oat_index) {
513     return image_infos_[oat_index];
514   }
515 
GetImageInfo(size_t oat_index)516   const ImageInfo& GetImageInfo(size_t oat_index) const {
517     return image_infos_[oat_index];
518   }
519 
520   // Find an already strong interned string in the other images or in the boot image. Used to
521   // remove duplicates in the multi image and app image case.
522   mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
523 
524   // Return true if there already exists a native allocation for an object.
525   bool NativeRelocationAssigned(void* ptr) const;
526 
527   void CopyReference(mirror::HeapReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
528       REQUIRES_SHARED(Locks::mutator_lock_);
529 
530   void CopyReference(mirror::CompressedReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
531       REQUIRES_SHARED(Locks::mutator_lock_);
532 
533   void CopyAndFixupPointer(void** target, void* value);
534 
535   const CompilerDriver& compiler_driver_;
536 
537   // Beginning target image address for the first image.
538   uint8_t* global_image_begin_;
539 
540   // Offset from image_begin_ to where the first object is in image_.
541   size_t image_objects_offset_begin_;
542 
543   // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
544   // to keep track. These include vtable arrays, iftable arrays, and dex caches.
545   std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
546 
547   // Saved hash codes. We use these to restore lockwords which were temporarily used to have
548   // forwarding addresses as well as copying over hash codes.
549   std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
550 
551   // Oat index map for objects.
552   std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
553 
554   // Boolean flags.
555   const bool compile_pic_;
556   const bool compile_app_image_;
557 
558   // Size of pointers on the target architecture.
559   PointerSize target_ptr_size_;
560 
561   // Image data indexed by the oat file index.
562   dchecked_vector<ImageInfo> image_infos_;
563 
564   // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
565   // have one entry per art field for convenience. ArtFields are placed right after the end of the
566   // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
567   struct NativeObjectRelocation {
568     size_t oat_index;
569     uintptr_t offset;
570     NativeObjectRelocationType type;
571 
IsArtMethodRelocationNativeObjectRelocation572     bool IsArtMethodRelocation() const {
573       return type == kNativeObjectRelocationTypeArtMethodClean ||
574           type == kNativeObjectRelocationTypeArtMethodDirty ||
575           type == kNativeObjectRelocationTypeRuntimeMethod;
576     }
577   };
578   std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
579 
580   // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
581   ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
582 
583   // Counters for measurements, used for logging only.
584   uint64_t dirty_methods_;
585   uint64_t clean_methods_;
586 
587   // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
588   std::unordered_map<mirror::Class*, bool> prune_class_memo_;
589 
590   // Class loaders with a class table to write out. There should only be one class loader because
591   // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
592   // null is a valid entry.
593   std::unordered_set<mirror::ClassLoader*> class_loaders_;
594 
595   // Which mode the image is stored as, see image.h
596   const ImageHeader::StorageMode image_storage_mode_;
597 
598   // The file names of oat files.
599   const std::vector<const char*>& oat_filenames_;
600 
601   // Map of dex files to the indexes of oat files that they were compiled into.
602   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
603 
604   // Set of objects known to be dirty in the image. Can be nullptr if there are none.
605   const std::unordered_set<std::string>* dirty_image_objects_;
606 
607   class ComputeLazyFieldsForClassesVisitor;
608   class FixupClassVisitor;
609   class FixupRootVisitor;
610   class FixupVisitor;
611   class GetRootsVisitor;
612   class ImageAddressVisitorForDexCacheArray;
613   class NativeLocationVisitor;
614   class PruneClassesVisitor;
615   class PruneClassLoaderClassesVisitor;
616   class RegisterBootClassPathClassesVisitor;
617   class VisitReferencesVisitor;
618   class PruneObjectReferenceVisitor;
619 
620   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
621 };
622 
623 }  // namespace art
624 
625 #endif  // ART_COMPILER_IMAGE_WRITER_H_
626