• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_COMPILER_IMAGE_WRITER_H_
18 #define ART_COMPILER_IMAGE_WRITER_H_
19 
20 #include <stdint.h>
21 #include "base/memory_tool.h"
22 
23 #include <cstddef>
24 #include <memory>
25 #include <set>
26 #include <string>
27 #include <ostream>
28 
29 #include "base/bit_utils.h"
30 #include "base/dchecked_vector.h"
31 #include "base/length_prefixed_array.h"
32 #include "base/macros.h"
33 #include "driver/compiler_driver.h"
34 #include "gc/space/space.h"
35 #include "image.h"
36 #include "lock_word.h"
37 #include "mem_map.h"
38 #include "oat_file.h"
39 #include "mirror/dex_cache.h"
40 #include "os.h"
41 #include "safe_map.h"
42 #include "utils.h"
43 
44 namespace art {
45 namespace gc {
46 namespace space {
47 class ImageSpace;
48 }  // namespace space
49 }  // namespace gc
50 
51 class ClassTable;
52 
53 static constexpr int kInvalidFd = -1;
54 
55 // Write a Space built during compilation for use during execution.
56 class ImageWriter FINAL {
57  public:
58   ImageWriter(const CompilerDriver& compiler_driver,
59               uintptr_t image_begin,
60               bool compile_pic,
61               bool compile_app_image,
62               ImageHeader::StorageMode image_storage_mode,
63               const std::vector<const char*>& oat_filenames,
64               const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
65 
66   bool PrepareImageAddressSpace();
67 
IsImageAddressSpaceReady()68   bool IsImageAddressSpaceReady() const {
69     DCHECK(!image_infos_.empty());
70     for (const ImageInfo& image_info : image_infos_) {
71       if (image_info.image_roots_address_ == 0u) {
72         return false;
73       }
74     }
75     return true;
76   }
77 
78   template <typename T>
GetImageAddress(T * object)79   T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
80     if (object == nullptr || IsInBootImage(object)) {
81       return object;
82     } else {
83       size_t oat_index = GetOatIndex(object);
84       const ImageInfo& image_info = GetImageInfo(oat_index);
85       return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
86     }
87   }
88 
89   ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
90 
91   template <typename PtrType>
GetDexCacheArrayElementImageAddress(const DexFile * dex_file,uint32_t offset)92   PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
93       const SHARED_REQUIRES(Locks::mutator_lock_) {
94     auto oat_it = dex_file_oat_index_map_.find(dex_file);
95     DCHECK(oat_it != dex_file_oat_index_map_.end());
96     const ImageInfo& image_info = GetImageInfo(oat_it->second);
97     auto it = image_info.dex_cache_array_starts_.find(dex_file);
98     DCHECK(it != image_info.dex_cache_array_starts_.end());
99     return reinterpret_cast<PtrType>(
100         image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
101             it->second + offset);
102   }
103 
GetOatFileOffset(size_t oat_index)104   size_t GetOatFileOffset(size_t oat_index) const {
105     return GetImageInfo(oat_index).oat_offset_;
106   }
107 
GetOatFileBegin(size_t oat_index)108   const uint8_t* GetOatFileBegin(size_t oat_index) const {
109     return GetImageInfo(oat_index).oat_file_begin_;
110   }
111 
112   // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
113   // the names in image_filenames.
114   // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
115   // the names in oat_filenames.
116   bool Write(int image_fd,
117              const std::vector<const char*>& image_filenames,
118              const std::vector<const char*>& oat_filenames)
119       REQUIRES(!Locks::mutator_lock_);
120 
GetOatDataBegin(size_t oat_index)121   uintptr_t GetOatDataBegin(size_t oat_index) {
122     return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
123   }
124 
125   // Get the index of the oat file containing the dex file.
126   //
127   // This "oat_index" is used to retrieve information about the the memory layout
128   // of the oat file and its associated image file, needed for link-time patching
129   // of references to the image or across oat files.
130   size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
131 
132   // Get the index of the oat file containing the dex file served by the dex cache.
133   size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
134       SHARED_REQUIRES(Locks::mutator_lock_);
135 
136   // Update the oat layout for the given oat file.
137   // This will make the oat_offset for the next oat file valid.
138   void UpdateOatFileLayout(size_t oat_index,
139                            size_t oat_loaded_size,
140                            size_t oat_data_offset,
141                            size_t oat_data_size);
142   // Update information about the oat header, i.e. checksum and trampoline offsets.
143   void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
144 
145  private:
146   bool AllocMemory();
147 
148   // Mark the objects defined in this space in the given live bitmap.
149   void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
150 
151   // Classify different kinds of bins that objects end up getting packed into during image writing.
152   // Ordered from dirtiest to cleanest (until ArtMethods).
153   enum Bin {
154     kBinMiscDirty,                // Dex caches, object locks, etc...
155     kBinClassVerified,            // Class verified, but initializers haven't been run
156     // Unknown mix of clean/dirty:
157     kBinRegular,
158     kBinClassInitialized,         // Class initializers have been run
159     // All classes get their own bins since their fields often dirty
160     kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics
161     // Likely-clean:
162     kBinString,                        // [String] Almost always immutable (except for obj header).
163     // Add more bins here if we add more segregation code.
164     // Non mirror fields must be below.
165     // ArtFields should be always clean.
166     kBinArtField,
167     // If the class is initialized, then the ArtMethods are probably clean.
168     kBinArtMethodClean,
169     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
170     // initialized.
171     kBinArtMethodDirty,
172     // Conflict tables (clean).
173     kBinIMTConflictTable,
174     // Runtime methods (always clean, do not have a length prefix array).
175     kBinRuntimeMethod,
176     // Dex cache arrays have a special slot for PC-relative addressing. Since they are
177     // huge, and as such their dirtiness is not important for the clean/dirty separation,
178     // we arbitrarily keep them at the end of the native data.
179     kBinDexCacheArray,            // Arrays belonging to dex cache.
180     kBinSize,
181     // Number of bins which are for mirror objects.
182     kBinMirrorCount = kBinArtField,
183   };
184   friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
185 
186   enum NativeObjectRelocationType {
187     kNativeObjectRelocationTypeArtField,
188     kNativeObjectRelocationTypeArtFieldArray,
189     kNativeObjectRelocationTypeArtMethodClean,
190     kNativeObjectRelocationTypeArtMethodArrayClean,
191     kNativeObjectRelocationTypeArtMethodDirty,
192     kNativeObjectRelocationTypeArtMethodArrayDirty,
193     kNativeObjectRelocationTypeRuntimeMethod,
194     kNativeObjectRelocationTypeIMTConflictTable,
195     kNativeObjectRelocationTypeDexCacheArray,
196   };
197   friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
198 
199   enum OatAddress {
200     kOatAddressInterpreterToInterpreterBridge,
201     kOatAddressInterpreterToCompiledCodeBridge,
202     kOatAddressJNIDlsymLookup,
203     kOatAddressQuickGenericJNITrampoline,
204     kOatAddressQuickIMTConflictTrampoline,
205     kOatAddressQuickResolutionTrampoline,
206     kOatAddressQuickToInterpreterBridge,
207     // Number of elements in the enum.
208     kOatAddressCount,
209   };
210   friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
211 
212   static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
213   // uint32 = typeof(lockword_)
214   // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
215   // failures due to invalid read barrier bits during object field reads.
216   static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
217       LockWord::kReadBarrierStateSize;
218   // 111000.....0
219   static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
220 
221   // We use the lock word to store the bin # and bin index of the object in the image.
222   //
223   // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
224   // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
225   struct BinSlot {
226     explicit BinSlot(uint32_t lockword);
227     BinSlot(Bin bin, uint32_t index);
228 
229     // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
230     Bin GetBin() const;
231     // The offset in bytes from the beginning of the bin. Aligned to object size.
232     uint32_t GetIndex() const;
233     // Pack into a single uint32_t, for storing into a lock word.
Uint32ValueBinSlot234     uint32_t Uint32Value() const { return lockword_; }
235     // Comparison operator for map support
236     bool operator<(const BinSlot& other) const  { return lockword_ < other.lockword_; }
237 
238   private:
239     // Must be the same size as LockWord, any larger and we would truncate the data.
240     const uint32_t lockword_;
241   };
242 
243   struct ImageInfo {
244     ImageInfo();
245     ImageInfo(ImageInfo&&) = default;
246 
247     // Create the image sections into the out sections variable, returns the size of the image
248     // excluding the bitmap.
249     size_t CreateImageSections(ImageSection* out_sections) const;
250 
251     std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
252 
253     // Target begin of this image. Notes: It is not valid to write here, this is the address
254     // of the target image, not necessarily where image_ is mapped. The address is only valid
255     // after layouting (otherwise null).
256     uint8_t* image_begin_ = nullptr;
257 
258     // Offset to the free space in image_, initially size of image header.
259     size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
260     uint32_t image_roots_address_ = 0;  // The image roots address in the image.
261     size_t image_offset_ = 0;  // Offset of this image from the start of the first image.
262 
263     // Image size is the *address space* covered by this image. As the live bitmap is aligned
264     // to the page size, the live bitmap will cover more address space than necessary. But live
265     // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
266     // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
267     // page-aligned).
268     size_t image_size_ = 0;
269 
270     // Oat data.
271     // Offset of the oat file for this image from start of oat files. This is
272     // valid when the previous oat file has been written.
273     size_t oat_offset_ = 0;
274     // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
275     const uint8_t* oat_file_begin_ = nullptr;
276     size_t oat_loaded_size_ = 0;
277     const uint8_t* oat_data_begin_ = nullptr;
278     size_t oat_size_ = 0;  // Size of the corresponding oat data.
279     // The oat header checksum, valid after UpdateOatFileHeader().
280     uint32_t oat_checksum_ = 0u;
281 
282     // Image bitmap which lets us know where the objects inside of the image reside.
283     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
284 
285     // The start offsets of the dex cache arrays.
286     SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
287 
288     // Offset from oat_data_begin_ to the stubs.
289     uint32_t oat_address_offsets_[kOatAddressCount] = {};
290 
291     // Bin slot tracking for dirty object packing.
292     size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
293     size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
294     size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.
295 
296     // Cached size of the intern table for when we allocate memory.
297     size_t intern_table_bytes_ = 0;
298 
299     // Number of image class table bytes.
300     size_t class_table_bytes_ = 0;
301 
302     // Intern table associated with this image for serialization.
303     std::unique_ptr<InternTable> intern_table_;
304 
305     // Class table associated with this image for serialization.
306     std::unique_ptr<ClassTable> class_table_;
307   };
308 
309   // We use the lock word to store the offset of the object in the image.
310   void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
311       SHARED_REQUIRES(Locks::mutator_lock_);
312   void SetImageOffset(mirror::Object* object, size_t offset)
313       SHARED_REQUIRES(Locks::mutator_lock_);
314   bool IsImageOffsetAssigned(mirror::Object* object) const
315       SHARED_REQUIRES(Locks::mutator_lock_);
316   size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
317   void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
318       SHARED_REQUIRES(Locks::mutator_lock_);
319 
320   void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
321   void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
322   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
323       SHARED_REQUIRES(Locks::mutator_lock_);
324   bool IsImageBinSlotAssigned(mirror::Object* object) const
325       SHARED_REQUIRES(Locks::mutator_lock_);
326   BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
327 
328   void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
329       SHARED_REQUIRES(Locks::mutator_lock_);
330   void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
331 
GetImageAddressCallback(void * writer,mirror::Object * obj)332   static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
333       SHARED_REQUIRES(Locks::mutator_lock_) {
334     return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
335   }
336 
GetLocalAddress(mirror::Object * object)337   mirror::Object* GetLocalAddress(mirror::Object* object) const
338       SHARED_REQUIRES(Locks::mutator_lock_) {
339     size_t offset = GetImageOffset(object);
340     size_t oat_index = GetOatIndex(object);
341     const ImageInfo& image_info = GetImageInfo(oat_index);
342     uint8_t* dst = image_info.image_->Begin() + offset;
343     return reinterpret_cast<mirror::Object*>(dst);
344   }
345 
346   // Returns the address in the boot image if we are compiling the app image.
347   const uint8_t* GetOatAddress(OatAddress type) const;
348 
GetOatAddressForOffset(uint32_t offset,const ImageInfo & image_info)349   const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
350     // With Quick, code is within the OatFile, as there are all in one
351     // .o ELF object. But interpret it as signed.
352     DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
353     DCHECK(image_info.oat_data_begin_ != nullptr);
354     return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
355   }
356 
357   // Returns true if the class was in the original requested image classes list.
358   bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
359 
360   // Debug aid that list of requested image classes.
361   void DumpImageClasses();
362 
363   // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
364   void ComputeLazyFieldsForImageClasses()
365       SHARED_REQUIRES(Locks::mutator_lock_);
366 
367   // Remove unwanted classes from various roots.
368   void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
369 
370   // Verify unwanted classes removed.
371   void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
372   static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
373       SHARED_REQUIRES(Locks::mutator_lock_);
374 
375   // Lays out where the image objects will be at runtime.
376   void CalculateNewObjectOffsets()
377       SHARED_REQUIRES(Locks::mutator_lock_);
378   void CreateHeader(size_t oat_index)
379       SHARED_REQUIRES(Locks::mutator_lock_);
380   mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
381       SHARED_REQUIRES(Locks::mutator_lock_);
382   void CalculateObjectBinSlots(mirror::Object* obj)
383       SHARED_REQUIRES(Locks::mutator_lock_);
384   void UnbinObjectsIntoOffset(mirror::Object* obj)
385       SHARED_REQUIRES(Locks::mutator_lock_);
386 
387   void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
388       SHARED_REQUIRES(Locks::mutator_lock_);
389   void WalkFieldsInOrder(mirror::Object* obj)
390       SHARED_REQUIRES(Locks::mutator_lock_);
391   static void WalkFieldsCallback(mirror::Object* obj, void* arg)
392       SHARED_REQUIRES(Locks::mutator_lock_);
393   static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
394       SHARED_REQUIRES(Locks::mutator_lock_);
395 
396   // Creates the contiguous image in memory and adjusts pointers.
397   void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
398   void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
399   static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
400       SHARED_REQUIRES(Locks::mutator_lock_);
401   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
402   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
403       SHARED_REQUIRES(Locks::mutator_lock_);
404   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
405       SHARED_REQUIRES(Locks::mutator_lock_);
406   void FixupClass(mirror::Class* orig, mirror::Class* copy)
407       SHARED_REQUIRES(Locks::mutator_lock_);
408   void FixupObject(mirror::Object* orig, mirror::Object* copy)
409       SHARED_REQUIRES(Locks::mutator_lock_);
410   void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
411       SHARED_REQUIRES(Locks::mutator_lock_);
412   void FixupPointerArray(mirror::Object* dst,
413                          mirror::PointerArray* arr,
414                          mirror::Class* klass,
415                          Bin array_type)
416       SHARED_REQUIRES(Locks::mutator_lock_);
417 
418   // Get quick code for non-resolution/imt_conflict/abstract method.
419   const uint8_t* GetQuickCode(ArtMethod* method,
420                               const ImageInfo& image_info,
421                               bool* quick_is_interpreted)
422       SHARED_REQUIRES(Locks::mutator_lock_);
423 
424   // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
425   size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
426 
427   // Return true if a method is likely to be dirtied at runtime.
428   bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
429 
430   // Assign the offset for an ArtMethod.
431   void AssignMethodOffset(ArtMethod* method,
432                           NativeObjectRelocationType type,
433                           size_t oat_index)
434       SHARED_REQUIRES(Locks::mutator_lock_);
435 
436   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
437   // relocation.
438   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
439       SHARED_REQUIRES(Locks::mutator_lock_);
440 
441   // Return true if klass is loaded by the boot class loader but not in the boot image.
442   bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
443 
444   // Return true if klass depends on a boot class loader non image class. We want to prune these
445   // classes since we do not want any boot class loader classes in the image. This means that
446   // we also cannot have any classes which refer to these boot class loader non image classes.
447   // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
448   // driver.
449   bool PruneAppImageClass(mirror::Class* klass)
450       SHARED_REQUIRES(Locks::mutator_lock_);
451 
452   // early_exit is true if we had a cyclic dependency anywhere down the chain.
453   bool PruneAppImageClassInternal(mirror::Class* klass,
454                                   bool* early_exit,
455                                   std::unordered_set<mirror::Class*>* visited)
456       SHARED_REQUIRES(Locks::mutator_lock_);
457 
458   static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
459 
460   uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
461 
462   // Location of where the object will be when the image is loaded at runtime.
463   template <typename T>
464   T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
465 
466   // Location of where the temporary copy of the object currently is.
467   template <typename T>
468   T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
469 
470   // Return true of obj is inside of the boot image space. This may only return true if we are
471   // compiling an app image.
472   bool IsInBootImage(const void* obj) const;
473 
474   // Return true if ptr is within the boot oat file.
475   bool IsInBootOatFile(const void* ptr) const;
476 
477   // Get the index of the oat file associated with the object.
478   size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
479 
480   // The oat index for shared data in multi-image and all data in single-image compilation.
GetDefaultOatIndex()481   size_t GetDefaultOatIndex() const {
482     return 0u;
483   }
484 
GetImageInfo(size_t oat_index)485   ImageInfo& GetImageInfo(size_t oat_index) {
486     return image_infos_[oat_index];
487   }
488 
GetImageInfo(size_t oat_index)489   const ImageInfo& GetImageInfo(size_t oat_index) const {
490     return image_infos_[oat_index];
491   }
492 
493   // Find an already strong interned string in the other images or in the boot image. Used to
494   // remove duplicates in the multi image and app image case.
495   mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
496 
497   // Return true if there already exists a native allocation for an object.
498   bool NativeRelocationAssigned(void* ptr) const;
499 
500   const CompilerDriver& compiler_driver_;
501 
502   // Beginning target image address for the first image.
503   uint8_t* global_image_begin_;
504 
505   // Offset from image_begin_ to where the first object is in image_.
506   size_t image_objects_offset_begin_;
507 
508   // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
509   // to keep track. These include vtable arrays, iftable arrays, and dex caches.
510   std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
511 
512   // Saved hash codes. We use these to restore lockwords which were temporarily used to have
513   // forwarding addresses as well as copying over hash codes.
514   std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
515 
516   // Boolean flags.
517   const bool compile_pic_;
518   const bool compile_app_image_;
519 
520   // Size of pointers on the target architecture.
521   size_t target_ptr_size_;
522 
523   // Image data indexed by the oat file index.
524   dchecked_vector<ImageInfo> image_infos_;
525 
526   // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
527   // have one entry per art field for convenience. ArtFields are placed right after the end of the
528   // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
529   struct NativeObjectRelocation {
530     size_t oat_index;
531     uintptr_t offset;
532     NativeObjectRelocationType type;
533 
IsArtMethodRelocationNativeObjectRelocation534     bool IsArtMethodRelocation() const {
535       return type == kNativeObjectRelocationTypeArtMethodClean ||
536           type == kNativeObjectRelocationTypeArtMethodDirty ||
537           type == kNativeObjectRelocationTypeRuntimeMethod;
538     }
539   };
540   std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
541 
542   // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
543   ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
544 
545   // Counters for measurements, used for logging only.
546   uint64_t dirty_methods_;
547   uint64_t clean_methods_;
548 
549   // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
550   std::unordered_map<mirror::Class*, bool> prune_class_memo_;
551 
552   // Class loaders with a class table to write out. There should only be one class loader because
553   // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
554   // null is a valid entry.
555   std::unordered_set<mirror::ClassLoader*> class_loaders_;
556 
557   // Which mode the image is stored as, see image.h
558   const ImageHeader::StorageMode image_storage_mode_;
559 
560   // The file names of oat files.
561   const std::vector<const char*>& oat_filenames_;
562 
563   // Map of dex files to the indexes of oat files that they were compiled into.
564   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
565 
566   friend class ContainsBootClassLoaderNonImageClassVisitor;
567   friend class FixupClassVisitor;
568   friend class FixupRootVisitor;
569   friend class FixupVisitor;
570   friend class NativeLocationVisitor;
571   friend class NonImageClassesVisitor;
572   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
573 };
574 
575 }  // namespace art
576 
577 #endif  // ART_COMPILER_IMAGE_WRITER_H_
578