• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_COMPILER_IMAGE_WRITER_H_
18 #define ART_COMPILER_IMAGE_WRITER_H_
19 
20 #include <stdint.h>
21 #include "base/memory_tool.h"
22 
23 #include <cstddef>
24 #include <memory>
25 #include <set>
26 #include <string>
27 #include <ostream>
28 
29 #include "base/bit_utils.h"
30 #include "base/dchecked_vector.h"
31 #include "base/length_prefixed_array.h"
32 #include "base/macros.h"
33 #include "driver/compiler_driver.h"
34 #include "gc/space/space.h"
35 #include "image.h"
36 #include "lock_word.h"
37 #include "mem_map.h"
38 #include "oat_file.h"
39 #include "mirror/dex_cache.h"
40 #include "os.h"
41 #include "safe_map.h"
42 #include "utils.h"
43 
44 namespace art {
45 namespace gc {
46 namespace space {
47 class ImageSpace;
48 }  // namespace space
49 }  // namespace gc
50 
51 class ClassTable;
52 
53 static constexpr int kInvalidFd = -1;
54 
55 // Write a Space built during compilation for use during execution.
56 class ImageWriter FINAL {
57  public:
58   ImageWriter(const CompilerDriver& compiler_driver,
59               uintptr_t image_begin,
60               bool compile_pic,
61               bool compile_app_image,
62               ImageHeader::StorageMode image_storage_mode,
63               const std::vector<const char*>& oat_filenames,
64               const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
65 
66   bool PrepareImageAddressSpace();
67 
IsImageAddressSpaceReady()68   bool IsImageAddressSpaceReady() const {
69     DCHECK(!image_infos_.empty());
70     for (const ImageInfo& image_info : image_infos_) {
71       if (image_info.image_roots_address_ == 0u) {
72         return false;
73       }
74     }
75     return true;
76   }
77 
78   template <typename T>
GetImageAddress(T * object)79   T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
80     if (object == nullptr || IsInBootImage(object)) {
81       return object;
82     } else {
83       size_t oat_index = GetOatIndex(object);
84       const ImageInfo& image_info = GetImageInfo(oat_index);
85       return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
86     }
87   }
88 
89   ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
90 
91   template <typename PtrType>
GetDexCacheArrayElementImageAddress(const DexFile * dex_file,uint32_t offset)92   PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
93       const SHARED_REQUIRES(Locks::mutator_lock_) {
94     auto oat_it = dex_file_oat_index_map_.find(dex_file);
95     DCHECK(oat_it != dex_file_oat_index_map_.end());
96     const ImageInfo& image_info = GetImageInfo(oat_it->second);
97     auto it = image_info.dex_cache_array_starts_.find(dex_file);
98     DCHECK(it != image_info.dex_cache_array_starts_.end());
99     return reinterpret_cast<PtrType>(
100         image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
101             it->second + offset);
102   }
103 
GetOatFileOffset(size_t oat_index)104   size_t GetOatFileOffset(size_t oat_index) const {
105     return GetImageInfo(oat_index).oat_offset_;
106   }
107 
GetOatFileBegin(size_t oat_index)108   const uint8_t* GetOatFileBegin(size_t oat_index) const {
109     return GetImageInfo(oat_index).oat_file_begin_;
110   }
111 
112   // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
113   // the names in image_filenames.
114   // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
115   // the names in oat_filenames.
116   bool Write(int image_fd,
117              const std::vector<const char*>& image_filenames,
118              const std::vector<const char*>& oat_filenames)
119       REQUIRES(!Locks::mutator_lock_);
120 
GetOatDataBegin(size_t oat_index)121   uintptr_t GetOatDataBegin(size_t oat_index) {
122     return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
123   }
124 
125   // Get the index of the oat file containing the dex file.
126   //
127   // This "oat_index" is used to retrieve information about the the memory layout
128   // of the oat file and its associated image file, needed for link-time patching
129   // of references to the image or across oat files.
130   size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
131 
132   // Get the index of the oat file containing the dex file served by the dex cache.
133   size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
134       SHARED_REQUIRES(Locks::mutator_lock_);
135 
136   // Update the oat layout for the given oat file.
137   // This will make the oat_offset for the next oat file valid.
138   void UpdateOatFileLayout(size_t oat_index,
139                            size_t oat_loaded_size,
140                            size_t oat_data_offset,
141                            size_t oat_data_size);
142   // Update information about the oat header, i.e. checksum and trampoline offsets.
143   void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
144 
145  private:
146   bool AllocMemory();
147 
148   // Mark the objects defined in this space in the given live bitmap.
149   void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
150 
151   // Classify different kinds of bins that objects end up getting packed into during image writing.
152   // Ordered from dirtiest to cleanest (until ArtMethods).
153   enum Bin {
154     kBinMiscDirty,                // Dex caches, object locks, etc...
155     kBinClassVerified,            // Class verified, but initializers haven't been run
156     // Unknown mix of clean/dirty:
157     kBinRegular,
158     kBinClassInitialized,         // Class initializers have been run
159     // All classes get their own bins since their fields often dirty
160     kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics
161     // Likely-clean:
162     kBinString,                        // [String] Almost always immutable (except for obj header).
163     // Add more bins here if we add more segregation code.
164     // Non mirror fields must be below.
165     // ArtFields should be always clean.
166     kBinArtField,
167     // If the class is initialized, then the ArtMethods are probably clean.
168     kBinArtMethodClean,
169     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
170     // initialized.
171     kBinArtMethodDirty,
172     // IMT (clean)
173     kBinImTable,
174     // Conflict tables (clean).
175     kBinIMTConflictTable,
176     // Runtime methods (always clean, do not have a length prefix array).
177     kBinRuntimeMethod,
178     // Dex cache arrays have a special slot for PC-relative addressing. Since they are
179     // huge, and as such their dirtiness is not important for the clean/dirty separation,
180     // we arbitrarily keep them at the end of the native data.
181     kBinDexCacheArray,            // Arrays belonging to dex cache.
182     kBinSize,
183     // Number of bins which are for mirror objects.
184     kBinMirrorCount = kBinArtField,
185   };
186   friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
187 
188   enum NativeObjectRelocationType {
189     kNativeObjectRelocationTypeArtField,
190     kNativeObjectRelocationTypeArtFieldArray,
191     kNativeObjectRelocationTypeArtMethodClean,
192     kNativeObjectRelocationTypeArtMethodArrayClean,
193     kNativeObjectRelocationTypeArtMethodDirty,
194     kNativeObjectRelocationTypeArtMethodArrayDirty,
195     kNativeObjectRelocationTypeRuntimeMethod,
196     kNativeObjectRelocationTypeIMTable,
197     kNativeObjectRelocationTypeIMTConflictTable,
198     kNativeObjectRelocationTypeDexCacheArray,
199   };
200   friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
201 
202   enum OatAddress {
203     kOatAddressInterpreterToInterpreterBridge,
204     kOatAddressInterpreterToCompiledCodeBridge,
205     kOatAddressJNIDlsymLookup,
206     kOatAddressQuickGenericJNITrampoline,
207     kOatAddressQuickIMTConflictTrampoline,
208     kOatAddressQuickResolutionTrampoline,
209     kOatAddressQuickToInterpreterBridge,
210     // Number of elements in the enum.
211     kOatAddressCount,
212   };
213   friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
214 
215   static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
216   // uint32 = typeof(lockword_)
217   // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
218   // failures due to invalid read barrier bits during object field reads.
219   static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
220       LockWord::kReadBarrierStateSize;
221   // 111000.....0
222   static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
223 
224   // We use the lock word to store the bin # and bin index of the object in the image.
225   //
226   // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
227   // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
228   struct BinSlot {
229     explicit BinSlot(uint32_t lockword);
230     BinSlot(Bin bin, uint32_t index);
231 
232     // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
233     Bin GetBin() const;
234     // The offset in bytes from the beginning of the bin. Aligned to object size.
235     uint32_t GetIndex() const;
236     // Pack into a single uint32_t, for storing into a lock word.
Uint32ValueBinSlot237     uint32_t Uint32Value() const { return lockword_; }
238     // Comparison operator for map support
239     bool operator<(const BinSlot& other) const  { return lockword_ < other.lockword_; }
240 
241   private:
242     // Must be the same size as LockWord, any larger and we would truncate the data.
243     const uint32_t lockword_;
244   };
245 
246   struct ImageInfo {
247     ImageInfo();
248     ImageInfo(ImageInfo&&) = default;
249 
250     // Create the image sections into the out sections variable, returns the size of the image
251     // excluding the bitmap.
252     size_t CreateImageSections(ImageSection* out_sections) const;
253 
254     std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
255 
256     // Target begin of this image. Notes: It is not valid to write here, this is the address
257     // of the target image, not necessarily where image_ is mapped. The address is only valid
258     // after layouting (otherwise null).
259     uint8_t* image_begin_ = nullptr;
260 
261     // Offset to the free space in image_, initially size of image header.
262     size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
263     uint32_t image_roots_address_ = 0;  // The image roots address in the image.
264     size_t image_offset_ = 0;  // Offset of this image from the start of the first image.
265 
266     // Image size is the *address space* covered by this image. As the live bitmap is aligned
267     // to the page size, the live bitmap will cover more address space than necessary. But live
268     // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
269     // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
270     // page-aligned).
271     size_t image_size_ = 0;
272 
273     // Oat data.
274     // Offset of the oat file for this image from start of oat files. This is
275     // valid when the previous oat file has been written.
276     size_t oat_offset_ = 0;
277     // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
278     const uint8_t* oat_file_begin_ = nullptr;
279     size_t oat_loaded_size_ = 0;
280     const uint8_t* oat_data_begin_ = nullptr;
281     size_t oat_size_ = 0;  // Size of the corresponding oat data.
282     // The oat header checksum, valid after UpdateOatFileHeader().
283     uint32_t oat_checksum_ = 0u;
284 
285     // Image bitmap which lets us know where the objects inside of the image reside.
286     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
287 
288     // The start offsets of the dex cache arrays.
289     SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
290 
291     // Offset from oat_data_begin_ to the stubs.
292     uint32_t oat_address_offsets_[kOatAddressCount] = {};
293 
294     // Bin slot tracking for dirty object packing.
295     size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
296     size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
297     size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.
298 
299     // Cached size of the intern table for when we allocate memory.
300     size_t intern_table_bytes_ = 0;
301 
302     // Number of image class table bytes.
303     size_t class_table_bytes_ = 0;
304 
305     // Intern table associated with this image for serialization.
306     std::unique_ptr<InternTable> intern_table_;
307 
308     // Class table associated with this image for serialization.
309     std::unique_ptr<ClassTable> class_table_;
310   };
311 
312   // We use the lock word to store the offset of the object in the image.
313   void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
314       SHARED_REQUIRES(Locks::mutator_lock_);
315   void SetImageOffset(mirror::Object* object, size_t offset)
316       SHARED_REQUIRES(Locks::mutator_lock_);
317   bool IsImageOffsetAssigned(mirror::Object* object) const
318       SHARED_REQUIRES(Locks::mutator_lock_);
319   size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
320   void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
321       SHARED_REQUIRES(Locks::mutator_lock_);
322 
323   void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
324   void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
325   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
326       SHARED_REQUIRES(Locks::mutator_lock_);
327   bool IsImageBinSlotAssigned(mirror::Object* object) const
328       SHARED_REQUIRES(Locks::mutator_lock_);
329   BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
330 
331   void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
332       SHARED_REQUIRES(Locks::mutator_lock_);
333   void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
334 
GetImageAddressCallback(void * writer,mirror::Object * obj)335   static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
336       SHARED_REQUIRES(Locks::mutator_lock_) {
337     return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
338   }
339 
GetLocalAddress(mirror::Object * object)340   mirror::Object* GetLocalAddress(mirror::Object* object) const
341       SHARED_REQUIRES(Locks::mutator_lock_) {
342     size_t offset = GetImageOffset(object);
343     size_t oat_index = GetOatIndex(object);
344     const ImageInfo& image_info = GetImageInfo(oat_index);
345     uint8_t* dst = image_info.image_->Begin() + offset;
346     return reinterpret_cast<mirror::Object*>(dst);
347   }
348 
349   // Returns the address in the boot image if we are compiling the app image.
350   const uint8_t* GetOatAddress(OatAddress type) const;
351 
GetOatAddressForOffset(uint32_t offset,const ImageInfo & image_info)352   const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
353     // With Quick, code is within the OatFile, as there are all in one
354     // .o ELF object. But interpret it as signed.
355     DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
356     DCHECK(image_info.oat_data_begin_ != nullptr);
357     return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
358   }
359 
360   // Returns true if the class was in the original requested image classes list.
361   bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
362 
363   // Debug aid that list of requested image classes.
364   void DumpImageClasses();
365 
366   // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
367   void ComputeLazyFieldsForImageClasses()
368       SHARED_REQUIRES(Locks::mutator_lock_);
369 
370   // Remove unwanted classes from various roots.
371   void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
372 
373   // Verify unwanted classes removed.
374   void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
375   static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
376       SHARED_REQUIRES(Locks::mutator_lock_);
377 
378   // Lays out where the image objects will be at runtime.
379   void CalculateNewObjectOffsets()
380       SHARED_REQUIRES(Locks::mutator_lock_);
381   void CreateHeader(size_t oat_index)
382       SHARED_REQUIRES(Locks::mutator_lock_);
383   mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
384       SHARED_REQUIRES(Locks::mutator_lock_);
385   void CalculateObjectBinSlots(mirror::Object* obj)
386       SHARED_REQUIRES(Locks::mutator_lock_);
387   void UnbinObjectsIntoOffset(mirror::Object* obj)
388       SHARED_REQUIRES(Locks::mutator_lock_);
389 
390   void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
391       SHARED_REQUIRES(Locks::mutator_lock_);
392   void WalkFieldsInOrder(mirror::Object* obj)
393       SHARED_REQUIRES(Locks::mutator_lock_);
394   static void WalkFieldsCallback(mirror::Object* obj, void* arg)
395       SHARED_REQUIRES(Locks::mutator_lock_);
396   static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
397       SHARED_REQUIRES(Locks::mutator_lock_);
398 
399   // Creates the contiguous image in memory and adjusts pointers.
400   void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
401   void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
402   static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
403       SHARED_REQUIRES(Locks::mutator_lock_);
404   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
405   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
406       SHARED_REQUIRES(Locks::mutator_lock_);
407   void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
408   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
409       SHARED_REQUIRES(Locks::mutator_lock_);
410   void FixupClass(mirror::Class* orig, mirror::Class* copy)
411       SHARED_REQUIRES(Locks::mutator_lock_);
412   void FixupObject(mirror::Object* orig, mirror::Object* copy)
413       SHARED_REQUIRES(Locks::mutator_lock_);
414   void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
415       SHARED_REQUIRES(Locks::mutator_lock_);
416   void FixupPointerArray(mirror::Object* dst,
417                          mirror::PointerArray* arr,
418                          mirror::Class* klass,
419                          Bin array_type)
420       SHARED_REQUIRES(Locks::mutator_lock_);
421 
422   // Get quick code for non-resolution/imt_conflict/abstract method.
423   const uint8_t* GetQuickCode(ArtMethod* method,
424                               const ImageInfo& image_info,
425                               bool* quick_is_interpreted)
426       SHARED_REQUIRES(Locks::mutator_lock_);
427 
428   // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
429   size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
430 
431   // Return true if a method is likely to be dirtied at runtime.
432   bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
433 
434   // Assign the offset for an ArtMethod.
435   void AssignMethodOffset(ArtMethod* method,
436                           NativeObjectRelocationType type,
437                           size_t oat_index)
438       SHARED_REQUIRES(Locks::mutator_lock_);
439 
440   void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
441 
442   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
443   // relocation.
444   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
445       SHARED_REQUIRES(Locks::mutator_lock_);
446 
447   // Return true if klass is loaded by the boot class loader but not in the boot image.
448   bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
449 
450   // Return true if klass depends on a boot class loader non image class. We want to prune these
451   // classes since we do not want any boot class loader classes in the image. This means that
452   // we also cannot have any classes which refer to these boot class loader non image classes.
453   // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
454   // driver.
455   bool PruneAppImageClass(mirror::Class* klass)
456       SHARED_REQUIRES(Locks::mutator_lock_);
457 
458   // early_exit is true if we had a cyclic dependency anywhere down the chain.
459   bool PruneAppImageClassInternal(mirror::Class* klass,
460                                   bool* early_exit,
461                                   std::unordered_set<mirror::Class*>* visited)
462       SHARED_REQUIRES(Locks::mutator_lock_);
463 
464   static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
465 
466   uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
467 
468   // Location of where the object will be when the image is loaded at runtime.
469   template <typename T>
470   T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
471 
472   // Location of where the temporary copy of the object currently is.
473   template <typename T>
474   T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
475 
476   // Return true of obj is inside of the boot image space. This may only return true if we are
477   // compiling an app image.
478   bool IsInBootImage(const void* obj) const;
479 
480   // Return true if ptr is within the boot oat file.
481   bool IsInBootOatFile(const void* ptr) const;
482 
483   // Get the index of the oat file associated with the object.
484   size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
485 
486   // The oat index for shared data in multi-image and all data in single-image compilation.
GetDefaultOatIndex()487   size_t GetDefaultOatIndex() const {
488     return 0u;
489   }
490 
GetImageInfo(size_t oat_index)491   ImageInfo& GetImageInfo(size_t oat_index) {
492     return image_infos_[oat_index];
493   }
494 
GetImageInfo(size_t oat_index)495   const ImageInfo& GetImageInfo(size_t oat_index) const {
496     return image_infos_[oat_index];
497   }
498 
499   // Find an already strong interned string in the other images or in the boot image. Used to
500   // remove duplicates in the multi image and app image case.
501   mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
502 
503   // Return true if there already exists a native allocation for an object.
504   bool NativeRelocationAssigned(void* ptr) const;
505 
506   const CompilerDriver& compiler_driver_;
507 
508   // Beginning target image address for the first image.
509   uint8_t* global_image_begin_;
510 
511   // Offset from image_begin_ to where the first object is in image_.
512   size_t image_objects_offset_begin_;
513 
514   // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
515   // to keep track. These include vtable arrays, iftable arrays, and dex caches.
516   std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
517 
518   // Saved hash codes. We use these to restore lockwords which were temporarily used to have
519   // forwarding addresses as well as copying over hash codes.
520   std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
521 
522   // Boolean flags.
523   const bool compile_pic_;
524   const bool compile_app_image_;
525 
526   // Size of pointers on the target architecture.
527   size_t target_ptr_size_;
528 
529   // Image data indexed by the oat file index.
530   dchecked_vector<ImageInfo> image_infos_;
531 
532   // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
533   // have one entry per art field for convenience. ArtFields are placed right after the end of the
534   // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
535   struct NativeObjectRelocation {
536     size_t oat_index;
537     uintptr_t offset;
538     NativeObjectRelocationType type;
539 
IsArtMethodRelocationNativeObjectRelocation540     bool IsArtMethodRelocation() const {
541       return type == kNativeObjectRelocationTypeArtMethodClean ||
542           type == kNativeObjectRelocationTypeArtMethodDirty ||
543           type == kNativeObjectRelocationTypeRuntimeMethod;
544     }
545   };
546   std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
547 
548   // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
549   ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
550 
551   // Counters for measurements, used for logging only.
552   uint64_t dirty_methods_;
553   uint64_t clean_methods_;
554 
555   // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
556   std::unordered_map<mirror::Class*, bool> prune_class_memo_;
557 
558   // Class loaders with a class table to write out. There should only be one class loader because
559   // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
560   // null is a valid entry.
561   std::unordered_set<mirror::ClassLoader*> class_loaders_;
562 
563   // Which mode the image is stored as, see image.h
564   const ImageHeader::StorageMode image_storage_mode_;
565 
566   // The file names of oat files.
567   const std::vector<const char*>& oat_filenames_;
568 
569   // Map of dex files to the indexes of oat files that they were compiled into.
570   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
571 
572   friend class ContainsBootClassLoaderNonImageClassVisitor;
573   friend class FixupClassVisitor;
574   friend class FixupRootVisitor;
575   friend class FixupVisitor;
576   friend class NativeLocationVisitor;
577   friend class NonImageClassesVisitor;
578   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
579 };
580 
581 }  // namespace art
582 
583 #endif  // ART_COMPILER_IMAGE_WRITER_H_
584