1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "image_space.h"
18 
19 #include <sys/statvfs.h>
20 #include <sys/types.h>
21 #include <unistd.h>
22 
23 #include <random>
24 
25 #include "android-base/stringprintf.h"
26 #include "android-base/strings.h"
27 
28 #include "arch/instruction_set.h"
29 #include "art_field-inl.h"
30 #include "art_method-inl.h"
31 #include "base/array_ref.h"
32 #include "base/bit_memory_region.h"
33 #include "base/callee_save_type.h"
34 #include "base/enums.h"
35 #include "base/file_utils.h"
36 #include "base/macros.h"
37 #include "base/os.h"
38 #include "base/scoped_flock.h"
39 #include "base/stl_util.h"
40 #include "base/systrace.h"
41 #include "base/time_utils.h"
42 #include "base/utils.h"
43 #include "class_root.h"
44 #include "dex/art_dex_file_loader.h"
45 #include "dex/dex_file_loader.h"
46 #include "exec_utils.h"
47 #include "gc/accounting/space_bitmap-inl.h"
48 #include "gc/task_processor.h"
49 #include "image-inl.h"
50 #include "image_space_fs.h"
51 #include "intern_table-inl.h"
52 #include "mirror/class-inl.h"
53 #include "mirror/executable-inl.h"
54 #include "mirror/object-inl.h"
55 #include "mirror/object-refvisitor-inl.h"
56 #include "oat_file.h"
57 #include "runtime.h"
58 #include "space-inl.h"
59 
60 namespace art {
61 namespace gc {
62 namespace space {
63 
64 using android::base::StringAppendF;
65 using android::base::StringPrintf;
66 
67 Atomic<uint32_t> ImageSpace::bitmap_index_(0);
68 
69 ImageSpace::ImageSpace(const std::string& image_filename,
70                        const char* image_location,
71                        MemMap&& mem_map,
72                        std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
73                        uint8_t* end)
74     : MemMapSpace(image_filename,
75                   std::move(mem_map),
76                   mem_map.Begin(),
77                   end,
78                   end,
79                   kGcRetentionPolicyNeverCollect),
80       live_bitmap_(std::move(live_bitmap)),
81       oat_file_non_owned_(nullptr),
82       image_location_(image_location) {
83   DCHECK(live_bitmap_ != nullptr);
84 }
85 
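// Picks a random, page-aligned delta in [min_delta, max_delta]; used below to slide the
// --base address passed to dex2oat when generating a boot image so the image is not
// always compiled at the default ART_BASE_ADDRESS.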
86 static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
87   CHECK_ALIGNED(min_delta, kPageSize);
88   CHECK_ALIGNED(max_delta, kPageSize);
89   CHECK_LT(min_delta, max_delta);
90 
91   int32_t r = GetRandomNumber<int32_t>(min_delta, max_delta);
92   if (r % 2 == 0) {
93     r = RoundUp(r, kPageSize);
94   } else {
95     r = RoundDown(r, kPageSize);
96   }
97   CHECK_LE(min_delta, r);
98   CHECK_GE(max_delta, r);
99   CHECK_ALIGNED(r, kPageSize);
100   return r;
101 }
102 
103 static int32_t ChooseRelocationOffsetDelta() {
104   return ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA, ART_BASE_ADDRESS_MAX_DELTA);
105 }
106 
107 static bool GenerateImage(const std::string& image_filename,
108                           InstructionSet image_isa,
109                           std::string* error_msg) {
110   Runtime* runtime = Runtime::Current();
111   const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
112   if (boot_class_path.empty()) {
113     *error_msg = "Failed to generate image because no boot class path specified";
114     return false;
115   }
116   // We should clean up so we are more likely to have room for the image.
117   if (Runtime::Current()->IsZygote()) {
118     LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
119     PruneDalvikCache(image_isa);
120   }
121 
122   std::vector<std::string> arg_vector;
123 
124   std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
125   arg_vector.push_back(dex2oat);
126 
127   char* dex2oat_bcp = getenv("DEX2OATBOOTCLASSPATH");
128   std::vector<std::string> dex2oat_bcp_vector;
129   if (dex2oat_bcp != nullptr) {
130     arg_vector.push_back("--runtime-arg");
131     arg_vector.push_back(StringPrintf("-Xbootclasspath:%s", dex2oat_bcp));
132     Split(dex2oat_bcp, ':', &dex2oat_bcp_vector);
133   }
134 
135   std::string image_option_string("--image=");
136   image_option_string += image_filename;
137   arg_vector.push_back(image_option_string);
138 
139   if (!dex2oat_bcp_vector.empty()) {
140     for (size_t i = 0u; i < dex2oat_bcp_vector.size(); i++) {
141       arg_vector.push_back(std::string("--dex-file=") + dex2oat_bcp_vector[i]);
142       arg_vector.push_back(std::string("--dex-location=") + dex2oat_bcp_vector[i]);
143     }
144   } else {
145     const std::vector<std::string>& boot_class_path_locations =
146         runtime->GetBootClassPathLocations();
147     DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
148     for (size_t i = 0u; i < boot_class_path.size(); i++) {
149       arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
150       arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
151     }
152   }
153 
154   std::string oat_file_option_string("--oat-file=");
155   oat_file_option_string += ImageHeader::GetOatLocationFromImageLocation(image_filename);
156   arg_vector.push_back(oat_file_option_string);
157 
158   // Note: we do not generate a fully debuggable boot image so we do not pass the
159   // compiler flag --debuggable here.
160 
161   Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
162   CHECK_EQ(image_isa, kRuntimeISA)
163       << "We should always be generating an image for the current isa.";
164 
165   int32_t base_offset = ChooseRelocationOffsetDelta();
166   LOG(INFO) << "Using an offset of 0x" << std::hex << base_offset << " from default "
167             << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
168   arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
169 
170   if (!kIsTargetBuild) {
171     arg_vector.push_back("--host");
172   }
173 
174   const std::vector<std::string>& compiler_options = Runtime::Current()->GetImageCompilerOptions();
175   for (size_t i = 0; i < compiler_options.size(); ++i) {
176     arg_vector.push_back(compiler_options[i].c_str());
177   }
178 
179   std::string command_line(android::base::Join(arg_vector, ' '));
180   LOG(INFO) << "GenerateImage: " << command_line;
181   return Exec(arg_vector, error_msg);
182 }
183 
184 static bool FindImageFilenameImpl(const char* image_location,
185                                   const InstructionSet image_isa,
186                                   bool* has_system,
187                                   std::string* system_filename,
188                                   bool* dalvik_cache_exists,
189                                   std::string* dalvik_cache,
190                                   bool* is_global_cache,
191                                   bool* has_cache,
192                                   std::string* cache_filename) {
193   DCHECK(dalvik_cache != nullptr);
194 
195   *has_system = false;
196   *has_cache = false;
197   // image_location = /system/framework/boot.art
198   // system_image_location = /system/framework/<image_isa>/boot.art
199   std::string system_image_filename(GetSystemImageFilename(image_location, image_isa));
200   if (OS::FileExists(system_image_filename.c_str())) {
201     *system_filename = system_image_filename;
202     *has_system = true;
203   }
204 
205   bool have_android_data = false;
206   *dalvik_cache_exists = false;
207   GetDalvikCache(GetInstructionSetString(image_isa),
208                  /*create_if_absent=*/ true,
209                  dalvik_cache,
210                  &have_android_data,
211                  dalvik_cache_exists,
212                  is_global_cache);
213 
214   if (*dalvik_cache_exists) {
215     DCHECK(have_android_data);
216     // Always set output location even if it does not exist,
217     // so that the caller knows where to create the image.
218     //
219     // image_location = /system/framework/boot.art
220     // *image_filename = /data/dalvik-cache/<image_isa>/system@framework@boot.art
221     std::string error_msg;
222     if (!GetDalvikCacheFilename(image_location,
223                                 dalvik_cache->c_str(),
224                                 cache_filename,
225                                 &error_msg)) {
226       LOG(WARNING) << error_msg;
227       return *has_system;
228     }
229     *has_cache = OS::FileExists(cache_filename->c_str());
230   }
231   return *has_system || *has_cache;
232 }
233 
234 bool ImageSpace::FindImageFilename(const char* image_location,
235                                    const InstructionSet image_isa,
236                                    std::string* system_filename,
237                                    bool* has_system,
238                                    std::string* cache_filename,
239                                    bool* dalvik_cache_exists,
240                                    bool* has_cache,
241                                    bool* is_global_cache) {
242   std::string dalvik_cache_unused;
243   return FindImageFilenameImpl(image_location,
244                                image_isa,
245                                has_system,
246                                system_filename,
247                                dalvik_cache_exists,
248                                &dalvik_cache_unused,
249                                is_global_cache,
250                                has_cache,
251                                cache_filename);
252 }
253 
254 static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
255     std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
256     if (image_file.get() == nullptr) {
257       return false;
258     }
259     const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
260     if (!success || !image_header->IsValid()) {
261       return false;
262     }
263     return true;
264 }
265 
266 static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
267                                                             std::string* error_msg) {
268   std::unique_ptr<ImageHeader> hdr(new ImageHeader);
269   if (!ReadSpecificImageHeader(filename, hdr.get())) {
270     *error_msg = StringPrintf("Unable to read image header for %s", filename);
271     return nullptr;
272   }
273   return hdr;
274 }
275 
276 std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
277                                                          const InstructionSet image_isa,
278                                                          ImageSpaceLoadingOrder order,
279                                                          std::string* error_msg) {
280   std::string system_filename;
281   bool has_system = false;
282   std::string cache_filename;
283   bool has_cache = false;
284   bool dalvik_cache_exists = false;
285   bool is_global_cache = false;
286   if (FindImageFilename(image_location,
287                         image_isa,
288                         &system_filename,
289                         &has_system,
290                         &cache_filename,
291                         &dalvik_cache_exists,
292                         &has_cache,
293                         &is_global_cache)) {
294     if (order == ImageSpaceLoadingOrder::kSystemFirst) {
295       if (has_system) {
296         return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
297       }
298       if (has_cache) {
299         return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
300       }
301     } else {
302       if (has_cache) {
303         return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
304       }
305       if (has_system) {
306         return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
307       }
308     }
309   }
310 
311   *error_msg = StringPrintf("Unable to find image file for %s", image_location);
312   return nullptr;
313 }
314 
315 static bool CanWriteToDalvikCache(const InstructionSet isa) {
316   const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
317   if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
318     return true;
319   } else if (errno != EACCES) {
320     PLOG(WARNING) << "CanWriteToDalvikCache returned error other than EACCES";
321   }
322   return false;
323 }
324 
325 static bool ImageCreationAllowed(bool is_global_cache,
326                                  const InstructionSet isa,
327                                  bool is_zygote,
328                                  std::string* error_msg) {
329   // Anyone can write into a "local" cache.
330   if (!is_global_cache) {
331     return true;
332   }
333 
334   // Only the zygote running as root is allowed to create the global boot image.
335   // If the zygote is running as non-root (and cannot write to the dalvik-cache),
336   // then image creation is not allowed.
337   if (is_zygote) {
338     return CanWriteToDalvikCache(isa);
339   }
340 
341   *error_msg = "Only the zygote can create the global boot image.";
342   return false;
343 }
344 
345 void ImageSpace::VerifyImageAllocations() {
346   uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
347   while (current < End()) {
348     CHECK_ALIGNED(current, kObjectAlignment);
349     auto* obj = reinterpret_cast<mirror::Object*>(current);
350     CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
351     CHECK(live_bitmap_->Test(obj)) << obj->PrettyTypeOf();
352     if (kUseBakerReadBarrier) {
353       obj->AssertReadBarrierState();
354     }
355     current += RoundUp(obj->SizeOf(), kObjectAlignment);
356   }
357 }
358 
359 // Helper class for relocating from one range of memory to another.
360 class RelocationRange {
361  public:
362   RelocationRange() = default;
363   RelocationRange(const RelocationRange&) = default;
364   RelocationRange(uintptr_t source, uintptr_t dest, uintptr_t length)
365       : source_(source),
366         dest_(dest),
367         length_(length) {}
368 
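  // Returns true if `address` lies in [source_, source_ + length_). The single unsigned
  // comparison also rejects addresses below source_, which wrap around to large values.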
369   bool InSource(uintptr_t address) const {
370     return address - source_ < length_;
371   }
372 
373   bool InDest(const void* dest) const {
374     return InDest(reinterpret_cast<uintptr_t>(dest));
375   }
376 
377   bool InDest(uintptr_t address) const {
378     return address - dest_ < length_;
379   }
380 
381   // Translate a source address to the destination space.
382   uintptr_t ToDest(uintptr_t address) const {
383     DCHECK(InSource(address));
384     return address + Delta();
385   }
386 
387   // Returns the delta from the source to the dest.
388   uintptr_t Delta() const {
389     return dest_ - source_;
390   }
391 
392   uintptr_t Source() const {
393     return source_;
394   }
395 
396   uintptr_t Dest() const {
397     return dest_;
398   }
399 
400   uintptr_t Length() const {
401     return length_;
402   }
403 
404  private:
405   const uintptr_t source_;
406   const uintptr_t dest_;
407   const uintptr_t length_;
408 };
409 
410 std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
411   return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
412             << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
413             << reinterpret_cast<const void*>(reloc.Dest()) << "-"
414             << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
415 }
416 
417 template <PointerSize kPointerSize, typename HeapVisitor, typename NativeVisitor>
418 class ImageSpace::PatchObjectVisitor final {
419  public:
420   explicit PatchObjectVisitor(HeapVisitor heap_visitor, NativeVisitor native_visitor)
421       : heap_visitor_(heap_visitor), native_visitor_(native_visitor) {}
422 
423   void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
424     // A mirror::Class object consists of
425     //  - instance fields inherited from j.l.Object,
426     //  - instance fields inherited from j.l.Class,
427     //  - embedded tables (vtable, interface method table),
428     //  - static fields of the class itself.
429     // The reference fields are at the start of each field section (this is how the
430     // ClassLinker orders fields; except when that would create a gap between superclass
431     // fields and the first reference of the subclass due to alignment, it can be filled
432     // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
433 
434     DCHECK_ALIGNED(klass, kObjectAlignment);
435     static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
436     // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
437     // This should be the only reference field in j.l.Object and we assert that below.
438     PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
439     // Then patch the reference instance fields described by j.l.Class.class.
440     // Use the sizeof(Object) to determine where these reference fields start;
441     // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
442     // after patching but the j.l.Class may not have been patched yet.
443     mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
444     size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
445     DCHECK_NE(num_reference_instance_fields, 0u);
446     static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
447     MemberOffset instance_field_offset(sizeof(mirror::Object));
448     for (size_t i = 0; i != num_reference_instance_fields; ++i) {
449       PatchReferenceField(klass, instance_field_offset);
450       static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
451                     "Heap reference sizes equality check.");
452       instance_field_offset =
453           MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
454     }
455     // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
456     // we can get a reference to j.l.Object.class and assert that it has only one
457     // reference instance field (the `klass_` patched above).
458     if (kIsDebugBuild && klass == class_class) {
459       ObjPtr<mirror::Class> object_class =
460           klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
461       CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
462     }
463     // Then patch static fields.
464     size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
465     if (num_reference_static_fields != 0u) {
466       MemberOffset static_field_offset =
467           klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
468       for (size_t i = 0; i != num_reference_static_fields; ++i) {
469         PatchReferenceField(klass, static_field_offset);
470         static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
471                       "Heap reference sizes equality check.");
472         static_field_offset =
473             MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
474       }
475     }
476     // Then patch native pointers.
477     klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
478   }
479 
480   template <typename T>
481   T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
482     return (ptr != nullptr) ? native_visitor_(ptr) : nullptr;
483   }
484 
485   void VisitPointerArray(ObjPtr<mirror::PointerArray> pointer_array)
486       REQUIRES_SHARED(Locks::mutator_lock_) {
487     // Fully patch the pointer array, including the `klass_` field.
488     PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
489 
490     int32_t length = pointer_array->GetLength<kVerifyNone>();
491     for (int32_t i = 0; i != length; ++i) {
492       ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
493           pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
494       PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
495     }
496   }
497 
498   void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
499     // Visit all reference fields.
500     object->VisitReferences</*kVisitNativeRoots=*/ false,
501                             kVerifyNone,
502                             kWithoutReadBarrier>(*this, *this);
503     // This function should not be called for classes.
504     DCHECK(!object->IsClass<kVerifyNone>());
505   }
506 
507   // Visitor for VisitReferences().
508   ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> object,
509                                 MemberOffset field_offset,
510                                 bool is_static)
511       const REQUIRES_SHARED(Locks::mutator_lock_) {
512     DCHECK(!is_static);
513     PatchReferenceField(object, field_offset);
514   }
515   // Visitor for VisitReferences(), java.lang.ref.Reference case.
516   ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
517       REQUIRES_SHARED(Locks::mutator_lock_) {
518     DCHECK(klass->IsTypeOfReferenceClass());
519     this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
520   }
521   // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
522   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
523       const {}
524   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
525 
526   void VisitDexCacheArrays(ObjPtr<mirror::DexCache> dex_cache)
527       REQUIRES_SHARED(Locks::mutator_lock_) {
528     FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
529                                                    mirror::DexCache::StringsOffset(),
530                                                    dex_cache->NumStrings<kVerifyNone>());
531     FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
532                                                  mirror::DexCache::ResolvedTypesOffset(),
533                                                  dex_cache->NumResolvedTypes<kVerifyNone>());
534     FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
535                                                    mirror::DexCache::ResolvedMethodsOffset(),
536                                                    dex_cache->NumResolvedMethods<kVerifyNone>());
537     FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
538                                                   mirror::DexCache::ResolvedFieldsOffset(),
539                                                   dex_cache->NumResolvedFields<kVerifyNone>());
540     FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
541         dex_cache,
542         mirror::DexCache::ResolvedMethodTypesOffset(),
543         dex_cache->NumResolvedMethodTypes<kVerifyNone>());
544     FixupDexCacheArray<GcRoot<mirror::CallSite>>(
545         dex_cache,
546         mirror::DexCache::ResolvedCallSitesOffset(),
547         dex_cache->NumResolvedCallSites<kVerifyNone>());
548     FixupDexCacheArray<GcRoot<mirror::String>>(
549         dex_cache,
550         mirror::DexCache::PreResolvedStringsOffset(),
551         dex_cache->NumPreResolvedStrings<kVerifyNone>());
552   }
553 
554   template <bool kMayBeNull = true, typename T>
555   ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
556       REQUIRES_SHARED(Locks::mutator_lock_) {
557     static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
558     T* old_value = root->template Read<kWithoutReadBarrier>();
559     DCHECK(kMayBeNull || old_value != nullptr);
560     if (!kMayBeNull || old_value != nullptr) {
561       *root = GcRoot<T>(heap_visitor_(old_value));
562     }
563   }
564 
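  // Patches a raw native pointer entry stored with the width selected by kPointerSize
  // (32-bit or 64-bit), mapping the old value through native_visitor_.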
565   template <bool kMayBeNull = true, typename T>
566   ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
567     if (kPointerSize == PointerSize::k64) {
568       uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
569       T* old_value = reinterpret_cast64<T*>(*raw_entry);
570       DCHECK(kMayBeNull || old_value != nullptr);
571       if (!kMayBeNull || old_value != nullptr) {
572         T* new_value = native_visitor_(old_value);
573         *raw_entry = reinterpret_cast64<uint64_t>(new_value);
574       }
575     } else {
576       uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
577       T* old_value = reinterpret_cast32<T*>(*raw_entry);
578       DCHECK(kMayBeNull || old_value != nullptr);
579       if (!kMayBeNull || old_value != nullptr) {
580         T* new_value = native_visitor_(old_value);
581         *raw_entry = reinterpret_cast32<uint32_t>(new_value);
582       }
583     }
584   }
585 
586   template <bool kMayBeNull = true>
587   ALWAYS_INLINE void PatchReferenceField(ObjPtr<mirror::Object> object, MemberOffset offset) const
588       REQUIRES_SHARED(Locks::mutator_lock_) {
589     ObjPtr<mirror::Object> old_value =
590         object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
591     DCHECK(kMayBeNull || old_value != nullptr);
592     if (!kMayBeNull || old_value != nullptr) {
593       ObjPtr<mirror::Object> new_value = heap_visitor_(old_value.Ptr());
594       object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
595                                                 /*kCheckTransaction=*/ true,
596                                                 kVerifyNone>(offset, new_value);
597     }
598   }
599 
600   template <typename T>
601   void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
602       REQUIRES_SHARED(Locks::mutator_lock_) {
603     static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
604                   "Size check for removing std::atomic<>.");
605     PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
606   }
607 
608   template <typename T>
609   void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
610       REQUIRES_SHARED(Locks::mutator_lock_) {
611     static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
612                       sizeof(mirror::NativeDexCachePair<T>),
613                   "Size check for removing std::atomic<>.");
614     mirror::NativeDexCachePair<T> pair =
615         mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
616     if (pair.object != nullptr) {
617       pair.object = native_visitor_(pair.object);
618       mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
619     }
620   }
621 
622   void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
623       REQUIRES_SHARED(Locks::mutator_lock_) {
624     PatchGcRoot(&array[index]);
625   }
626 
627   void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
628       REQUIRES_SHARED(Locks::mutator_lock_) {
629     PatchGcRoot(&array[index]);
630   }
631 
632   template <typename EntryType>
633   void FixupDexCacheArray(ObjPtr<mirror::DexCache> dex_cache,
634                           MemberOffset array_offset,
635                           uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
636     EntryType* old_array =
637         reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
638     DCHECK_EQ(old_array != nullptr, size != 0u);
639     if (old_array != nullptr) {
640       EntryType* new_array = native_visitor_(old_array);
641       dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
642       for (uint32_t i = 0; i != size; ++i) {
643         FixupDexCacheArrayEntry(new_array, i);
644       }
645     }
646   }
647 
648  private:
649   // Heap objects visitor.
650   HeapVisitor heap_visitor_;
651 
652   // Native objects visitor.
653   NativeVisitor native_visitor_;
654 };
655 
656 template <typename ReferenceVisitor>
657 class ImageSpace::ClassTableVisitor final {
658  public:
659   explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
660       : reference_visitor_(reference_visitor) {}
661 
662   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
663       REQUIRES_SHARED(Locks::mutator_lock_) {
664     DCHECK(root->AsMirrorPtr() != nullptr);
665     root->Assign(reference_visitor_(root->AsMirrorPtr()));
666   }
667 
668  private:
669   ReferenceVisitor reference_visitor_;
670 };
671 
672 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
673 // nested class), but not declare functions in the header.
674 class ImageSpace::Loader {
675  public:
676   static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
677                                                   const char* image_location,
678                                                   const OatFile* oat_file,
679                                                   /*inout*/MemMap* image_reservation,
680                                                   /*out*/std::string* error_msg)
681       REQUIRES_SHARED(Locks::mutator_lock_) {
682     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
683 
684     std::unique_ptr<ImageSpace> space = Init(image_filename,
685                                              image_location,
686                                              oat_file,
687                                              &logger,
688                                              image_reservation,
689                                              error_msg);
690     if (space != nullptr) {
691       uint32_t expected_reservation_size =
692           RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
693       if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
694           !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
695         return nullptr;
696       }
697 
698       TimingLogger::ScopedTiming timing("RelocateImage", &logger);
699       ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
700       const PointerSize pointer_size = image_header->GetPointerSize();
701       bool result;
702       if (pointer_size == PointerSize::k64) {
703         result = RelocateInPlace<PointerSize::k64>(*image_header,
704                                                    space->GetMemMap()->Begin(),
705                                                    space->GetLiveBitmap(),
706                                                    oat_file,
707                                                    error_msg);
708       } else {
709         result = RelocateInPlace<PointerSize::k32>(*image_header,
710                                                    space->GetMemMap()->Begin(),
711                                                    space->GetLiveBitmap(),
712                                                    oat_file,
713                                                    error_msg);
714       }
715       if (!result) {
716         return nullptr;
717       }
718       Runtime* runtime = Runtime::Current();
719       CHECK_EQ(runtime->GetResolutionMethod(),
720                image_header->GetImageMethod(ImageHeader::kResolutionMethod));
721       CHECK_EQ(runtime->GetImtConflictMethod(),
722                image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
723       CHECK_EQ(runtime->GetImtUnimplementedMethod(),
724                image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
725       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
726                image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
727       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
728                image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
729       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
730                image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
731       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
732                image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
733       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
734                image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
735       CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
736                image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
737 
738       VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
739     }
740     if (VLOG_IS_ON(image)) {
741       logger.Dump(LOG_STREAM(INFO));
742     }
743     return space;
744   }
745 
746   static std::unique_ptr<ImageSpace> Init(const char* image_filename,
747                                           const char* image_location,
748                                           const OatFile* oat_file,
749                                           TimingLogger* logger,
750                                           /*inout*/MemMap* image_reservation,
751                                           /*out*/std::string* error_msg)
752       REQUIRES_SHARED(Locks::mutator_lock_) {
753     CHECK(image_filename != nullptr);
754     CHECK(image_location != nullptr);
755 
756     VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
757 
758     std::unique_ptr<File> file;
759     {
760       TimingLogger::ScopedTiming timing("OpenImageFile", logger);
761       file.reset(OS::OpenFileForReading(image_filename));
762       if (file == nullptr) {
763         *error_msg = StringPrintf("Failed to open '%s'", image_filename);
764         return nullptr;
765       }
766     }
767     ImageHeader temp_image_header;
768     ImageHeader* image_header = &temp_image_header;
769     {
770       TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
771       bool success = file->ReadFully(image_header, sizeof(*image_header));
772       if (!success || !image_header->IsValid()) {
773         *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
774         return nullptr;
775       }
776     }
777     // Check that the file is larger or equal to the header size + data size.
778     const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
779     if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
780       *error_msg = StringPrintf(
781           "Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
782            image_file_size,
783            static_cast<uint64_t>(sizeof(ImageHeader) + image_header->GetDataSize()));
784       return nullptr;
785     }
786 
787     if (oat_file != nullptr) {
788       // If we have an oat file (i.e. for app image), check the oat file checksum.
789       // Otherwise, we open the oat file after the image and check the checksum there.
790       const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
791       const uint32_t image_oat_checksum = image_header->GetOatChecksum();
792       if (oat_checksum != image_oat_checksum) {
793         *error_msg = StringPrintf("Oat checksum 0x%x does not match the image one 0x%x in image %s",
794                                   oat_checksum,
795                                   image_oat_checksum,
796                                   image_filename);
797         return nullptr;
798       }
799     }
800 
801     if (VLOG_IS_ON(startup)) {
802       LOG(INFO) << "Dumping image sections";
803       for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
804         const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
805         auto& section = image_header->GetImageSection(section_idx);
806         LOG(INFO) << section_idx << " start="
807             << reinterpret_cast<void*>(image_header->GetImageBegin() + section.Offset()) << " "
808             << section;
809       }
810     }
811 
812     const auto& bitmap_section = image_header->GetImageBitmapSection();
813     // The location we want to map from is the first aligned page after the end of the stored
814     // (possibly compressed) data.
815     const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
816                                                kPageSize);
817     const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
818     if (end_of_bitmap != image_file_size) {
819       *error_msg = StringPrintf(
820           "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
821           image_file_size,
822           end_of_bitmap);
823       return nullptr;
824     }
825 
826     // GetImageBegin is the preferred address to map the image. If we manage to map the
827     // image at the image begin, the amount of fixup work required is minimized.
828     // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
829     // avoid reading proc maps for a mapping failure and slowing everything down.
830     // For the boot image, we have already reserved the memory and we load the image
831     // into the `image_reservation`.
832     MemMap map = LoadImageFile(
833         image_filename,
834         image_location,
835         *image_header,
836         file->Fd(),
837         logger,
838         image_reservation,
839         error_msg);
840     if (!map.IsValid()) {
841       DCHECK(!error_msg->empty());
842       return nullptr;
843     }
844     DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
845 
846     MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
847                                               PROT_READ,
848                                               MAP_PRIVATE,
849                                               file->Fd(),
850                                               image_bitmap_offset,
851                                               /*low_4gb=*/ false,
852                                               image_filename,
853                                               error_msg);
854     if (!image_bitmap_map.IsValid()) {
855       *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
856       return nullptr;
857     }
858     // Loaded the map, use the image header from the file now in case we patch it with
859     // RelocateInPlace.
860     image_header = reinterpret_cast<ImageHeader*>(map.Begin());
861     const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
862     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
863                                          image_filename,
864                                          bitmap_index));
865     // Bitmap only needs to cover until the end of the mirror objects section.
866     const ImageSection& image_objects = image_header->GetObjectsSection();
867     // We only want the mirror object, not the ArtFields and ArtMethods.
868     uint8_t* const image_end = map.Begin() + image_objects.End();
869     std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
870     {
871       TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
872       bitmap.reset(
873           accounting::ContinuousSpaceBitmap::CreateFromMemMap(
874               bitmap_name,
875               std::move(image_bitmap_map),
876               reinterpret_cast<uint8_t*>(map.Begin()),
877               // Make sure the bitmap is aligned to card size instead of just bitmap word size.
878               RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
879       if (bitmap == nullptr) {
880         *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
881         return nullptr;
882       }
883     }
884     // We only want the mirror object, not the ArtFields and ArtMethods.
885     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
886                                                      image_location,
887                                                      std::move(map),
888                                                      std::move(bitmap),
889                                                      image_end));
890     space->oat_file_non_owned_ = oat_file;
891     return space;
892   }
893 
894   static bool CheckImageComponentCount(const ImageSpace& space,
895                                        uint32_t expected_component_count,
896                                        /*out*/std::string* error_msg) {
897     const ImageHeader& header = space.GetImageHeader();
898     if (header.GetComponentCount() != expected_component_count) {
899       *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %u",
900                                 space.GetImageFilename().c_str(),
901                                 header.GetComponentCount(),
902                                 expected_component_count);
903       return false;
904     }
905     return true;
906   }
907 
908   static bool CheckImageReservationSize(const ImageSpace& space,
909                                         uint32_t expected_reservation_size,
910                                         /*out*/std::string* error_msg) {
911     const ImageHeader& header = space.GetImageHeader();
912     if (header.GetImageReservationSize() != expected_reservation_size) {
913       *error_msg = StringPrintf("Unexpected reservation size in %s, received %u, expected %u",
914                                 space.GetImageFilename().c_str(),
915                                 header.GetImageReservationSize(),
916                                 expected_reservation_size);
917       return false;
918     }
919     return true;
920   }
921 
922  private:
923   static MemMap LoadImageFile(const char* image_filename,
924                               const char* image_location,
925                               const ImageHeader& image_header,
926                               int fd,
927                               TimingLogger* logger,
928                               /*inout*/MemMap* image_reservation,
929                               /*out*/std::string* error_msg)
930         REQUIRES_SHARED(Locks::mutator_lock_) {
931     TimingLogger::ScopedTiming timing("MapImageFile", logger);
932     std::string temp_error_msg;
933     const bool is_compressed = image_header.HasCompressedBlock();
934     if (!is_compressed) {
935       uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
936       return MemMap::MapFileAtAddress(address,
937                                       image_header.GetImageSize(),
938                                       PROT_READ | PROT_WRITE,
939                                       MAP_PRIVATE,
940                                       fd,
941                                       /*start=*/ 0,
942                                       /*low_4gb=*/ true,
943                                       image_filename,
944                                       /*reuse=*/ false,
945                                       image_reservation,
946                                       error_msg);
947     }
948 
949     // Reserve output and decompress into it.
950     MemMap map = MemMap::MapAnonymous(image_location,
951                                       image_header.GetImageSize(),
952                                       PROT_READ | PROT_WRITE,
953                                       /*low_4gb=*/ true,
954                                       image_reservation,
955                                       error_msg);
956     if (map.IsValid()) {
957       const size_t stored_size = image_header.GetDataSize();
958       MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
959                                         PROT_READ,
960                                         MAP_PRIVATE,
961                                         fd,
962                                         /*start=*/ 0,
963                                         /*low_4gb=*/ false,
964                                         image_filename,
965                                         error_msg);
966       if (!temp_map.IsValid()) {
967         DCHECK(error_msg == nullptr || !error_msg->empty());
968         return MemMap::Invalid();
969       }
970       memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
971 
972       Runtime::ScopedThreadPoolUsage stpu;
973       ThreadPool* const pool = stpu.GetThreadPool();
974       const uint64_t start = NanoTime();
975       Thread* const self = Thread::Current();
976       static constexpr size_t kMinBlocks = 2u;
977       const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
978       for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
979         auto function = [&](Thread*) {
980           const uint64_t start2 = NanoTime();
981           ScopedTrace trace("LZ4 decompress block");
982           bool result = block.Decompress(/*out_ptr=*/map.Begin(),
983                                          /*in_ptr=*/temp_map.Begin(),
984                                          error_msg);
985           if (!result && error_msg != nullptr) {
986             *error_msg = "Failed to decompress image block " + *error_msg;
987           }
988           VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
989                       << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
990         };
991         if (use_parallel) {
992           pool->AddTask(self, new FunctionTask(std::move(function)));
993         } else {
994           function(self);
995         }
996       }
997       if (use_parallel) {
998         ScopedTrace trace("Waiting for workers");
999         // Go to native since we don't want to suspend while holding the mutator lock.
1000         ScopedThreadSuspension sts(Thread::Current(), kNative);
1001         pool->Wait(self, true, false);
1002       }
1003       const uint64_t time = NanoTime() - start;
1004       // Add 1 ns to prevent possible divide by 0.
1005       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
1006                   << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
1007                   << "/s)";
1008     }
1009 
1010     return map;
1011   }
1012 
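  // A relocation range that contains no addresses; used as the default for the optional
  // template parameters of ForwardAddress below so lookups fall through to the remaining
  // range(s).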
1013   class EmptyRange {
1014    public:
1015     ALWAYS_INLINE bool InSource(uintptr_t) const { return false; }
1016     ALWAYS_INLINE bool InDest(uintptr_t) const { return false; }
1017     ALWAYS_INLINE uintptr_t ToDest(uintptr_t) const { UNREACHABLE(); }
1018   };
1019 
1020   template <typename Range0, typename Range1 = EmptyRange, typename Range2 = EmptyRange>
1021   class ForwardAddress {
1022    public:
1023     ForwardAddress(const Range0& range0 = Range0(),
1024                    const Range1& range1 = Range1(),
1025                    const Range2& range2 = Range2())
1026         : range0_(range0), range1_(range1), range2_(range2) {}
1027 
1028     // Return the relocated address of a heap object.
1029     // Null checks must be performed in the caller (for performance reasons).
1030     template <typename T>
1031     ALWAYS_INLINE T* operator()(T* src) const {
1032       DCHECK(src != nullptr);
1033       const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
1034       if (range2_.InSource(uint_src)) {
1035         return reinterpret_cast<T*>(range2_.ToDest(uint_src));
1036       }
1037       if (range1_.InSource(uint_src)) {
1038         return reinterpret_cast<T*>(range1_.ToDest(uint_src));
1039       }
1040       CHECK(range0_.InSource(uint_src))
1041           << reinterpret_cast<const void*>(src) << " not in "
1042           << reinterpret_cast<const void*>(range0_.Source()) << "-"
1043           << reinterpret_cast<const void*>(range0_.Source() + range0_.Length());
1044       return reinterpret_cast<T*>(range0_.ToDest(uint_src));
1045     }
1046 
1047    private:
1048     const Range0 range0_;
1049     const Range1 range1_;
1050     const Range2 range2_;
1051   };
1052 
1053   template <typename Forward>
1054   class FixupRootVisitor {
1055    public:
1056     template<typename... Args>
1057     explicit FixupRootVisitor(Args... args) : forward_(args...) {}
1058 
1059     ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1060         REQUIRES_SHARED(Locks::mutator_lock_) {
1061       if (!root->IsNull()) {
1062         VisitRoot(root);
1063       }
1064     }
1065 
1066     ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1067         REQUIRES_SHARED(Locks::mutator_lock_) {
1068       mirror::Object* ref = root->AsMirrorPtr();
1069       mirror::Object* new_ref = forward_(ref);
1070       if (ref != new_ref) {
1071         root->Assign(new_ref);
1072       }
1073     }
1074 
1075    private:
1076     Forward forward_;
1077   };
1078 
1079   template <typename Forward>
1080   class FixupObjectVisitor {
1081    public:
1082     explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
1083                                 const Forward& forward)
1084         : visited_(visited), forward_(forward) {}
1085 
1086     // Fix up separately since we also need to fix up method entrypoints.
1087     ALWAYS_INLINE void VisitRootIfNonNull(
1088         mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1089 
1090     ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1091         const {}
1092 
1093     ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
1094                                   MemberOffset offset,
1095                                   bool is_static ATTRIBUTE_UNUSED) const
1096         NO_THREAD_SAFETY_ANALYSIS {
1097       // Space is not yet added to the heap, don't do a read barrier.
1098       mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
1099           offset);
1100       if (ref != nullptr) {
1101         // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
1102         // image.
1103         obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, forward_(ref));
1104       }
1105     }
1106 
1107     // java.lang.ref.Reference visitor.
1108     void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
1109                     ObjPtr<mirror::Reference> ref) const
1110         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
1111       mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
1112       if (obj != nullptr) {
1113         ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
1114             mirror::Reference::ReferentOffset(),
1115             forward_(obj));
1116       }
1117     }
1118 
1119     void operator()(mirror::Object* obj) const
1120         NO_THREAD_SAFETY_ANALYSIS {
1121       if (!visited_->Set(obj)) {
1122         // Not already visited.
1123         obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
1124             *this,
1125             *this);
1126         CHECK(!obj->IsClass());
1127       }
1128     }
1129 
1130    private:
1131     gc::accounting::ContinuousSpaceBitmap* const visited_;
1132     Forward forward_;
1133   };
1134 
1135   // Relocate an image space mapped at target_base which possibly used to be at a different base
1136   // address. In place means modifying a single ImageSpace in place rather than relocating from
1137   // one ImageSpace to another.
1138   template <PointerSize kPointerSize>
1139   static bool RelocateInPlace(ImageHeader& image_header,
1140                               uint8_t* target_base,
1141                               accounting::ContinuousSpaceBitmap* bitmap,
1142                               const OatFile* app_oat_file,
1143                               std::string* error_msg) {
1144     DCHECK(error_msg != nullptr);
1145     // Set up sections.
1146     uint32_t boot_image_begin = 0;
1147     uint32_t boot_image_end = 0;
1148     uint32_t boot_oat_begin = 0;
1149     uint32_t boot_oat_end = 0;
1150     gc::Heap* const heap = Runtime::Current()->GetHeap();
1151     heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
1152     if (boot_image_begin == boot_image_end) {
1153       *error_msg = "Can not relocate app image without boot image space";
1154       return false;
1155     }
1156     if (boot_oat_begin == boot_oat_end) {
1157       *error_msg = "Can not relocate app image without boot oat file";
1158       return false;
1159     }
1160     const uint32_t boot_image_size = boot_oat_end - boot_image_begin;
1161     const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
1162     if (boot_image_size != image_header_boot_image_size) {
1163       *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
1164                                     PRIu64,
1165                                 static_cast<uint64_t>(boot_image_size),
1166                                 static_cast<uint64_t>(image_header_boot_image_size));
1167       return false;
1168     }
1169     const ImageSection& objects_section = image_header.GetObjectsSection();
1170     // Where the app image objects are mapped to.
1171     uint8_t* objects_location = target_base + objects_section.Offset();
1172     TimingLogger logger(__FUNCTION__, true, false);
1173     RelocationRange boot_image(image_header.GetBootImageBegin(),
1174                                boot_image_begin,
1175                                boot_image_size);
1176     // Metadata is everything after the objects section, use exclusion to be safe.
1177     RelocationRange app_image_metadata(
1178         reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.End(),
1179         reinterpret_cast<uintptr_t>(target_base) + objects_section.End(),
1180         image_header.GetImageSize() - objects_section.End());
1181     // App image heap objects, may be mapped in the heap.
1182     RelocationRange app_image_objects(
1183         reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.Offset(),
1184         reinterpret_cast<uintptr_t>(objects_location),
1185         objects_section.Size());
1186     // Use the oat data section since this is where OatFile::Begin() points.
1187     RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
1188                             // Not necessarily in low 4GB.
1189                             reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
1190                             image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
1191     VLOG(image) << "App image metadata " << app_image_metadata;
1192     VLOG(image) << "App image objects " << app_image_objects;
1193     VLOG(image) << "App oat " << app_oat;
1194     VLOG(image) << "Boot image " << boot_image;
1195     // True if we need to fixup any heap pointers.
1196     const bool fixup_image = boot_image.Delta() != 0 || app_image_metadata.Delta() != 0 ||
1197         app_image_objects.Delta() != 0;
1198     if (!fixup_image) {
1199       // Nothing to fix up.
1200       return true;
1201     }
1202     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
1203 
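         // Build forwarding functors for the three kinds of pointers that may need patching:
         // heap objects (boot image or app image objects section), metadata placed after the
         // objects section, and oat code. Each maps a source address to its mapped destination.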
1204     using ForwardObject = ForwardAddress<RelocationRange, RelocationRange>;
1205     ForwardObject forward_object(boot_image, app_image_objects);
1206     ForwardObject forward_metadata(boot_image, app_image_metadata);
1207     using ForwardCode = ForwardAddress<RelocationRange, RelocationRange>;
1208     ForwardCode forward_code(boot_image, app_oat);
1209     PatchObjectVisitor<kPointerSize, ForwardObject, ForwardCode> patch_object_visitor(
1210         forward_object,
1211         forward_metadata);
1212     if (fixup_image) {
1213       // Two-pass approach: fix up all classes first, then fix up non-class objects.
1214       // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
1215       std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> visited_bitmap(
1216           gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
1217                                                         target_base,
1218                                                         image_header.GetImageSize()));
1219       {
1220         TimingLogger::ScopedTiming timing("Fixup classes", &logger);
1221         const auto& class_table_section = image_header.GetClassTableSection();
1222         if (class_table_section.Size() > 0u) {
1223           ScopedObjectAccess soa(Thread::Current());
1224           ClassTableVisitor class_table_visitor(forward_object);
1225           size_t read_count = 0u;
1226           const uint8_t* data = target_base + class_table_section.Offset();
1227           // We avoid making a copy of the data since we want modifications to be propagated to the
1228           // memory map.
1229           ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
1230           for (ClassTable::TableSlot& slot : temp_set) {
1231             slot.VisitRoot(class_table_visitor);
1232             mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
1233             if (!app_image_objects.InDest(klass)) {
1234               continue;
1235             }
1236             const bool already_marked = visited_bitmap->Set(klass);
1237             CHECK(!already_marked) << "App image class already visited";
1238             patch_object_visitor.VisitClass(klass);
1239             // Then patch the non-embedded vtable and iftable.
1240             ObjPtr<mirror::PointerArray> vtable =
1241                 klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
1242             if (vtable != nullptr &&
1243                 app_image_objects.InDest(vtable.Ptr()) &&
1244                 !visited_bitmap->Set(vtable.Ptr())) {
1245               patch_object_visitor.VisitPointerArray(vtable);
1246             }
1247             ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
1248             if (iftable != nullptr && app_image_objects.InDest(iftable.Ptr())) {
1249               // Avoid processing the fields of iftable since we will process them later
1250               // below anyway.
1251               int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
1252               for (int32_t i = 0; i != ifcount; ++i) {
1253                 ObjPtr<mirror::PointerArray> unpatched_ifarray =
1254                     iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
1255                 if (unpatched_ifarray != nullptr) {
1256                   // The iftable has not been patched, so we need to explicitly adjust the pointer.
1257                   ObjPtr<mirror::PointerArray> ifarray = forward_object(unpatched_ifarray.Ptr());
1258                   if (app_image_objects.InDest(ifarray.Ptr()) &&
1259                       !visited_bitmap->Set(ifarray.Ptr())) {
1260                     patch_object_visitor.VisitPointerArray(ifarray);
1261                   }
1262                 }
1263               }
1264             }
1265           }
1266         }
1267       }
1268 
1269       // Fixing up objects may read fields in the boot image, so take the mutator lock here
1270       // to be safe, though it's probably not required.
1271       TimingLogger::ScopedTiming timing("Fixup objects", &logger);
1272       ScopedObjectAccess soa(Thread::Current());
1273       // Need to update the image to be at the target base.
1274       uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
1275       uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
1276       FixupObjectVisitor<ForwardObject> fixup_object_visitor(visited_bitmap.get(), forward_object);
1277       bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
1278       // Fixup image roots.
1279       CHECK(app_image_objects.InSource(reinterpret_cast<uintptr_t>(
1280           image_header.GetImageRoots<kWithoutReadBarrier>().Ptr())));
1281       image_header.RelocateImageObjects(app_image_objects.Delta());
1282       CHECK_EQ(image_header.GetImageBegin(), target_base);
1283       // Fix up dex cache DexFile pointers.
1284       ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
1285           image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
1286               ->AsObjectArray<mirror::DexCache, kVerifyNone>();
1287       for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
1288         ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
1289         CHECK(dex_cache != nullptr);
1290         patch_object_visitor.VisitDexCacheArrays(dex_cache);
1291       }
1292     }
1293     {
1294       // Only touches objects in the app image, no need for mutator lock.
1295       TimingLogger::ScopedTiming timing("Fixup methods", &logger);
1296       image_header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
1297         // TODO: Consider a separate visitor for runtime vs normal methods.
1298         if (UNLIKELY(method.IsRuntimeMethod())) {
1299           ImtConflictTable* table = method.GetImtConflictTable(kPointerSize);
1300           if (table != nullptr) {
1301             ImtConflictTable* new_table = forward_metadata(table);
1302             if (table != new_table) {
1303               method.SetImtConflictTable(new_table, kPointerSize);
1304             }
1305           }
1306           const void* old_code = method.GetEntryPointFromQuickCompiledCodePtrSize(kPointerSize);
1307           const void* new_code = forward_code(old_code);
1308           if (old_code != new_code) {
1309             method.SetEntryPointFromQuickCompiledCodePtrSize(new_code, kPointerSize);
1310           }
1311         } else {
1312           method.UpdateObjectsForImageRelocation(forward_object);
1313           method.UpdateEntrypoints(forward_code, kPointerSize);
1314         }
1315       }, target_base, kPointerSize);
1316     }
1317     if (fixup_image) {
1318       {
1319         // Only touches objects in the app image, no need for mutator lock.
1320         TimingLogger::ScopedTiming timing("Fixup fields", &logger);
1321         image_header.VisitPackedArtFields([&](ArtField& field) NO_THREAD_SAFETY_ANALYSIS {
1322           field.UpdateObjects(forward_object);
1323         }, target_base);
1324       }
1325       {
1326         TimingLogger::ScopedTiming timing("Fixup imt", &logger);
1327         image_header.VisitPackedImTables(forward_metadata, target_base, kPointerSize);
1328       }
1329       {
1330         TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
1331         image_header.VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
1332       }
1333       // In the app image case, the image methods are actually in the boot image.
1334       image_header.RelocateImageMethods(boot_image.Delta());
1335       // Fix up the intern table.
1336       const auto& intern_table_section = image_header.GetInternedStringsSection();
1337       if (intern_table_section.Size() > 0u) {
1338         TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
1339         ScopedObjectAccess soa(Thread::Current());
1340         // Fixup the pointers in the newly written intern table to contain image addresses.
1341         InternTable temp_intern_table;
1342         // Note that we require that ReadFromMemory does not make an internal copy of the elements
1343         // so that the VisitRoots() will update the memory directly rather than the copies.
1344         temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
1345                                              [&](InternTable::UnorderedSet& strings)
1346             REQUIRES_SHARED(Locks::mutator_lock_) {
1347           for (GcRoot<mirror::String>& root : strings) {
1348             root = GcRoot<mirror::String>(forward_object(root.Read<kWithoutReadBarrier>()));
1349           }
1350         }, /*is_boot_image=*/ false);
1351       }
1352     }
1353     if (VLOG_IS_ON(image)) {
1354       logger.Dump(LOG_STREAM(INFO));
1355     }
1356     return true;
1357   }
1358 };
1359 
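     // Helper for loading the boot image: finds the image files in /system and/or the dalvik
     // cache, maps the image spaces, opens and checks their oat files, and relocates the spaces
     // when a randomized base address is requested.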
1360 class ImageSpace::BootImageLoader {
1361  public:
1362   BootImageLoader(const std::vector<std::string>& boot_class_path,
1363                   const std::vector<std::string>& boot_class_path_locations,
1364                   const std::string& image_location,
1365                   InstructionSet image_isa,
1366                   bool relocate,
1367                   bool executable,
1368                   bool is_zygote)
1369       : boot_class_path_(boot_class_path),
1370         boot_class_path_locations_(boot_class_path_locations),
1371         image_location_(image_location),
1372         image_isa_(image_isa),
1373         relocate_(relocate),
1374         executable_(executable),
1375         is_zygote_(is_zygote),
1376         has_system_(false),
1377         has_cache_(false),
1378         is_global_cache_(true),
1379         dalvik_cache_exists_(false),
1380         dalvik_cache_(),
1381         cache_filename_() {
1382   }
1383 
1384   bool IsZygote() const { return is_zygote_; }
1385 
1386   void FindImageFiles() {
1387     std::string system_filename;
1388     bool found_image = FindImageFilenameImpl(image_location_.c_str(),
1389                                              image_isa_,
1390                                              &has_system_,
1391                                              &system_filename,
1392                                              &dalvik_cache_exists_,
1393                                              &dalvik_cache_,
1394                                              &is_global_cache_,
1395                                              &has_cache_,
1396                                              &cache_filename_);
1397     DCHECK(!dalvik_cache_exists_ || !dalvik_cache_.empty());
1398     DCHECK_EQ(found_image, has_system_ || has_cache_);
1399   }
1400 
1401   bool HasSystem() const { return has_system_; }
1402   bool HasCache() const { return has_cache_; }
1403 
1404   bool DalvikCacheExists() const { return dalvik_cache_exists_; }
1405   bool IsGlobalCache() const { return is_global_cache_; }
1406 
1407   const std::string& GetDalvikCache() const {
1408     return dalvik_cache_;
1409   }
1410 
1411   const std::string& GetCacheFilename() const {
1412     return cache_filename_;
1413   }
1414 
1415   bool LoadFromSystem(bool validate_oat_file,
1416                       size_t extra_reservation_size,
1417                       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1418                       /*out*/MemMap* extra_reservation,
1419                       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
1420     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
1421     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
1422 
1423     if (!LoadFromFile(filename,
1424                       validate_oat_file,
1425                       extra_reservation_size,
1426                       &logger,
1427                       boot_image_spaces,
1428                       extra_reservation,
1429                       error_msg)) {
1430       return false;
1431     }
1432 
1433     if (VLOG_IS_ON(image)) {
1434       LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
1435           << boot_image_spaces->front();
1436       logger.Dump(LOG_STREAM(INFO));
1437     }
1438     return true;
1439   }
1440 
1441   bool LoadFromDalvikCache(
1442       bool validate_oat_file,
1443       size_t extra_reservation_size,
1444       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1445       /*out*/MemMap* extra_reservation,
1446       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
1447     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
1448     DCHECK(DalvikCacheExists());
1449 
1450     if (!LoadFromFile(cache_filename_,
1451                       validate_oat_file,
1452                       extra_reservation_size,
1453                       &logger,
1454                       boot_image_spaces,
1455                       extra_reservation,
1456                       error_msg)) {
1457       return false;
1458     }
1459 
1460     if (VLOG_IS_ON(image)) {
1461       LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
1462           << boot_image_spaces->front();
1463       logger.Dump(LOG_STREAM(INFO));
1464     }
1465     return true;
1466   }
1467 
1468  private:
1469   bool LoadFromFile(
1470       const std::string& filename,
1471       bool validate_oat_file,
1472       size_t extra_reservation_size,
1473       TimingLogger* logger,
1474       /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1475       /*out*/MemMap* extra_reservation,
1476       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
1477     ImageHeader system_hdr;
1478     if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
1479       *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
1480       return false;
1481     }
1482     if (system_hdr.GetComponentCount() == 0u ||
1483         system_hdr.GetComponentCount() > boot_class_path_.size()) {
1484       *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
1485                                     "expected non-zero and <= %zu",
1486                                 filename.c_str(),
1487                                 system_hdr.GetComponentCount(),
1488                                 boot_class_path_.size());
1489       return false;
1490     }
1491     MemMap image_reservation;
1492     MemMap local_extra_reservation;
1493     if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
1494                                 reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
1495                                 extra_reservation_size,
1496                                 &image_reservation,
1497                                 &local_extra_reservation,
1498                                 error_msg)) {
1499       return false;
1500     }
1501 
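         // The single image location expands into one location/filename pair per boot class path
         // component covered by the primary image header's component count.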
1502     ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
1503                                                    system_hdr.GetComponentCount());
1504     std::vector<std::string> locations =
1505         ExpandMultiImageLocations(provided_locations, image_location_);
1506     std::vector<std::string> filenames =
1507         ExpandMultiImageLocations(provided_locations, filename);
1508     DCHECK_EQ(locations.size(), filenames.size());
1509     std::vector<std::unique_ptr<ImageSpace>> spaces;
1510     spaces.reserve(locations.size());
1511     for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
1512       spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
1513       const ImageSpace* space = spaces.back().get();
1514       if (space == nullptr) {
1515         return false;
1516       }
1517       uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
1518       uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
1519       if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
1520           !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
1521         return false;
1522       }
1523     }
1524     for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
1525       std::string expected_boot_class_path =
1526           (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
1527       if (!OpenOatFile(spaces[i].get(),
1528                        boot_class_path_[i],
1529                        expected_boot_class_path,
1530                        validate_oat_file,
1531                        logger,
1532                        &image_reservation,
1533                        error_msg)) {
1534         return false;
1535       }
1536     }
1537     if (!CheckReservationExhausted(image_reservation, error_msg)) {
1538       return false;
1539     }
1540 
1541     MaybeRelocateSpaces(spaces, logger);
1542     boot_image_spaces->swap(spaces);
1543     *extra_reservation = std::move(local_extra_reservation);
1544     return true;
1545   }
1546 
1547  private:
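       // Forwards a pointer by a fixed 32-bit delta; used to patch boot image spaces that are
       // mapped at a different address than the one they were compiled for.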
1548   class RelocateVisitor {
1549    public:
1550     explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
1551 
1552     template <typename T>
1553     ALWAYS_INLINE T* operator()(T* src) const {
1554       DCHECK(src != nullptr);
1555       return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
1556     }
1557 
1558    private:
1559     const uint32_t diff_;
1560   };
1561 
1562   static void** PointerAddress(ArtMethod* method, MemberOffset offset) {
1563     return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
1564   }
1565 
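       // Patch all boot image spaces in place by `diff`: image headers, ArtFields, ArtMethods,
       // IMTs and IMT conflict tables, intern tables, class tables, and finally the remaining
       // heap objects. The `patched_objects` bitmap ensures each object is visited only once.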
1566   template <PointerSize kPointerSize>
1567   static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
1568                                uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
1569     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> patched_objects(
1570         gc::accounting::ContinuousSpaceBitmap::Create(
1571             "Marked objects",
1572             spaces.front()->Begin(),
1573             spaces.back()->End() - spaces.front()->Begin()));
1574     using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor, RelocateVisitor>;
1575     RelocateVisitor relocate_visitor(diff);
1576     PatchRelocateVisitor patch_object_visitor(relocate_visitor, relocate_visitor);
1577 
1578     mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
1579     for (const std::unique_ptr<ImageSpace>& space : spaces) {
1580       // First patch the image header. The `diff` is OK for patching 32-bit fields but
1581       // the 64-bit method fields in the ImageHeader may need a negative `delta`.
1582       reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImage(
1583           (reinterpret_cast32<uint32_t>(space->Begin()) >= -diff)  // Would `begin+diff` overflow?
1584               ? -static_cast<int64_t>(-diff) : static_cast<int64_t>(diff));
1585 
1586       // Patch fields and methods.
1587       const ImageHeader& image_header = space->GetImageHeader();
1588       image_header.VisitPackedArtFields([&](ArtField& field) REQUIRES_SHARED(Locks::mutator_lock_) {
1589         patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
1590             &field.DeclaringClassRoot());
1591       }, space->Begin());
1592       image_header.VisitPackedArtMethods([&](ArtMethod& method)
1593           REQUIRES_SHARED(Locks::mutator_lock_) {
1594         patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
1595         void** data_address = PointerAddress(&method, ArtMethod::DataOffset(kPointerSize));
1596         patch_object_visitor.PatchNativePointer(data_address);
1597         void** entrypoint_address =
1598             PointerAddress(&method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
1599         patch_object_visitor.PatchNativePointer(entrypoint_address);
1600       }, space->Begin(), kPointerSize);
1601       auto method_table_visitor = [&](ArtMethod* method) {
1602         DCHECK(method != nullptr);
1603         return relocate_visitor(method);
1604       };
1605       image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
1606       image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
1607 
1608       // Patch the intern table.
1609       if (image_header.GetInternedStringsSection().Size() != 0u) {
1610         const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
1611         size_t read_count;
1612         InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
1613         for (GcRoot<mirror::String>& slot : temp_set) {
1614           patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
1615         }
1616       }
1617 
1618       // Patch the class table and classes, so that we can traverse class hierarchy to
1619       // determine the types of other objects when we visit them later.
1620       if (image_header.GetClassTableSection().Size() != 0u) {
1621         uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
1622         size_t read_count;
1623         ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
1624         DCHECK(!temp_set.empty());
1625         ClassTableVisitor class_table_visitor(relocate_visitor);
1626         for (ClassTable::TableSlot& slot : temp_set) {
1627           slot.VisitRoot(class_table_visitor);
1628           mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
1629           DCHECK(klass != nullptr);
1630           patched_objects->Set(klass);
1631           patch_object_visitor.VisitClass(klass);
1632           if (kIsDebugBuild) {
1633             mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
1634             if (dcheck_class_class == nullptr) {
1635               dcheck_class_class = class_class;
1636             } else {
1637               CHECK_EQ(class_class, dcheck_class_class);
1638             }
1639           }
1640           // Then patch the non-embedded vtable and iftable.
1641           ObjPtr<mirror::PointerArray> vtable =
1642               klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
1643           if (vtable != nullptr && !patched_objects->Set(vtable.Ptr())) {
1644             patch_object_visitor.VisitPointerArray(vtable);
1645           }
1646           ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
1647           if (iftable != nullptr) {
1648             int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
1649             for (int32_t i = 0; i != ifcount; ++i) {
1650               ObjPtr<mirror::PointerArray> unpatched_ifarray =
1651                   iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
1652               if (unpatched_ifarray != nullptr) {
1653                 // The iftable has not been patched, so we need to explicitly adjust the pointer.
1654                 ObjPtr<mirror::PointerArray> ifarray = relocate_visitor(unpatched_ifarray.Ptr());
1655                 if (!patched_objects->Set(ifarray.Ptr())) {
1656                   patch_object_visitor.VisitPointerArray(ifarray);
1657                 }
1658               }
1659             }
1660           }
1661         }
1662       }
1663     }
1664 
1665     // Patch class roots now, so that we can recognize mirror::Method and mirror::Constructor.
1666     ObjPtr<mirror::Class> method_class;
1667     ObjPtr<mirror::Class> constructor_class;
1668     {
1669       const ImageSpace* space = spaces.front().get();
1670       const ImageHeader& image_header = space->GetImageHeader();
1671 
1672       ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
1673           image_header.GetImageRoots<kWithoutReadBarrier>();
1674       patched_objects->Set(image_roots.Ptr());
1675       patch_object_visitor.VisitObject(image_roots.Ptr());
1676 
1677       ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
1678           ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(
1679               image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots));
1680       patched_objects->Set(class_roots.Ptr());
1681       patch_object_visitor.VisitObject(class_roots.Ptr());
1682 
1683       method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
1684       constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
1685     }
1686 
1687     for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
1688       const ImageSpace* space = spaces[s].get();
1689       const ImageHeader& image_header = space->GetImageHeader();
1690 
1691       static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
1692       uint32_t objects_end = image_header.GetObjectsSection().Size();
1693       DCHECK_ALIGNED(objects_end, kObjectAlignment);
1694       for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
1695         mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
1696         if (!patched_objects->Test(object)) {
1697           // This is the last pass over objects, so we do not need to Set().
1698           patch_object_visitor.VisitObject(object);
1699           ObjPtr<mirror::Class> klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
1700           if (klass->IsDexCacheClass<kVerifyNone>()) {
1701             // Patch dex cache array pointers and elements.
1702             ObjPtr<mirror::DexCache> dex_cache =
1703                 object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
1704             patch_object_visitor.VisitDexCacheArrays(dex_cache);
1705           } else if (klass == method_class || klass == constructor_class) {
1706             // Patch the ArtMethod* in the mirror::Executable subobject.
1707             ObjPtr<mirror::Executable> as_executable =
1708                 ObjPtr<mirror::Executable>::DownCast(object);
1709             ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
1710             ArtMethod* patched_method = relocate_visitor(unpatched_method);
1711             as_executable->SetArtMethod</*kTransactionActive=*/ false,
1712                                         /*kCheckTransaction=*/ true,
1713                                         kVerifyNone>(patched_method);
1714           }
1715         }
1716         pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
1717       }
1718     }
1719   }
1720 
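       // Compute the delta between the address the first space is mapped at and the address it
       // was compiled for, then patch all spaces by that delta. When relocation is disabled the
       // delta is expected to be zero and this is a no-op.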
1721   void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
1722                            TimingLogger* logger)
1723       REQUIRES_SHARED(Locks::mutator_lock_) {
1724     TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
1725     ImageSpace* first_space = spaces.front().get();
1726     const ImageHeader& first_space_header = first_space->GetImageHeader();
1727     uint32_t diff =
1728         static_cast<uint32_t>(first_space->Begin() - first_space_header.GetImageBegin());
1729     if (!relocate_) {
1730       DCHECK_EQ(diff, 0u);
1731       return;
1732     }
1733 
1734     PointerSize pointer_size = first_space_header.GetPointerSize();
1735     if (pointer_size == PointerSize::k64) {
1736       DoRelocateSpaces<PointerSize::k64>(spaces, diff);
1737     } else {
1738       DoRelocateSpaces<PointerSize::k32>(spaces, diff);
1739     }
1740   }
1741 
1742   std::unique_ptr<ImageSpace> Load(const std::string& image_location,
1743                                    const std::string& image_filename,
1744                                    TimingLogger* logger,
1745                                    /*inout*/MemMap* image_reservation,
1746                                    /*out*/std::string* error_msg)
1747       REQUIRES_SHARED(Locks::mutator_lock_) {
1748     // Should this be a RDWR lock? This is only a defensive measure, as at
1749     // this point the image should exist.
1750     // However, only the zygote can write into the global dalvik-cache, so
1751     // restrict to zygote processes, or any process that isn't using
1752     // /data/dalvik-cache (which we assume to be allowed to write there).
1753     const bool rw_lock = is_zygote_ || !is_global_cache_;
1754 
1755     // Note that we must not use the file descriptor associated with
1756     // ScopedFlock::GetFile to Init the image file. We want the file
1757     // descriptor (and the associated exclusive lock) to be released when
1758     // we leave Create.
1759     ScopedFlock image = LockedFile::Open(image_filename.c_str(),
1760                                          /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
1761                                          /*block=*/ true,
1762                                          error_msg);
1763 
1764     VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
1765                   << image_location;
1766     // If we are in /system we can assume the image is good. We can also
1767     // assume this if we are using a relocated image (i.e. image checksum
1768     // matches) since this is only different by the offset. We need this to
1769     // make sure that host tests continue to work.
1770     // Since this is the boot image, pass a null oat file; it is loaded later from the boot
1771     // image oat file name.
1772     return Loader::Init(image_filename.c_str(),
1773                         image_location.c_str(),
1774                         /*oat_file=*/ nullptr,
1775                         logger,
1776                         image_reservation,
1777                         error_msg);
1778   }
1779 
1780   bool OpenOatFile(ImageSpace* space,
1781                    const std::string& dex_filename,
1782                    const std::string& expected_boot_class_path,
1783                    bool validate_oat_file,
1784                    TimingLogger* logger,
1785                    /*inout*/MemMap* image_reservation,
1786                    /*out*/std::string* error_msg) {
1787     // VerifyImageAllocations() will be called later in Runtime::Init()
1788     // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
1789     // and ArtField::java_lang_reflect_ArtField_, which are used from
1790     // Object::SizeOf() which VerifyImageAllocations() calls, are not
1791     // set yet at this point.
1792     DCHECK(image_reservation != nullptr);
1793     std::unique_ptr<OatFile> oat_file;
1794     {
1795       TimingLogger::ScopedTiming timing("OpenOatFile", logger);
1796       std::string oat_filename =
1797           ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
1798       std::string oat_location =
1799           ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
1800 
1801       oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
1802                                    oat_filename,
1803                                    oat_location,
1804                                    executable_,
1805                                    /*low_4gb=*/ false,
1806                                    /*abs_dex_location=*/ dex_filename.c_str(),
1807                                    image_reservation,
1808                                    error_msg));
1809       if (oat_file == nullptr) {
1810         *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
1811                                   oat_filename.c_str(),
1812                                   space->GetName(),
1813                                   error_msg->c_str());
1814         return false;
1815       }
1816       const ImageHeader& image_header = space->GetImageHeader();
1817       uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
1818       uint32_t image_oat_checksum = image_header.GetOatChecksum();
1819       if (oat_checksum != image_oat_checksum) {
1820         *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum"
1821                                   " 0x%x in image %s",
1822                                   oat_checksum,
1823                                   image_oat_checksum,
1824                                   space->GetName());
1825         return false;
1826       }
1827       const char* oat_boot_class_path =
1828           oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
1829       oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
1830       if (expected_boot_class_path != oat_boot_class_path) {
1831         *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
1832                                   "boot class path %s in image %s",
1833                                   oat_boot_class_path,
1834                                   expected_boot_class_path.c_str(),
1835                                   space->GetName());
1836         return false;
1837       }
1838       ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
1839       CHECK(image_header.GetOatDataBegin() != nullptr);
1840       uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
1841       if (oat_file->Begin() != oat_data_begin) {
1842         *error_msg = StringPrintf("Oat file '%s' referenced from image %s has unexpected begin"
1843                                       " %p v. %p",
1844                                   oat_filename.c_str(),
1845                                   space->GetName(),
1846                                   oat_file->Begin(),
1847                                   oat_data_begin);
1848         return false;
1849       }
1850     }
1851     if (validate_oat_file) {
1852       TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
1853       if (!ImageSpace::ValidateOatFile(*oat_file, error_msg)) {
1854         DCHECK(!error_msg->empty());
1855         return false;
1856       }
1857     }
1858     space->oat_file_ = std::move(oat_file);
1859     space->oat_file_non_owned_ = space->oat_file_.get();
1860     return true;
1861   }
1862 
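       // Reserve a single contiguous anonymous mapping for the boot image, at a randomized
       // address when relocating. Any requested extra reservation is split off from the end of
       // the mapping and returned separately.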
1863   bool ReserveBootImageMemory(uint32_t reservation_size,
1864                               uint32_t image_start,
1865                               size_t extra_reservation_size,
1866                               /*out*/MemMap* image_reservation,
1867                               /*out*/MemMap* extra_reservation,
1868                               /*out*/std::string* error_msg) {
1869     DCHECK_ALIGNED(reservation_size, kPageSize);
1870     DCHECK_ALIGNED(image_start, kPageSize);
1871     DCHECK(!image_reservation->IsValid());
1872     DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
1873     size_t total_size = reservation_size + extra_reservation_size;
1874     // If relocating, choose a random address for ASLR.
1875     uint32_t addr = relocate_ ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
1876     *image_reservation =
1877         MemMap::MapAnonymous("Boot image reservation",
1878                              reinterpret_cast32<uint8_t*>(addr),
1879                              total_size,
1880                              PROT_NONE,
1881                              /*low_4gb=*/ true,
1882                              /*reuse=*/ false,
1883                              /*reservation=*/ nullptr,
1884                              error_msg);
1885     if (!image_reservation->IsValid()) {
1886       return false;
1887     }
1888     DCHECK(!extra_reservation->IsValid());
1889     if (extra_reservation_size != 0u) {
1890       DCHECK_ALIGNED(extra_reservation_size, kPageSize);
1891       DCHECK_LT(extra_reservation_size, image_reservation->Size());
1892       uint8_t* split = image_reservation->End() - extra_reservation_size;
1893       *extra_reservation = image_reservation->RemapAtEnd(split,
1894                                                          "Boot image extra reservation",
1895                                                          PROT_NONE,
1896                                                          error_msg);
1897       if (!extra_reservation->IsValid()) {
1898         return false;
1899       }
1900     }
1901 
1902     return true;
1903   }
1904 
1905   bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
1906     if (image_reservation.IsValid()) {
1907       *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
1908                                 image_reservation.Begin(),
1909                                 image_reservation.End());
1910       return false;
1911     }
1912     return true;
1913   }
1914 
1915   const std::vector<std::string>& boot_class_path_;
1916   const std::vector<std::string>& boot_class_path_locations_;
1917   const std::string& image_location_;
1918   InstructionSet image_isa_;
1919   bool relocate_;
1920   bool executable_;
1921   bool is_zygote_;
1922   bool has_system_;
1923   bool has_cache_;
1924   bool is_global_cache_;
1925   bool dalvik_cache_exists_;
1926   std::string dalvik_cache_;
1927   std::string cache_filename_;
1928 };
1929 
1930 static constexpr uint64_t kLowSpaceValue = 50 * MB;
1931 static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
1932 
1933 // Read the free space of the cache partition and make a decision whether to keep the generated
1934 // image. This is to try to mitigate situations where the system might run out of space later.
1935 static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
1936   // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
1937   struct statvfs buf;
1938 
1939   int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
1940   if (res != 0) {
1941     // Could not stat. Conservatively tell the system to delete the image.
1942     *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
1943     return false;
1944   }
1945 
1946   uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
1947   // Zygote is privileged, but other things are not. Use bavail.
1948   uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
1949 
1950   // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
1951   // environment. We do not want to fail quickening the boot image there, as it is beneficial
1952   // for time-to-UI.
1953   if (fs_overall_size > kTmpFsSentinelValue) {
1954     if (fs_free_size < kLowSpaceValue) {
1955       *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available, need at "
1956                                 "least %" PRIu64 ".",
1957                                 static_cast<double>(fs_free_size) / MB,
1958                                 kLowSpaceValue / MB);
1959       return false;
1960     }
1961   }
1962   return true;
1963 }
1964 
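     // Load the boot image from /system or the dalvik cache in the requested order, falling back
     // to generating an image into the dalvik cache. Returns false if every attempt fails, in
     // which case the runtime falls back to imageless running.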
1965 bool ImageSpace::LoadBootImage(
1966     const std::vector<std::string>& boot_class_path,
1967     const std::vector<std::string>& boot_class_path_locations,
1968     const std::string& image_location,
1969     const InstructionSet image_isa,
1970     ImageSpaceLoadingOrder order,
1971     bool relocate,
1972     bool executable,
1973     bool is_zygote,
1974     size_t extra_reservation_size,
1975     /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1976     /*out*/MemMap* extra_reservation) {
1977   ScopedTrace trace(__FUNCTION__);
1978 
1979   DCHECK(boot_image_spaces != nullptr);
1980   DCHECK(boot_image_spaces->empty());
1981   DCHECK_ALIGNED(extra_reservation_size, kPageSize);
1982   DCHECK(extra_reservation != nullptr);
1983   DCHECK_NE(image_isa, InstructionSet::kNone);
1984 
1985   if (image_location.empty()) {
1986     return false;
1987   }
1988 
1989   BootImageLoader loader(boot_class_path,
1990                          boot_class_path_locations,
1991                          image_location,
1992                          image_isa,
1993                          relocate,
1994                          executable,
1995                          is_zygote);
1996 
1997   // Step 0: Extra zygote work.
1998 
1999   // Step 0.a: If we're the zygote, mark boot.
2000   if (loader.IsZygote() && CanWriteToDalvikCache(image_isa)) {
2001     MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
2002   }
2003 
2004   loader.FindImageFiles();
2005 
2006   // Step 0.b: If we're the zygote, check for free space, and prune the cache preemptively,
2007   //           if necessary. While the runtime may be fine (it is pretty tolerant to
2008   //           out-of-disk-space situations), other parts of the platform are not.
2009   //
2010   //           The advantage of doing this proactively is that the later steps are simplified,
2011   //           i.e., we do not need to code retries.
2012   bool low_space = false;
2013   if (loader.IsZygote() && loader.DalvikCacheExists()) {
2014     // Extra checks for the zygote. These only apply when loading the first image, explained below.
2015     const std::string& dalvik_cache = loader.GetDalvikCache();
2016     DCHECK(!dalvik_cache.empty());
2017     std::string local_error_msg;
2018     bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
2019     if (!check_space) {
2020       LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
2021       PruneDalvikCache(image_isa);
2022 
2023       // Re-evaluate the image.
2024       loader.FindImageFiles();
2025 
2026       // Disable compilation/patching - we do not want to fill up the space again.
2027       low_space = true;
2028     }
2029   }
2030 
2031   // Collect all the errors.
2032   std::vector<std::string> error_msgs;
2033 
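       // Try a single load path (system or cache), recording its error message on failure so a
       // compound error can be reported if all attempts fail.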
2034   auto try_load_from = [&](auto has_fn, auto load_fn, bool validate_oat_file) {
2035     if ((loader.*has_fn)()) {
2036       std::string local_error_msg;
2037       if ((loader.*load_fn)(validate_oat_file,
2038                             extra_reservation_size,
2039                             boot_image_spaces,
2040                             extra_reservation,
2041                             &local_error_msg)) {
2042         return true;
2043       }
2044       error_msgs.push_back(local_error_msg);
2045     }
2046     return false;
2047   };
2048 
2049   auto try_load_from_system = [&]() {
2050     return try_load_from(&BootImageLoader::HasSystem, &BootImageLoader::LoadFromSystem, false);
2051   };
2052   auto try_load_from_cache = [&]() {
2053     return try_load_from(&BootImageLoader::HasCache, &BootImageLoader::LoadFromDalvikCache, true);
2054   };
2055 
2056   auto invoke_sequentially = [](auto first, auto second) {
2057     return first() || second();
2058   };
2059 
2060   // Step 1+2: Check system and cache images in the asked-for order.
2061   if (order == ImageSpaceLoadingOrder::kSystemFirst) {
2062     if (invoke_sequentially(try_load_from_system, try_load_from_cache)) {
2063       return true;
2064     }
2065   } else {
2066     if (invoke_sequentially(try_load_from_cache, try_load_from_system)) {
2067       return true;
2068     }
2069   }
2070 
2071   // Step 3: We do not have an existing image in /system,
2072   //         so generate an image into the dalvik cache.
2073   if (!loader.HasSystem() && loader.DalvikCacheExists()) {
2074     std::string local_error_msg;
2075     if (low_space || !Runtime::Current()->IsImageDex2OatEnabled()) {
2076       local_error_msg = "Image compilation disabled.";
2077     } else if (ImageCreationAllowed(loader.IsGlobalCache(),
2078                                     image_isa,
2079                                     is_zygote,
2080                                     &local_error_msg)) {
2081       bool compilation_success =
2082           GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
2083       if (compilation_success) {
2084         if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
2085                                        extra_reservation_size,
2086                                        boot_image_spaces,
2087                                        extra_reservation,
2088                                        &local_error_msg)) {
2089           return true;
2090         }
2091       }
2092     }
2093     error_msgs.push_back(StringPrintf("Cannot compile image to %s: %s",
2094                                       loader.GetCacheFilename().c_str(),
2095                                       local_error_msg.c_str()));
2096   }
2097 
2098   // We failed. Prune the cache to free up space, create a compound error message,
2099   // and return false.
2100   if (loader.DalvikCacheExists()) {
2101     PruneDalvikCache(image_isa);
2102   }
2103 
2104   std::ostringstream oss;
2105   bool first = true;
2106   for (const auto& msg : error_msgs) {
2107     if (!first) {
2108       oss << "\n    ";
2109     }
2110     oss << msg;
2111   }
2112 
2113   LOG(ERROR) << "Could not create image space with image file '" << image_location << "'. "
2114       << "Attempting to fall back to imageless running. Error was: " << oss.str();
2115 
2116   return false;
2117 }
2118 
2119 ImageSpace::~ImageSpace() {
2120   // Everything done by member destructors. Classes forward-declared in header are now defined.
2121 }
2122 
2123 std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
2124                                                            const OatFile* oat_file,
2125                                                            std::string* error_msg) {
2126   // Note: The oat file has already been validated.
2127   return Loader::InitAppImage(image,
2128                               image,
2129                               oat_file,
2130                               /*image_reservation=*/ nullptr,
2131                               error_msg);
2132 }
2133 
2134 const OatFile* ImageSpace::GetOatFile() const {
2135   return oat_file_non_owned_;
2136 }
2137 
2138 std::unique_ptr<const OatFile> ImageSpace::ReleaseOatFile() {
2139   CHECK(oat_file_ != nullptr);
2140   return std::move(oat_file_);
2141 }
2142 
2143 void ImageSpace::Dump(std::ostream& os) const {
2144   os << GetType()
2145       << " begin=" << reinterpret_cast<void*>(Begin())
2146       << ",end=" << reinterpret_cast<void*>(End())
2147       << ",size=" << PrettySize(Size())
2148       << ",name=\"" << GetName() << "\"]";
2149 }
2150 
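     // Check that the dex file checksums recorded in the oat file match the checksums of the dex
     // files it references, including any multidex entries.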
2151 bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
2152   const ArtDexFileLoader dex_file_loader;
2153   for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
2154     const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
2155 
2156     // Skip multidex locations - These will be checked when we visit their
2157     // corresponding primary non-multidex location.
2158     if (DexFileLoader::IsMultiDexLocation(dex_file_location.c_str())) {
2159       continue;
2160     }
2161 
2162     std::vector<uint32_t> checksums;
2163     if (!dex_file_loader.GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
2164       *error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
2165                                 "referenced by oat file %s: %s",
2166                                 dex_file_location.c_str(),
2167                                 oat_file.GetLocation().c_str(),
2168                                 error_msg->c_str());
2169       return false;
2170     }
2171     CHECK(!checksums.empty());
2172     if (checksums[0] != oat_dex_file->GetDexFileLocationChecksum()) {
2173       *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
2174                                 "'%s' and dex file '%s' (0x%x != 0x%x)",
2175                                 oat_file.GetLocation().c_str(),
2176                                 dex_file_location.c_str(),
2177                                 oat_dex_file->GetDexFileLocationChecksum(),
2178                                 checksums[0]);
2179       return false;
2180     }
2181 
2182     // Verify checksums for any related multidex entries.
2183     for (size_t i = 1; i < checksums.size(); i++) {
2184       std::string multi_dex_location = DexFileLoader::GetMultiDexLocation(
2185           i,
2186           dex_file_location.c_str());
2187       const OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
2188                                                            nullptr,
2189                                                            error_msg);
2190       if (multi_dex == nullptr) {
2191         *error_msg = StringPrintf("ValidateOatFile oat file '%s' is missing entry '%s'",
2192                                   oat_file.GetLocation().c_str(),
2193                                   multi_dex_location.c_str());
2194         return false;
2195       }
2196 
2197       if (checksums[i] != multi_dex->GetDexFileLocationChecksum()) {
2198         *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
2199                                   "'%s' and dex file '%s' (0x%x != 0x%x)",
2200                                   oat_file.GetLocation().c_str(),
2201                                   multi_dex_location.c_str(),
2202                                   multi_dex->GetDexFileLocationChecksum(),
2203                                   checksums[i]);
2204         return false;
2205       }
2206     }
2207   }
2208   return true;
2209 }
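
// Illustrative caller-side sketch for the validation above (assumes `oat_file` is an
// already-opened OatFile obtained elsewhere; nothing in this file runs this snippet):
//
//   std::string error_msg;
//   if (!ImageSpace::ValidateOatFile(*oat_file, &error_msg)) {
//     LOG(WARNING) << "Oat file failed checksum validation: " << error_msg;
//   }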

std::string ImageSpace::GetBootClassPathChecksums(ArrayRef<const std::string> boot_class_path,
                                                   const std::string& image_location,
                                                   InstructionSet image_isa,
                                                   ImageSpaceLoadingOrder order,
                                                   /*out*/std::string* error_msg) {
  std::string system_filename;
  bool has_system = false;
  std::string cache_filename;
  bool has_cache = false;
  bool dalvik_cache_exists = false;
  bool is_global_cache = false;
  if (!FindImageFilename(image_location.c_str(),
                         image_isa,
                         &system_filename,
                         &has_system,
                         &cache_filename,
                         &dalvik_cache_exists,
                         &has_cache,
                         &is_global_cache)) {
    *error_msg = StringPrintf("Unable to find image file for %s and %s",
                              image_location.c_str(),
                              GetInstructionSetString(image_isa));
    return std::string();
  }

  DCHECK(has_system || has_cache);
  const std::string& filename = (order == ImageSpaceLoadingOrder::kSystemFirst)
      ? (has_system ? system_filename : cache_filename)
      : (has_cache ? cache_filename : system_filename);
  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
  if (header == nullptr) {
    return std::string();
  }
  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
                                  "expected non-zero and <= %zu",
                              filename.c_str(),
                              header->GetComponentCount(),
                              boot_class_path.size());
    return std::string();
  }

  std::string boot_image_checksum =
      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
  ArrayRef<const std::string> boot_class_path_tail =
      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
  for (const std::string& bcp_filename : boot_class_path_tail) {
    std::vector<std::unique_ptr<const DexFile>> dex_files;
    const ArtDexFileLoader dex_file_loader;
    if (!dex_file_loader.Open(bcp_filename.c_str(),
                              bcp_filename,  // The location does not matter here.
                              /*verify=*/ false,
                              /*verify_checksum=*/ false,
                              error_msg,
                              &dex_files)) {
      return std::string();
    }
    DCHECK(!dex_files.empty());
    StringAppendF(&boot_image_checksum, ":d");
    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
    }
  }
  return boot_image_checksum;
}
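
// Shape of the string assembled above: an "i;<component count>/<image checksum>" prefix for
// the boot image, then one ":d/<checksum>[/<checksum>...]" group per boot class path entry
// not covered by the image. Hypothetical example for a two-component image followed by one
// extra single-dex jar:
//
//   i;2/8940d0f3:d/a9c0e1d2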

std::string ImageSpace::GetBootClassPathChecksums(
    const std::vector<ImageSpace*>& image_spaces,
    const std::vector<const DexFile*>& boot_class_path) {
  size_t pos = 0u;
  std::string boot_image_checksum;

  if (!image_spaces.empty()) {
    const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
    uint32_t component_count = primary_header.GetComponentCount();
    DCHECK_EQ(component_count, image_spaces.size());
    boot_image_checksum =
        StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
    for (const ImageSpace* space : image_spaces) {
      size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
      if (kIsDebugBuild) {
        CHECK_NE(num_dex_files, 0u);
        CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
        for (size_t i = 0; i != num_dex_files; ++i) {
          CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
                   boot_class_path[pos + i]->GetLocation());
        }
      }
      pos += num_dex_files;
    }
  }

  ArrayRef<const DexFile* const> boot_class_path_tail =
      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
  DCHECK(boot_class_path_tail.empty() ||
         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
  for (const DexFile* dex_file : boot_class_path_tail) {
    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
      StringAppendF(&boot_image_checksum, boot_image_checksum.empty() ? "d" : ":d");
    }
    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
  }
  return boot_image_checksum;
}
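
// This overload emits the same syntax as the location-based overload above, so a checksum
// string computed from already-loaded image spaces can be compared directly with one computed
// from an image file on disk. With no image spaces at all, the string starts with a bare "d"
// group (see the ternary above), e.g. "d/1f2e3d4c" for a single dex file (hypothetical
// checksum value).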

std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
    const std::vector<std::string>& dex_locations,
    const std::string& image_location) {
  return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
}

std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
    ArrayRef<const std::string> dex_locations,
    const std::string& image_location) {
  DCHECK(!dex_locations.empty());

  // Find the path.
  size_t last_slash = image_location.rfind('/');
  CHECK_NE(last_slash, std::string::npos);

  // We also need to honor path components that were encoded through '@'. Otherwise the loading
  // code won't be able to find the images.
  if (image_location.find('@', last_slash) != std::string::npos) {
    last_slash = image_location.rfind('@');
  }

  // Find the dot separating the primary image name from the extension.
  size_t last_dot = image_location.rfind('.');
  // Extract the extension and base (the path and primary image name).
  std::string extension;
  std::string base = image_location;
  if (last_dot != std::string::npos && last_dot > last_slash) {
    extension = image_location.substr(last_dot);  // Including the dot.
    base.resize(last_dot);
  }
  // For non-empty primary image name, add '-' to the `base`.
  if (last_slash + 1u != base.size()) {
    base += '-';
  }

  std::vector<std::string> locations;
  locations.reserve(dex_locations.size());
  locations.push_back(image_location);

  // Now create the other names. Use a counted loop to skip the first one.
  for (size_t i = 1u; i < dex_locations.size(); ++i) {
    // Replace path with `base` (i.e. image path and prefix) and replace the original
    // extension (if any) with `extension`.
    std::string name = dex_locations[i];
    size_t last_dex_slash = name.rfind('/');
    if (last_dex_slash != std::string::npos) {
      name = name.substr(last_dex_slash + 1);
    }
    size_t last_dex_dot = name.rfind('.');
    if (last_dex_dot != std::string::npos) {
      name.resize(last_dex_dot);
    }
    locations.push_back(base + name + extension);
  }
  return locations;
}
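
// Worked example of the expansion above, using hypothetical but conventionally shaped inputs:
// image_location "/system/framework/boot.art" with dex_locations
// { "/system/framework/core-oj.jar", "/system/framework/core-libart.jar" } keeps the primary
// location and rebuilds every later entry as <base>-<dex stem><extension>, yielding
//
//   { "/system/framework/boot.art", "/system/framework/boot-core-libart.art" }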

void ImageSpace::DumpSections(std::ostream& os) const {
  const uint8_t* base = Begin();
  const ImageHeader& header = GetImageHeader();
  for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
    auto section_type = static_cast<ImageHeader::ImageSections>(i);
    const ImageSection& section = header.GetImageSection(section_type);
    os << section_type << " " << reinterpret_cast<const void*>(base + section.Offset())
       << "-" << reinterpret_cast<const void*>(base + section.End()) << "\n";
  }
}
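
// Each line written above has the form "<section type> <begin>-<end>", one line per
// ImageHeader section, where both addresses are absolute pointers into the mapped image
// (the image base plus the section's offset and end).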

void ImageSpace::DisablePreResolvedStrings() {
  // Clear dex cache pointers.
  ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
      GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
  for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
    dex_cache->ClearPreResolvedStrings();
  }
}
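
// Note the interaction with ReleaseMetadata() below: it checks NumPreResolvedStrings() on
// every dex cache and skips the madvise while any startup string cache is still present, so
// callers are generally expected to disable the caches first.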

void ImageSpace::ReleaseMetadata() {
  const ImageSection& metadata = GetImageHeader().GetMetadataSection();
  VLOG(image) << "Releasing " << metadata.Size() << " image metadata bytes";
  // In the case where new app images may have been added around the checkpoint, ensure that we
  // don't madvise the cache for these.
  ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
      GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
  bool have_startup_cache = false;
  for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
    if (dex_cache->NumPreResolvedStrings() != 0u) {
      have_startup_cache = true;
    }
  }
  // Only safe to do for images that have their preresolved strings caches disabled. This is because
  // uncompressed images madvise to the original unrelocated image contents.
  if (!have_startup_cache) {
    // Avoid using ZeroAndReleasePages since the zero fill might not be word atomic.
    uint8_t* const page_begin = AlignUp(Begin() + metadata.Offset(), kPageSize);
    uint8_t* const page_end = AlignDown(Begin() + metadata.End(), kPageSize);
    if (page_begin < page_end) {
      CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
    }
  }
}
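
// Numeric sketch of the alignment above, assuming a 4 KiB page size, a page-aligned Begin(),
// and hypothetical section bounds metadata.Offset() = 0x1100 and metadata.End() = 0x5080:
// page_begin = Begin() + AlignUp(0x1100, 0x1000) = Begin() + 0x2000 and
// page_end = Begin() + AlignDown(0x5080, 0x1000) = Begin() + 0x5000, so only the fully covered
// pages in [Begin() + 0x2000, Begin() + 0x5000) are released; the partial pages at either end
// are left untouched.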

}  // namespace space
}  // namespace gc
}  // namespace art