/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "image.h"

#include <lz4.h>
#include <lz4hc.h>
#include <sstream>
#include <sys/stat.h>
#include <zlib.h>

#include "android-base/stringprintf.h"

#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "base/utils.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_array.h"
#include "oat.h"

namespace art HIDDEN {

const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
// Revert dex cache change.
const uint8_t ImageHeader::kImageVersion[] = { '1', '1', '8', '\0' };

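// Note: image_checksum_ is initialized to zero here; the actual checksum is computed and stored
// later through SetImageChecksum() (see WriteData() below).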
ImageHeader::ImageHeader(uint32_t image_reservation_size,
                         uint32_t component_count,
                         uint32_t image_begin,
                         uint32_t image_size,
                         ImageSection* sections,
                         uint32_t image_roots,
                         uint32_t oat_checksum,
                         uint32_t oat_file_begin,
                         uint32_t oat_data_begin,
                         uint32_t oat_data_end,
                         uint32_t oat_file_end,
                         uint32_t boot_image_begin,
                         uint32_t boot_image_size,
                         uint32_t boot_image_component_count,
                         uint32_t boot_image_checksum,
                         PointerSize pointer_size)
    : image_reservation_size_(image_reservation_size),
      component_count_(component_count),
      image_begin_(image_begin),
      image_size_(image_size),
      image_checksum_(0u),
      oat_checksum_(oat_checksum),
      oat_file_begin_(oat_file_begin),
      oat_data_begin_(oat_data_begin),
      oat_data_end_(oat_data_end),
      oat_file_end_(oat_file_end),
      boot_image_begin_(boot_image_begin),
      boot_image_size_(boot_image_size),
      boot_image_component_count_(boot_image_component_count),
      boot_image_checksum_(boot_image_checksum),
      image_roots_(image_roots),
      pointer_size_(pointer_size) {
  CHECK_EQ(image_begin, RoundUp(image_begin, kElfSegmentAlignment));
  if (oat_checksum != 0u) {
    CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kElfSegmentAlignment));
    CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, alignof(OatHeader)));
    CHECK_LT(image_roots, oat_file_begin);
    CHECK_LE(oat_file_begin, oat_data_begin);
    CHECK_LT(oat_data_begin, oat_data_end);
    CHECK_LE(oat_data_end, oat_file_end);
  }
  static_assert(sizeof(PointerSize) == sizeof(uint32_t),
                "PointerSize class is expected to be a uint32_t for the header");
  memcpy(magic_, kImageMagic, sizeof(kImageMagic));
  memcpy(version_, kImageVersion, sizeof(kImageVersion));
  std::copy_n(sections, kSectionCount, sections_);
}

void ImageHeader::RelocateImageReferences(int64_t delta) {
  // App images can be relocated to a page-aligned address.
  // Unlike the boot image, for which the memory is reserved in advance of
  // loading and is aligned to kElfSegmentAlignment, app images can be mapped
  // without reserving memory, i.e. via direct file mapping, in which case the
  // memory range is aligned by the kernel and the only guarantee is that it is
  // aligned to the page size.
  //
  // NOTE: While this might be less than the alignment required by the ELF
  // header, it should be sufficient in practice, as the only reason for the
  // ELF segment alignment to be larger than one page is compatibility with
  // system configurations that use a larger page size.
  //
  // Adding a preliminary memory reservation would introduce some overhead.
  //
  // However, technically the alignment requirement isn't fulfilled and that
  // might be worth addressing even if it adds some overhead. This would have
  // to be done in coordination with the dynamic linker's ELF loader, as
  // otherwise inconsistency would still be possible, e.g. when using
  // `dlopen`-like calls to load OAT files.
  CHECK_ALIGNED_PARAM(delta, gPageSize) << "relocation delta must be page aligned";
  oat_file_begin_ += delta;
  oat_data_begin_ += delta;
  oat_data_end_ += delta;
  oat_file_end_ += delta;
  image_begin_ += delta;
  image_roots_ += delta;
}

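// Shift the fields that are relocated together with the boot image: the boot image begin address
// (when set) and the image method pointers.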
void ImageHeader::RelocateBootImageReferences(int64_t delta) {
  CHECK_ALIGNED(delta, kElfSegmentAlignment) << "relocation delta must be Elf segment aligned";
  DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
  if (boot_image_begin_ != 0u) {
    boot_image_begin_ += delta;
  }
  for (size_t i = 0; i < kImageMethodsCount; ++i) {
    image_methods_[i] += delta;
  }
}

bool ImageHeader::IsAppImage() const {
  // Unlike the boot image and boot image extensions, which include address space for
  // oat files in their reservation size, app images are loaded separately from oat files,
  // and their reservation size is the image size rounded up to ELF segment alignment.
  return image_reservation_size_ == RoundUp(image_size_, kElfSegmentAlignment);
}

uint32_t ImageHeader::GetImageSpaceCount() const {
  DCHECK(!IsAppImage());
  DCHECK_NE(component_count_, 0u);  // Must be the header for the first component.
  // For images compiled with --single-image, there is only one oat file. To detect
  // that, check whether the reservation ends at the end of the first oat file.
  return (image_begin_ + image_reservation_size_ == oat_file_end_) ? 1u : component_count_;
}

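// Basic consistency check of the header: magic, version, reservation alignment, and ordering of
// the image and oat address ranges.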
bool ImageHeader::IsValid() const {
  if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
    return false;
  }
  if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
    return false;
  }
  if (!IsAligned<kElfSegmentAlignment>(image_reservation_size_)) {
    return false;
  }
  // Unsigned so wraparound is well defined.
  if (image_begin_ >= image_begin_ + image_size_) {
    return false;
  }
  if (oat_checksum_ != 0u) {
    if (oat_file_begin_ > oat_file_end_) {
      return false;
    }
    if (oat_data_begin_ > oat_data_end_) {
      return false;
    }
    if (oat_file_begin_ >= oat_data_begin_) {
      return false;
    }
  }
  return true;
}

const char* ImageHeader::GetMagic() const {
  CHECK(IsValid());
  return reinterpret_cast<const char*>(magic_);
}

ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
  CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
  return reinterpret_cast<ArtMethod*>(image_methods_[index]);
}

std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
  return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}

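// Visit every object in the objects section. Iteration starts at kStartPos to skip the
// ImageHeader, which occupies the beginning of the section, and advances by each object's size
// rounded up to kObjectAlignment.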
void ImageHeader::VisitObjects(ObjectVisitor* visitor,
                               uint8_t* base,
                               PointerSize pointer_size) const {
  DCHECK_EQ(pointer_size, GetPointerSize());
  const ImageSection& objects = GetObjectsSection();
  static const size_t kStartPos = RoundUp(sizeof(ImageHeader), kObjectAlignment);
  for (size_t pos = kStartPos; pos < objects.Size(); ) {
    mirror::Object* object = reinterpret_cast<mirror::Object*>(base + objects.Offset() + pos);
    visitor->Visit(object);
    pos += RoundUp(object->SizeOf(), kObjectAlignment);
  }
}

PointerSize ImageHeader::GetPointerSize() const {
  return pointer_size_;
}

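// Thin wrapper around LZ4_decompress_safe() that reports failures through `error_msg` and
// returns the decompressed size through an out-parameter instead of a signed return value.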
bool LZ4_decompress_safe_checked(const char* source,
                                 char* dest,
                                 int compressed_size,
                                 int max_decompressed_size,
                                 /*out*/ size_t* decompressed_size_checked,
                                 /*out*/ std::string* error_msg) {
  int decompressed_size =
      LZ4_decompress_safe(source, dest, compressed_size, max_decompressed_size);
  if (UNLIKELY(decompressed_size < 0)) {
    *error_msg = android::base::StringPrintf("LZ4_decompress_safe() returned negative size: %d",
                                             decompressed_size);
    return false;
  } else {
    *decompressed_size_checked = static_cast<size_t>(decompressed_size);
    return true;
  }
}

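// Copy or decompress this block from the source buffer `in_ptr` into the image memory at
// `out_ptr`, according to the block's storage mode.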
bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
                                    const uint8_t* in_ptr,
                                    std::string* error_msg) const {
  switch (storage_mode_) {
    case kStorageModeUncompressed: {
      CHECK_EQ(image_size_, data_size_);
      memcpy(out_ptr + image_offset_, in_ptr + data_offset_, data_size_);
      break;
    }
    case kStorageModeLZ4:
    case kStorageModeLZ4HC: {
      // LZ4HC and LZ4 share the same compressed format; both are decompressed with
      // LZ4_decompress_safe().
      size_t decompressed_size;
      bool ok = LZ4_decompress_safe_checked(
          reinterpret_cast<const char*>(in_ptr) + data_offset_,
          reinterpret_cast<char*>(out_ptr) + image_offset_,
          data_size_,
          image_size_,
          &decompressed_size,
          error_msg);
      if (!ok) {
        return false;
      }
      if (decompressed_size != image_size_) {
        if (error_msg != nullptr) {
          // Maybe some disk / memory corruption, just bail.
          *error_msg = (std::ostringstream() << "Decompressed size different than image size: "
                            << decompressed_size << ", and " << image_size_).str();
        }
        return false;
      }
      break;
    }
    default: {
      if (error_msg != nullptr) {
        *error_msg = (std::ostringstream() << "Invalid image format " << storage_mode_).str();
      }
      return false;
    }
  }
  return true;
}

const char* ImageHeader::GetImageSectionName(ImageSections index) {
  switch (index) {
    case kSectionObjects: return "Objects";
    case kSectionArtFields: return "ArtFields";
    case kSectionArtMethods: return "ArtMethods";
    case kSectionImTables: return "ImTables";
    case kSectionIMTConflictTables: return "IMTConflictTables";
    case kSectionRuntimeMethods: return "RuntimeMethods";
    case kSectionJniStubMethods: return "JniStubMethods";
    case kSectionInternedStrings: return "InternedStrings";
    case kSectionClassTable: return "ClassTable";
    case kSectionStringReferenceOffsets: return "StringReferenceOffsets";
    case kSectionDexCacheArrays: return "DexCacheArrays";
    case kSectionMetadata: return "Metadata";
    case kSectionImageBitmap: return "ImageBitmap";
    case kSectionCount: return nullptr;
  }
}

// Compress data from `source` into `storage`.
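// In debug builds, the compressed output is decompressed again and compared byte-for-byte
// against `source` as a sanity check.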
static bool CompressData(ArrayRef<const uint8_t> source,
                         ImageHeader::StorageMode image_storage_mode,
                         /*out*/ dchecked_vector<uint8_t>* storage) {
  const uint64_t compress_start_time = NanoTime();

  // Bound is same for both LZ4 and LZ4HC.
  storage->resize(LZ4_compressBound(source.size()));
  size_t data_size = 0;
  if (image_storage_mode == ImageHeader::kStorageModeLZ4) {
    data_size = LZ4_compress_default(
        reinterpret_cast<const char*>(source.data()),
        reinterpret_cast<char*>(storage->data()),
        source.size(),
        storage->size());
  } else {
    DCHECK_EQ(image_storage_mode, ImageHeader::kStorageModeLZ4HC);
    data_size = LZ4_compress_HC(
        reinterpret_cast<const char*>(source.data()),
        reinterpret_cast<char*>(storage->data()),
        source.size(),
        storage->size(),
        LZ4HC_CLEVEL_MAX);
  }

  if (data_size == 0) {
    return false;
  }
  storage->resize(data_size);

  VLOG(image) << "Compressed from " << source.size() << " to " << storage->size() << " in "
              << PrettyDuration(NanoTime() - compress_start_time);
  if (kIsDebugBuild) {
    dchecked_vector<uint8_t> decompressed(source.size());
    size_t decompressed_size;
    std::string error_msg;
    bool ok = LZ4_decompress_safe_checked(
        reinterpret_cast<char*>(storage->data()),
        reinterpret_cast<char*>(decompressed.data()),
        storage->size(),
        decompressed.size(),
        &decompressed_size,
        &error_msg);
    if (!ok) {
      LOG(FATAL) << error_msg;
      UNREACHABLE();
    }
    CHECK_EQ(decompressed_size, decompressed.size());
    CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
  }
  return true;
}

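// Write out the image data: the (possibly compressed) data blocks starting right after the
// ImageHeader, the block descriptors when compression is used, and finally the image bitmap,
// aligned up to kElfSegmentAlignment. Also updates data_size_, blocks_offset_/blocks_count_,
// the bitmap section offset and, when requested, the image checksum.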
bool ImageHeader::WriteData(const ImageFileGuard& image_file,
                            const uint8_t* data,
                            const uint8_t* bitmap_data,
                            ImageHeader::StorageMode image_storage_mode,
                            uint32_t max_image_block_size,
                            bool update_checksum,
                            std::string* error_msg) {
  const bool is_compressed = image_storage_mode != ImageHeader::kStorageModeUncompressed;
  dchecked_vector<std::pair<uint32_t, uint32_t>> block_sources;
  dchecked_vector<ImageHeader::Block> blocks;

  // Add a set of solid blocks such that no block is larger than the maximum size. A solid block
  // is a block that must be decompressed all at once.
  auto add_blocks = [&](uint32_t offset, uint32_t size) {
    while (size != 0u) {
      const uint32_t cur_size = std::min(size, max_image_block_size);
      block_sources.emplace_back(offset, cur_size);
      offset += cur_size;
      size -= cur_size;
    }
  };

  add_blocks(sizeof(ImageHeader), this->GetImageSize() - sizeof(ImageHeader));

  // Checksum of compressed image data and header.
  uint32_t image_checksum = 0u;
  if (update_checksum) {
    image_checksum = adler32(0L, Z_NULL, 0);
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(this),
                             sizeof(ImageHeader));
  }

  // Copy and compress blocks.
  uint32_t out_offset = sizeof(ImageHeader);
  for (const std::pair<uint32_t, uint32_t> block : block_sources) {
    ArrayRef<const uint8_t> raw_image_data(data + block.first, block.second);
    dchecked_vector<uint8_t> compressed_data;
    ArrayRef<const uint8_t> image_data;
    if (is_compressed) {
      if (!CompressData(raw_image_data, image_storage_mode, &compressed_data)) {
        *error_msg = "Error compressing data for " +
            image_file->GetPath() + ": " + std::string(strerror(errno));
        return false;
      }
      image_data = ArrayRef<const uint8_t>(compressed_data);
    } else {
      image_data = raw_image_data;
      // For uncompressed, preserve alignment since the image will be directly mapped.
      out_offset = block.first;
    }

    // Fill in the compressed location of the block.
    blocks.emplace_back(ImageHeader::Block(
        image_storage_mode,
        /*data_offset=*/ out_offset,
        /*data_size=*/ image_data.size(),
        /*image_offset=*/ block.first,
        /*image_size=*/ block.second));

    if (!image_file->PwriteFully(image_data.data(), image_data.size(), out_offset)) {
      *error_msg = "Failed to write image file data " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    out_offset += image_data.size();
    if (update_checksum) {
      image_checksum = adler32(image_checksum, image_data.data(), image_data.size());
    }
  }

  if (is_compressed) {
    // Align up since the compressed data is not necessarily aligned.
    out_offset = RoundUp(out_offset, alignof(ImageHeader::Block));
    CHECK(!blocks.empty());
    const size_t blocks_bytes = blocks.size() * sizeof(blocks[0]);
    if (!image_file->PwriteFully(&blocks[0], blocks_bytes, out_offset)) {
      *error_msg = "Failed to write image blocks " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    this->blocks_offset_ = out_offset;
    this->blocks_count_ = blocks.size();
    out_offset += blocks_bytes;
  }

  // Data size includes everything except the bitmap.
  this->data_size_ = out_offset - sizeof(ImageHeader);

  // Update and write the bitmap section. Note that the bitmap section is relative to the
  // possibly compressed image.
  ImageSection& bitmap_section = GetImageSection(ImageHeader::kSectionImageBitmap);
  // Align up since data size may be unaligned if the image is compressed.
  out_offset = RoundUp(out_offset, kElfSegmentAlignment);
  bitmap_section = ImageSection(out_offset, bitmap_section.Size());

  if (!image_file->PwriteFully(bitmap_data,
                               bitmap_section.Size(),
                               bitmap_section.Offset())) {
    *error_msg = "Failed to write image file bitmap " +
        image_file->GetPath() + ": " + std::string(strerror(errno));
    return false;
  }

  int err = image_file->Flush();
  if (err < 0) {
    *error_msg = "Failed to flush image file " + image_file->GetPath() + ": " + std::to_string(err);
    return false;
  }

  if (update_checksum) {
    // Calculate the image checksum of the remaining data.
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(bitmap_data),
                             bitmap_section.Size());
    this->SetImageChecksum(image_checksum);
  }

  if (VLOG_IS_ON(image)) {
    const size_t separately_written_section_size = bitmap_section.Size();
    const size_t total_uncompressed_size = image_size_ + separately_written_section_size;
    const size_t total_compressed_size = out_offset + separately_written_section_size;

    VLOG(compiler) << "UncompressedImageSize = " << total_uncompressed_size;
    if (total_uncompressed_size != total_compressed_size) {
      VLOG(compiler) << "CompressedImageSize = " << total_compressed_size;
    }
  }

  DCHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()))
      << "Bitmap should be at the end of the file";
  return true;
}

}  // namespace art