/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

#include <7zCrc.h>
#include <Xz.h>
#include <XzCrc64.h>

#include <algorithm>
#include <memory>
#include <mutex>
#include <optional>

#include <android-base/unique_fd.h>

#include <unwindstack/Log.h>
#include <unwindstack/Memory.h>

#include "Check.h"
#include "MemoryBuffer.h"
#include "MemoryCache.h"
#include "MemoryFileAtOffset.h"
#include "MemoryLocal.h"
#include "MemoryOffline.h"
#include "MemoryOfflineBuffer.h"
#include "MemoryRange.h"
#include "MemoryRemote.h"
#include "MemoryXz.h"

namespace unwindstack {

// Statistics (used only for optional debug log messages).
static constexpr bool kLogMemoryXzUsage = false;
std::atomic_size_t MemoryXz::total_used_ = 0;
std::atomic_size_t MemoryXz::total_size_ = 0;
std::atomic_size_t MemoryXz::total_open_ = 0;

static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {

  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements.  These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read], .iov_len = len,
    };

    size_t iovecs_used = 0;
    while (len > 0) {
      if (iovecs_used == kMaxIovecs) {
        break;
      }

      // struct iovec uses void* for iov_base.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;
  }
  return total_read;
}
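
// For example (hypothetical values, 4096-byte pages): a 10000-byte read
// starting at remote address 0x1ff0 is passed to process_vm_readv() as four
// source iovecs,
//   {0x1ff0, 16}, {0x2000, 4096}, {0x3000, 4096}, {0x4000, 1792},
// all paired with a single destination iovec. If one source page is
// unmapped, only the data from that iovec onwards is lost, so the call still
// returns every readable byte before the hole.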

static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  // ptrace() returns -1 and sets errno when the operation fails.
  // To disambiguate -1 from a valid result, we clear errno beforehand.
  errno = 0;
  *value = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  if (*value == -1 && errno) {
    return false;
  }
  return true;
}

static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
  // Make sure that there is no overflow.
  uint64_t max_size;
  if (__builtin_add_overflow(addr, bytes, &max_size)) {
    return 0;
  }

  size_t bytes_read = 0;
  long data;
  size_t align_bytes = addr & (sizeof(long) - 1);
  if (align_bytes != 0) {
    if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
      return 0;
    }
    size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
    memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
    addr += copy_bytes;
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
    bytes -= copy_bytes;
    bytes_read += copy_bytes;
  }

  for (size_t i = 0; i < bytes / sizeof(long); i++) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, sizeof(long));
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
    addr += sizeof(long);
    bytes_read += sizeof(long);
  }

  size_t left_over = bytes & (sizeof(long) - 1);
  if (left_over) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, left_over);
    bytes_read += left_over;
  }
  return bytes_read;
}
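
// For example (hypothetical values, 64-bit target, sizeof(long) == 8):
// PtraceRead(pid, 0x1003, dst, 10) peeks the word at 0x1000 and copies its
// bytes 3..7, then peeks the word at 0x1008 and copies the remaining 5
// bytes: two PTRACE_PEEKTEXT calls for a 10-byte unaligned read.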

bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
  size_t rc = Read(addr, dst, size);
  return rc == size;
}

bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
  char buffer[256];  // Large enough for 99% of symbol names.
  size_t size = 0;   // Number of bytes which were read into the buffer.
  for (size_t offset = 0; offset < max_read; offset += size) {
    // Look for the null-terminator first, so we can allocate a string of the exact size.
    // If we know the end of the valid memory range, do the reads in larger blocks.
    size_t read = std::min(sizeof(buffer), max_read - offset);
    size = Read(addr + offset, buffer, read);
    if (size == 0) {
      return false;  // We have not found the end of the string yet and we cannot read more data.
    }
    size_t length = strnlen(buffer, size);  // Index of the null-terminator.
    if (length < size) {
      // We found the null-terminator. Allocate the string and set its content.
      if (offset == 0) {
        // We did just a single read, so the buffer already contains the whole string.
        dst->assign(buffer, length);
        return true;
      } else {
        // The buffer contains only the last block. Read the whole string again.
        dst->assign(offset + length, '\0');
        return ReadFully(addr, dst->data(), dst->size());
      }
    }
  }
  return false;
}
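
// A minimal usage sketch (hypothetical; pid and sym_addr are illustrative):
//
//   std::shared_ptr<Memory> memory = Memory::CreateProcessMemory(pid);
//   std::string name;
//   if (memory->ReadString(sym_addr, &name, /*max_read=*/4096)) {
//     // name holds the null-terminated string found at sym_addr.
//   }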

std::unique_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset,
                                                 uint64_t size) {
  auto memory = std::make_unique<MemoryFileAtOffset>();

  if (memory->Init(path, offset, size)) {
    return memory;
  }

  return nullptr;
}

std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryLocal());
  }
  return std::shared_ptr<Memory>(new MemoryRemote(pid));
}
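
// Note that reading a remote process generally requires ptrace credentials:
// process_vm_readv() needs PTRACE_MODE_ATTACH permission on the target, and
// the PTRACE_PEEKTEXT fallback in MemoryRemote only succeeds once the caller
// has attached and the target thread has stopped.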

std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
  }
  return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
}

std::shared_ptr<Memory> Memory::CreateProcessMemoryThreadCached(pid_t pid) {
  if (pid == getpid()) {
    return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryLocal()));
  }
  return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryRemote(pid)));
}

std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start,
                                                    uint64_t end) {
  return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end));
}

size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
  if (addr >= size_) {
    return 0;
  }

  size_t bytes_left = size_ - static_cast<size_t>(addr);
  const unsigned char* actual_base = static_cast<const unsigned char*>(raw_) + addr;
  size_t actual_len = std::min(bytes_left, size);

  memcpy(dst, actual_base, actual_len);
  return actual_len;
}

uint8_t* MemoryBuffer::GetPtr(size_t offset) {
  if (offset < size_) {
    return &raw_[offset];
  }
  return nullptr;
}

MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}

void MemoryFileAtOffset::Clear() {
  if (data_) {
    munmap(&data_[-offset_], size_ + offset_);
    data_ = nullptr;
  }
}

bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}

size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
  if (addr >= size_) {
    return 0;
  }

  size_t bytes_left = size_ - static_cast<size_t>(addr);
  const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
  size_t actual_len = std::min(bytes_left, size);

  memcpy(dst, actual_base, actual_len);
  return actual_len;
}

size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, so try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}

size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
  return ProcessVmRead(getpid(), addr, dst, size);
}

MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}

size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
  if (addr < offset_) {
    return 0;
  }

  uint64_t read_offset = addr - offset_;
  if (read_offset >= length_) {
    return 0;
  }

  uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
  uint64_t read_addr;
  if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
    return 0;
  }

  return memory_->Read(read_addr, dst, read_length);
}
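
// A worked example of the translation above (hypothetical values): for
// MemoryRange(memory, /*begin=*/0x5000, /*length=*/0x1000, /*offset=*/0x7000),
// Read(0x7010, dst, 16) computes read_offset = 0x10 and forwards to
// memory_->Read(0x5010, dst, 16); any addr outside [0x7000, 0x8000) reads 0
// bytes.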

void MemoryRanges::Insert(MemoryRange* memory) {
  uint64_t last_addr;
  if (__builtin_add_overflow(memory->offset(), memory->length(), &last_addr)) {
    // This should never happen in the real world. However, it is possible
    // that an offset in a mapped-in segment could be crafted such that
    // this value overflows. In that case, clamp the value to the max uint64
    // value.
    last_addr = UINT64_MAX;
  }
  maps_.emplace(last_addr, memory);
}

size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
  auto entry = maps_.upper_bound(addr);
  if (entry != maps_.end()) {
    return entry->second->Read(addr, dst, size);
  }
  return 0;
}
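
// The map above is keyed by each range's exclusive end address, so
// upper_bound(addr) finds the first range ending after addr; the range's own
// Read() then rejects an addr below its offset. This lookup assumes the
// inserted ranges do not overlap.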

bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
  auto memory_file = std::make_shared<MemoryFileAtOffset>();
  if (!memory_file->Init(file, offset)) {
    return false;
  }

  // The first uint64_t value is the start of memory.
  uint64_t start;
  if (!memory_file->ReadFully(0, &start, sizeof(start))) {
    return false;
  }

  uint64_t size = memory_file->Size();
  if (__builtin_sub_overflow(size, sizeof(start), &size)) {
    return false;
  }

  memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
  return true;
}
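
// The offline file layout implied above is (sketch):
//
//   [0x00] uint64_t start  // absolute address where the dump begins
//   [0x08] uint8_t data[]  // contents of [start, start + data size)
//
// so the MemoryRange wraps the file with begin = sizeof(start) and
// offset = start, translating absolute addresses into file positions.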

size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
  if (!memory_) {
    return 0;
  }

  return memory_->Read(addr, dst, size);
}

MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}

void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}

size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
  if (addr < start_ || addr >= end_) {
    return 0;
  }

  size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
  memcpy(dst, &data_[addr - start_], read_length);
  return read_length;
}

MemoryOfflineParts::~MemoryOfflineParts() {
  for (auto memory : memories_) {
    delete memory;
  }
}

size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
  if (memories_.empty()) {
    return 0;
  }

  // Do a read on each memory object; there is no support for reading across
  // the different memory objects.
  for (MemoryOffline* memory : memories_) {
    size_t bytes = memory->Read(addr, dst, size);
    if (bytes != 0) {
      return bytes;
    }
  }
  return 0;
}

size_t MemoryCacheBase::InternalCachedRead(uint64_t addr, void* dst, size_t size,
                                           CacheDataType* cache) {
  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache->find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry. Since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache->find(addr_page);
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
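
// A hypothetical walk-through, assuming kCacheBits == 12 (the real value is
// defined in MemoryCache.h): Read(0x1ffe, dst, 8) serves the first 2 bytes
// from cached page 0x1 (max_read = 0x2000 - 0x1ffe = 2) and the remaining 6
// from page 0x2, which is why exactly one page crossing is handled above.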

void MemoryCache::Clear() {
  std::lock_guard<std::mutex> lock(cache_lock_);
  cache_.clear();
}

size_t MemoryCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  // Use a single lock since this object is not designed to be performant
  // when multiple threads are reading from it at the same time.
  std::lock_guard<std::mutex> lock(cache_lock_);

  return InternalCachedRead(addr, dst, size, &cache_);
}

MemoryThreadCache::MemoryThreadCache(Memory* memory) : MemoryCacheBase(memory) {
  thread_cache_ = std::make_optional<pthread_key_t>();
  if (pthread_key_create(&*thread_cache_, [](void* memory) {
        CacheDataType* cache = reinterpret_cast<CacheDataType*>(memory);
        delete cache;
      }) != 0) {
    log_async_safe("Failed to create pthread key.");
    thread_cache_.reset();
  }
}

MemoryThreadCache::~MemoryThreadCache() {
  if (thread_cache_) {
    CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
    delete cache;
    pthread_key_delete(*thread_cache_);
  }
}

size_t MemoryThreadCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  if (!thread_cache_) {
    return impl_->Read(addr, dst, size);
  }

  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache == nullptr) {
    cache = new CacheDataType;
    pthread_setspecific(*thread_cache_, cache);
  }

  return InternalCachedRead(addr, dst, size, cache);
}

void MemoryThreadCache::Clear() {
  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache != nullptr) {
    delete cache;
    pthread_setspecific(*thread_cache_, nullptr);
  }
}

MemoryXz::MemoryXz(Memory* memory, uint64_t addr, uint64_t size, const std::string& name)
    : compressed_memory_(memory), compressed_addr_(addr), compressed_size_(size), name_(name) {
  total_open_ += 1;
}

bool MemoryXz::Init() {
  static std::once_flag crc_initialized;
  std::call_once(crc_initialized, []() {
    CrcGenerateTable();
    Crc64GenerateTable();
  });
  if (compressed_size_ >= kMaxCompressedSize) {
    return false;
  }
  if (!ReadBlocks()) {
    return false;
  }

  // All blocks (except the last one) must have the same power-of-2 size.
  if (blocks_.size() > 1) {
    size_t block_size_log2 = __builtin_ctz(blocks_.front().decompressed_size);
    auto correct_size = [=](XzBlock& b) { return b.decompressed_size == (1 << block_size_log2); };
    if (std::all_of(blocks_.begin(), std::prev(blocks_.end()), correct_size) &&
        blocks_.back().decompressed_size <= (1 << block_size_log2)) {
      block_size_log2_ = block_size_log2;
    } else {
      // Inconsistent block sizes. Decompress and merge everything now.
      std::unique_ptr<uint8_t[]> data(new uint8_t[size_]);
      size_t offset = 0;
      for (XzBlock& block : blocks_) {
        if (!Decompress(&block)) {
          return false;
        }
        memcpy(data.get() + offset, block.decompressed_data.get(), block.decompressed_size);
        offset += block.decompressed_size;
      }
      blocks_.clear();
      blocks_.push_back(XzBlock{
          .decompressed_data = std::move(data),
          .decompressed_size = size_,
      });
      block_size_log2_ = 31;  // Because 32 bits is too big (shift right by 32 is not allowed).
    }
  }

  return true;
}
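
// For example, decompressed block sizes of 16KB, 16KB, 16KB, 9KB pass the
// check above with block_size_log2_ = 14, so Read() can locate the block for
// any offset with a single shift; mixed sizes like 16KB, 8KB, 16KB force the
// eager merge into a single block instead.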

MemoryXz::~MemoryXz() {
  total_used_ -= used_;
  total_size_ -= size_;
  total_open_ -= 1;
}

size_t MemoryXz::Read(uint64_t addr, void* buffer, size_t size) {
  if (addr >= size_) {
    return 0;  // Read past the end.
  }
  uint8_t* dst = reinterpret_cast<uint8_t*>(buffer);  // Position in the output buffer.
  for (size_t i = addr >> block_size_log2_; i < blocks_.size(); i++) {
    XzBlock* block = &blocks_[i];
    if (block->decompressed_data == nullptr) {
      if (!Decompress(block)) {
        break;
      }
    }
    size_t offset = (addr - (i << block_size_log2_));  // Start inside the block.
    size_t copy_bytes = std::min<size_t>(size, block->decompressed_size - offset);
    memcpy(dst, block->decompressed_data.get() + offset, copy_bytes);
    dst += copy_bytes;
    addr += copy_bytes;
    size -= copy_bytes;
    if (size == 0) {
      break;
    }
  }
  return dst - reinterpret_cast<uint8_t*>(buffer);
}
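
// A hypothetical read with block_size_log2_ == 14: Read(0x4123, dst, n)
// starts at block index 0x4123 >> 14 = 1, decompresses that block on first
// use, and keeps copying from consecutive blocks until n bytes are delivered
// or a block fails to decompress.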

bool MemoryXz::ReadBlocks() {
  static ISzAlloc alloc;
  alloc.Alloc = [](ISzAllocPtr, size_t size) { return malloc(size); };
  alloc.Free = [](ISzAllocPtr, void* ptr) { return free(ptr); };

  // Read the compressed data, so we can quickly scan through the headers.
  std::unique_ptr<uint8_t[]> compressed_data(new (std::nothrow) uint8_t[compressed_size_]);
  if (compressed_data.get() == nullptr) {
    return false;
  }
  if (!compressed_memory_->ReadFully(compressed_addr_, compressed_data.get(), compressed_size_)) {
    return false;
  }

  // Implement the required interface for communication
  // (the interface is C, so we cannot use virtual methods or member functions).
  struct XzLookInStream : public ILookInStream, public ICompressProgress {
    static SRes LookImpl(const ILookInStream* p, const void** buf, size_t* size) {
      auto* ctx = reinterpret_cast<const XzLookInStream*>(p);
      *buf = ctx->data + ctx->offset;
      *size = std::min(*size, ctx->size - ctx->offset);
      return SZ_OK;
    }
    static SRes SkipImpl(const ILookInStream* p, size_t len) {
      auto* ctx = reinterpret_cast<XzLookInStream*>(const_cast<ILookInStream*>(p));
      ctx->offset += len;
      return SZ_OK;
    }
    static SRes ReadImpl(const ILookInStream* p, void* buf, size_t* size) {
      auto* ctx = reinterpret_cast<const XzLookInStream*>(p);
      *size = std::min(*size, ctx->size - ctx->offset);
      memcpy(buf, ctx->data + ctx->offset, *size);
      return SZ_OK;
    }
    static SRes SeekImpl(const ILookInStream* p, Int64* pos, ESzSeek origin) {
      auto* ctx = reinterpret_cast<XzLookInStream*>(const_cast<ILookInStream*>(p));
      switch (origin) {
        case SZ_SEEK_SET:
          ctx->offset = *pos;
          break;
        case SZ_SEEK_CUR:
          ctx->offset += *pos;
          break;
        case SZ_SEEK_END:
          ctx->offset = ctx->size + *pos;
          break;
      }
      *pos = ctx->offset;
      return SZ_OK;
    }
    static SRes ProgressImpl(const ICompressProgress*, UInt64, UInt64) { return SZ_OK; }
    size_t offset;
    uint8_t* data;
    size_t size;
  };
  XzLookInStream callbacks;
  callbacks.Look = &XzLookInStream::LookImpl;
  callbacks.Skip = &XzLookInStream::SkipImpl;
  callbacks.Read = &XzLookInStream::ReadImpl;
  callbacks.Seek = &XzLookInStream::SeekImpl;
  callbacks.Progress = &XzLookInStream::ProgressImpl;
  callbacks.offset = 0;
  callbacks.data = compressed_data.get();
  callbacks.size = compressed_size_;

  // Iterate over the internal XZ blocks without decompressing them.
  CXzs xzs;
  Xzs_Construct(&xzs);
  Int64 end_offset = compressed_size_;
  if (Xzs_ReadBackward(&xzs, &callbacks, &end_offset, &callbacks, &alloc) == SZ_OK) {
    blocks_.reserve(Xzs_GetNumBlocks(&xzs));
    size_t dst_offset = 0;
    for (int s = xzs.num - 1; s >= 0; s--) {
      const CXzStream& stream = xzs.streams[s];
      size_t src_offset = stream.startOffset + XZ_STREAM_HEADER_SIZE;
      for (size_t b = 0; b < stream.numBlocks; b++) {
        const CXzBlockSizes& block = stream.blocks[b];
        blocks_.push_back(XzBlock{
            .decompressed_data = nullptr,  // Lazy allocation and decompression.
            .decompressed_size = static_cast<uint32_t>(block.unpackSize),
            .compressed_offset = static_cast<uint32_t>(src_offset),
            .compressed_size = static_cast<uint32_t>((block.totalSize + 3) & ~3u),
            .stream_flags = stream.flags,
        });
        dst_offset += blocks_.back().decompressed_size;
        src_offset += blocks_.back().compressed_size;
      }
    }
    size_ = dst_offset;
    total_size_ += dst_offset;
  }
  Xzs_Free(&xzs, &alloc);
  return !blocks_.empty();
}

bool MemoryXz::Decompress(XzBlock* block) {
  static ISzAlloc alloc;
  alloc.Alloc = [](ISzAllocPtr, size_t size) { return malloc(size); };
  alloc.Free = [](ISzAllocPtr, void* ptr) { return free(ptr); };

  // Read the compressed data for this block.
  std::unique_ptr<uint8_t[]> compressed_data(new (std::nothrow) uint8_t[block->compressed_size]);
  if (compressed_data.get() == nullptr) {
    return false;
  }
  if (!compressed_memory_->ReadFully(compressed_addr_ + block->compressed_offset,
                                     compressed_data.get(), block->compressed_size)) {
    return false;
  }

  // Allocate decompressed memory.
  std::unique_ptr<uint8_t[]> decompressed_data(
      new (std::nothrow) uint8_t[block->decompressed_size]);
  if (decompressed_data == nullptr) {
    return false;
  }

  // Decompress.
  CXzUnpacker state{};
  XzUnpacker_Construct(&state, &alloc);
  state.streamFlags = block->stream_flags;
  XzUnpacker_PrepareToRandomBlockDecoding(&state);
  size_t decompressed_size = block->decompressed_size;
  size_t compressed_size = block->compressed_size;
  ECoderStatus status;
  XzUnpacker_SetOutBuf(&state, decompressed_data.get(), decompressed_size);
  int return_val =
      XzUnpacker_Code(&state, /*decompressed_data=*/nullptr, &decompressed_size,
                      compressed_data.get(), &compressed_size, true, CODER_FINISH_END, &status);
  XzUnpacker_Free(&state);
  if (return_val != SZ_OK || status != CODER_STATUS_FINISHED_WITH_MARK) {
    log(0, "Can not decompress \"%s\"", name_.c_str());
    return false;
  }

  used_ += block->decompressed_size;
  total_used_ += block->decompressed_size;
  if (kLogMemoryXzUsage) {
    log(0, "decompressed memory: %zi%% of %ziKB (%zi files), %i%% of %iKB (%s)",
        100 * total_used_ / total_size_, total_size_ / 1024, total_open_.load(),
        100 * used_ / size_, size_ / 1024, name_.c_str());
  }

  block->decompressed_data = std::move(decompressed_data);
  return true;
}

}  // namespace unwindstack