1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <stdint.h>
20 #include <string.h>
21 #include <sys/mman.h>
22 #include <sys/ptrace.h>
23 #include <sys/stat.h>
24 #include <sys/types.h>
25 #include <sys/uio.h>
26 #include <unistd.h>
27
28 #include <algorithm>
29 #include <memory>
30 #include <mutex>
31 #include <optional>
32 #include <string>
33
34 #include <android-base/unique_fd.h>
35
36 #include <unwindstack/Log.h>
37 #include <unwindstack/Memory.h>
38
39 #include "MemoryBuffer.h"
40 #include "MemoryCache.h"
41 #include "MemoryFileAtOffset.h"
42 #include "MemoryLocal.h"
43 #include "MemoryOffline.h"
44 #include "MemoryOfflineBuffer.h"
45 #include "MemoryRange.h"
46 #include "MemoryRemote.h"
47
48 namespace unwindstack {
49
// Reads up to |len| bytes at address |remote_src| in process |pid| into |dst|
// using process_vm_readv(2). Returns the number of bytes actually copied,
// which may be less than |len| if part of the remote range is unreadable.
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {

  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements. These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    // One destination iovec covering everything still to be read; the kernel
    // fills it sequentially from the source iovecs below.
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read], .iov_len = len,
    };

    // Build up to kMaxIovecs source iovecs, each confined to a single page so
    // an unreadable page only truncates the transfer at a page boundary.
    size_t iovecs_used = 0;
    while (len > 0) {
      if (iovecs_used == kMaxIovecs) {
        break;
      }

      // struct iovec uses void* for iov_base.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      // Remaining bytes in the page containing cur, capped by the remaining
      // requested length.
      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;
  }
  return total_read;
}
105
// Fetches one word of |pid|'s memory at |addr| via PTRACE_PEEKTEXT.
// On success stores the word in |*value| and returns true.
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  // A returned word of -1 is a perfectly legal value, so the only reliable
  // failure signal for PTRACE_PEEK requests is errno: clear it first, then
  // check it afterwards.
  errno = 0;
  *value = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  return !(*value == -1 && errno != 0);
}
116
// Reads |bytes| bytes at |addr| in process |pid| into |dst| using word-sized
// PTRACE_PEEKTEXT requests, handling an unaligned head and tail. Returns the
// number of bytes actually copied (0 on immediate failure or overflow).
static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
  // Make sure that there is no overflow.
  uint64_t max_size;
  if (__builtin_add_overflow(addr, bytes, &max_size)) {
    return 0;
  }

  size_t bytes_read = 0;
  long data;
  // Head: if addr is not word aligned, peek the containing word and copy out
  // only the bytes at and after addr.
  size_t align_bytes = addr & (sizeof(long) - 1);
  if (align_bytes != 0) {
    if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
      return 0;
    }
    size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
    memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
    addr += copy_bytes;
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
    bytes -= copy_bytes;
    bytes_read += copy_bytes;
  }

  // Body: addr is now word aligned; copy whole words.
  for (size_t i = 0; i < bytes / sizeof(long); i++) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, sizeof(long));
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
    addr += sizeof(long);
    bytes_read += sizeof(long);
  }

  // Tail: copy the leading bytes of one final word, if needed.
  size_t left_over = bytes & (sizeof(long) - 1);
  if (left_over) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, left_over);
    bytes_read += left_over;
  }
  return bytes_read;
}
159
ReadFully(uint64_t addr,void * dst,size_t size)160 bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
161 size_t rc = Read(addr, dst, size);
162 return rc == size;
163 }
164
// Reads a NUL-terminated string starting at |addr| into |dst|, examining at
// most |max_read| bytes. Returns false if no terminator is found within the
// readable bytes or before |max_read|.
bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
  char buffer[256]; // Large enough for 99% of symbol names.
  size_t size = 0; // Number of bytes which were read into the buffer.
  for (size_t offset = 0; offset < max_read; offset += size) {
    // Look for null-terminator first, so we can allocate string of exact size.
    // If we know the end of valid memory range, do the reads in larger blocks.
    size_t read = std::min(sizeof(buffer), max_read - offset);
    size = Read(addr + offset, buffer, read);
    if (size == 0) {
      return false; // We have not found end of string yet and we can not read more data.
    }
    size_t length = strnlen(buffer, size); // Index of the null-terminator.
    if (length < size) {
      // We found the null-terminator. Allocate the string and set its content.
      if (offset == 0) {
        // We did just single read, so the buffer already contains the whole string.
        dst->assign(buffer, length);
        return true;
      } else {
        // The buffer contains only the last block. Read the whole string again.
        dst->assign(offset + length, '\0');
        return ReadFully(addr, dst->data(), dst->size());
      }
    }
  }
  return false;
}
192
CreateFileMemory(const std::string & path,uint64_t offset,uint64_t size)193 std::unique_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset,
194 uint64_t size) {
195 auto memory = std::make_unique<MemoryFileAtOffset>();
196
197 if (memory->Init(path, offset, size)) {
198 return memory;
199 }
200
201 return nullptr;
202 }
203
CreateProcessMemory(pid_t pid)204 std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
205 if (pid == getpid()) {
206 return std::shared_ptr<Memory>(new MemoryLocal());
207 }
208 return std::shared_ptr<Memory>(new MemoryRemote(pid));
209 }
210
CreateProcessMemoryCached(pid_t pid)211 std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
212 if (pid == getpid()) {
213 return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
214 }
215 return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
216 }
217
CreateProcessMemoryThreadCached(pid_t pid)218 std::shared_ptr<Memory> Memory::CreateProcessMemoryThreadCached(pid_t pid) {
219 if (pid == getpid()) {
220 return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryLocal()));
221 }
222 return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryRemote(pid)));
223 }
224
CreateOfflineMemory(const uint8_t * data,uint64_t start,uint64_t end)225 std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start,
226 uint64_t end) {
227 return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end));
228 }
229
Read(uint64_t addr,void * dst,size_t size)230 size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
231 if (addr >= size_) {
232 return 0;
233 }
234
235 size_t bytes_left = size_ - static_cast<size_t>(addr);
236 const unsigned char* actual_base = static_cast<const unsigned char*>(raw_) + addr;
237 size_t actual_len = std::min(bytes_left, size);
238
239 memcpy(dst, actual_base, actual_len);
240 return actual_len;
241 }
242
GetPtr(size_t offset)243 uint8_t* MemoryBuffer::GetPtr(size_t offset) {
244 if (offset < size_) {
245 return &raw_[offset];
246 }
247 return nullptr;
248 }
249
// Unmaps any file data still mapped by this object.
MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}
253
// Releases the current mapping, if any. data_ points offset_ bytes past the
// page-aligned mmap base (see Init), so back up by offset_ to recover the
// address and total length that were actually mapped.
void MemoryFileAtOffset::Clear() {
  if (data_) {
    munmap(&data_[-offset_], size_ + offset_);
    data_ = nullptr;
  }
}
260
// Maps |file| read-only starting at |offset| for up to |size| bytes.
// mmap offsets must be page aligned, so the sub-page remainder of |offset|
// is stored in offset_ and data_ points past it into the mapping. If
// |offset| + |size| overflows or reaches past EOF, the mapping extends to
// the end of the file instead. Returns false on any failure.
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // Split the requested offset into a page-aligned mmap offset plus the
  // remainder within that page.
  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // Start with everything from the aligned offset to EOF, then shrink to the
  // requested size (plus the in-page remainder) when that is smaller and the
  // addition does not overflow.
  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  // Expose only the bytes at and after the caller's requested offset.
  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}
300
Read(uint64_t addr,void * dst,size_t size)301 size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
302 if (addr >= size_) {
303 return 0;
304 }
305
306 size_t bytes_left = size_ - static_cast<size_t>(addr);
307 const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
308 size_t actual_len = std::min(bytes_left, size);
309
310 memcpy(dst, actual_base, actual_len);
311 return actual_len;
312 }
313
// Reads |size| bytes at |addr| from the remote process. The first strategy
// that returns data (process_vm_readv preferred, ptrace as fallback) is
// cached in the atomic read_redirect_func_ and used directly on subsequent
// calls.
size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  // If a read strategy was already chosen, use it without re-probing.
  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}
344
// Reads the current process's own memory through process_vm_readv rather
// than a direct memcpy, so unreadable addresses fail cleanly instead of
// faulting.
size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
  return ProcessVmRead(getpid(), addr, dst, size);
}
348
// A MemoryRange exposes the window [begin, begin + length) of |memory| at
// translated addresses starting at |offset|.
MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}
352
Read(uint64_t addr,void * dst,size_t size)353 size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
354 if (addr < offset_) {
355 return 0;
356 }
357
358 uint64_t read_offset = addr - offset_;
359 if (read_offset >= length_) {
360 return 0;
361 }
362
363 uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
364 uint64_t read_addr;
365 if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
366 return 0;
367 }
368
369 return memory_->Read(read_addr, dst, read_length);
370 }
371
// Takes ownership of |memory| and indexes it by the end address of the
// range it covers. Returns false — and deletes |memory| — when a range with
// the same end address is already present.
bool MemoryRanges::Insert(MemoryRange* memory) {
  uint64_t last_addr;
  if (__builtin_add_overflow(memory->offset(), memory->length(), &last_addr)) {
    // This should never happen in the real world. However, it is possible
    // that an offset in a mapped in segment could be crafted such that
    // this value overflows. In that case, clamp the value to the max uint64
    // value.
    last_addr = UINT64_MAX;
  }
  auto entry = maps_.try_emplace(last_addr, memory);
  if (entry.second) {
    return true;
  }
  delete memory;
  return false;
}
388
Read(uint64_t addr,void * dst,size_t size)389 size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
390 auto entry = maps_.upper_bound(addr);
391 if (entry != maps_.end()) {
392 return entry->second->Read(addr, dst, size);
393 }
394 return 0;
395 }
396
// Initializes from an offline dump in |file| at |offset|: the first
// uint64_t in the dump is the start address of the captured memory and the
// remainder of the file is its contents.
bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
  auto memory_file = std::make_shared<MemoryFileAtOffset>();
  if (!memory_file->Init(file, offset)) {
    return false;
  }

  // The first uint64_t value is the start of memory.
  uint64_t start;
  if (!memory_file->ReadFully(0, &start, sizeof(start))) {
    return false;
  }

  // The content length is everything after the start-address header.
  uint64_t size = memory_file->Size();
  if (__builtin_sub_overflow(size, sizeof(start), &size)) {
    return false;
  }

  // Map file offsets [sizeof(start), sizeof(start) + size) to target
  // addresses beginning at |start|.
  memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
  return true;
}
417
Init(const std::string & file,uint64_t offset,uint64_t start,uint64_t size)418 bool MemoryOffline::Init(const std::string& file, uint64_t offset, uint64_t start, uint64_t size) {
419 auto memory_file = std::make_shared<MemoryFileAtOffset>();
420 if (!memory_file->Init(file, offset)) {
421 return false;
422 }
423
424 memory_ = std::make_unique<MemoryRange>(memory_file, 0, size, start);
425 return true;
426 }
427
Read(uint64_t addr,void * dst,size_t size)428 size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
429 if (!memory_) {
430 return 0;
431 }
432
433 return memory_->Read(addr, dst, size);
434 }
435
// Wraps a caller-owned buffer |data| representing target memory in the
// address range [start, end). The data is borrowed, not copied.
MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}
438
// Re-points this object at a different buffer/address range; as in the
// constructor, |data| is borrowed, not copied.
void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}
444
Read(uint64_t addr,void * dst,size_t size)445 size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
446 if (addr < start_ || addr >= end_) {
447 return 0;
448 }
449
450 size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
451 memcpy(dst, &data_[addr - start_], read_length);
452 return read_length;
453 }
454
~MemoryOfflineParts()455 MemoryOfflineParts::~MemoryOfflineParts() {
456 for (auto memory : memories_) {
457 delete memory;
458 }
459 }
460
Read(uint64_t addr,void * dst,size_t size)461 size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
462 if (memories_.empty()) {
463 return 0;
464 }
465
466 // Do a read on each memory object, no support for reading across the
467 // different memory objects.
468 for (MemoryOffline* memory : memories_) {
469 size_t bytes = memory->Read(addr, dst, size);
470 if (bytes != 0) {
471 return bytes;
472 }
473 }
474 return 0;
475 }
476
// Reads |size| bytes at |addr| through the page cache |cache|. Pages of
// kCacheSize bytes (aligned to kCacheBits) are populated from impl_ on
// demand; if a page cannot be fully read, its entry is dropped and the read
// falls back to an uncached impl_->Read.
size_t MemoryCacheBase::InternalCachedRead(uint64_t addr, void* dst, size_t size,
                                           CacheDataType* cache) {
  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache->find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    // Not cached: default-insert an entry for this page and fill it.
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  // Bytes available in this page from addr to the page end.
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry, since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache->find(addr_page);
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
518
// Discards all cached pages; the lock keeps this consistent with concurrent
// CachedRead calls.
void MemoryCache::Clear() {
  std::lock_guard<std::mutex> lock(cache_lock_);
  cache_.clear();
}
523
CachedRead(uint64_t addr,void * dst,size_t size)524 size_t MemoryCache::CachedRead(uint64_t addr, void* dst, size_t size) {
525 // Use a single lock since this object is not designed to be performant
526 // for multiple object reading from multiple threads.
527 std::lock_guard<std::mutex> lock(cache_lock_);
528
529 return InternalCachedRead(addr, dst, size, &cache_);
530 }
531
MemoryThreadCache(Memory * memory)532 MemoryThreadCache::MemoryThreadCache(Memory* memory) : MemoryCacheBase(memory) {
533 thread_cache_ = std::make_optional<pthread_t>();
534 if (pthread_key_create(&*thread_cache_, [](void* memory) {
535 CacheDataType* cache = reinterpret_cast<CacheDataType*>(memory);
536 delete cache;
537 }) != 0) {
538 Log::AsyncSafe("Failed to create pthread key.");
539 thread_cache_.reset();
540 }
541 }
542
// Frees the calling thread's cache and releases the TLS key.
// NOTE(review): per POSIX, pthread_key_delete does not invoke the key's
// destructor for values still held by other live threads, so their caches
// are only reclaimed when those threads exit before this destructor runs.
MemoryThreadCache::~MemoryThreadCache() {
  if (thread_cache_) {
    CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
    delete cache;
    pthread_key_delete(*thread_cache_);
  }
}
550
CachedRead(uint64_t addr,void * dst,size_t size)551 size_t MemoryThreadCache::CachedRead(uint64_t addr, void* dst, size_t size) {
552 if (!thread_cache_) {
553 return impl_->Read(addr, dst, size);
554 }
555
556 CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
557 if (cache == nullptr) {
558 cache = new CacheDataType;
559 pthread_setspecific(*thread_cache_, cache);
560 }
561
562 return InternalCachedRead(addr, dst, size, cache);
563 }
564
Clear()565 void MemoryThreadCache::Clear() {
566 if (!thread_cache_) {
567 return;
568 }
569
570 CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
571 if (cache != nullptr) {
572 delete cache;
573 pthread_setspecific(*thread_cache_, nullptr);
574 }
575 }
576
577 } // namespace unwindstack
578