• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <string.h>
20 #include <sys/mman.h>
21 #include <sys/ptrace.h>
22 #include <sys/stat.h>
23 #include <sys/types.h>
24 #include <sys/uio.h>
25 #include <unistd.h>
26 
27 #include <algorithm>
28 #include <memory>
29 
30 #include <android-base/unique_fd.h>
31 
32 #include <unwindstack/Memory.h>
33 
34 #include "Check.h"
35 
36 namespace unwindstack {
37 
// Reads `len` bytes from process `pid` at address `remote_src` into `dst`
// using process_vm_readv. Returns the number of bytes actually read, which
// may be less than `len`; on failure errno is set by the syscall (or to
// EFAULT for addresses that cannot be represented).
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {
  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements.  These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read], .iov_len = len,
    };

    // Build one batch of up to kMaxIovecs page-bounded source iovecs.
    size_t iovecs_used = 0;
    size_t batch_len = 0;  // Total bytes covered by this batch.
    while (len > 0 && iovecs_used < kMaxIovecs) {
      // struct iovec uses void* for iov_base, so the address must fit in a
      // uintptr_t.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      // Limit each iovec to the end of its page so an unreadable page only
      // truncates the transfer at a page boundary.
      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      batch_len += iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;
    // BUGFIX: a short batch means an iovec hit an unreadable region. Stop
    // here: previously the loop continued with `cur` already advanced past
    // the hole while the destination offset (total_read) was not, so later
    // batches copied remote bytes to the wrong offsets in dst.
    if (static_cast<size_t>(rc) < batch_len) {
      return total_read;
    }
  }
  return total_read;
}
93 
// Reads one word from the traced process `pid` at `addr` via PTRACE_PEEKTEXT.
// Returns false if the peek failed. ptrace() reports failure by returning -1
// and setting errno, so errno is cleared beforehand to tell a legitimate -1
// result apart from an error.
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  errno = 0;
  *value = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  return !(*value == -1 && errno != 0);
}
104 
PtraceRead(pid_t pid,uint64_t addr,void * dst,size_t bytes)105 static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
106   // Make sure that there is no overflow.
107   uint64_t max_size;
108   if (__builtin_add_overflow(addr, bytes, &max_size)) {
109     return 0;
110   }
111 
112   size_t bytes_read = 0;
113   long data;
114   size_t align_bytes = addr & (sizeof(long) - 1);
115   if (align_bytes != 0) {
116     if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
117       return 0;
118     }
119     size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
120     memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
121     addr += copy_bytes;
122     dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
123     bytes -= copy_bytes;
124     bytes_read += copy_bytes;
125   }
126 
127   for (size_t i = 0; i < bytes / sizeof(long); i++) {
128     if (!PtraceReadLong(pid, addr, &data)) {
129       return bytes_read;
130     }
131     memcpy(dst, &data, sizeof(long));
132     dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
133     addr += sizeof(long);
134     bytes_read += sizeof(long);
135   }
136 
137   size_t left_over = bytes & (sizeof(long) - 1);
138   if (left_over) {
139     if (!PtraceReadLong(pid, addr, &data)) {
140       return bytes_read;
141     }
142     memcpy(dst, &data, left_over);
143     bytes_read += left_over;
144   }
145   return bytes_read;
146 }
147 
ReadFully(uint64_t addr,void * dst,size_t size)148 bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
149   size_t rc = Read(addr, dst, size);
150   return rc == size;
151 }
152 
ReadString(uint64_t addr,std::string * string,uint64_t max_read)153 bool Memory::ReadString(uint64_t addr, std::string* string, uint64_t max_read) {
154   string->clear();
155   uint64_t bytes_read = 0;
156   while (bytes_read < max_read) {
157     uint8_t value;
158     if (!ReadFully(addr, &value, sizeof(value))) {
159       return false;
160     }
161     if (value == '\0') {
162       return true;
163     }
164     string->push_back(value);
165     addr++;
166     bytes_read++;
167   }
168   return false;
169 }
170 
CreateProcessMemory(pid_t pid)171 std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
172   if (pid == getpid()) {
173     return std::shared_ptr<Memory>(new MemoryLocal());
174   }
175   return std::shared_ptr<Memory>(new MemoryRemote(pid));
176 }
177 
CreateProcessMemoryCached(pid_t pid)178 std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
179   if (pid == getpid()) {
180     return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
181   }
182   return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
183 }
184 
Read(uint64_t addr,void * dst,size_t size)185 size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
186   if (addr >= raw_.size()) {
187     return 0;
188   }
189 
190   size_t bytes_left = raw_.size() - static_cast<size_t>(addr);
191   const unsigned char* actual_base = static_cast<const unsigned char*>(raw_.data()) + addr;
192   size_t actual_len = std::min(bytes_left, size);
193 
194   memcpy(dst, actual_base, actual_len);
195   return actual_len;
196 }
197 
GetPtr(size_t offset)198 uint8_t* MemoryBuffer::GetPtr(size_t offset) {
199   if (offset < raw_.size()) {
200     return &raw_[offset];
201   }
202   return nullptr;
203 }
204 
// Releases the file mapping, if any, via Clear().
MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}
208 
Clear()209 void MemoryFileAtOffset::Clear() {
210   if (data_) {
211     munmap(&data_[-offset_], size_ + offset_);
212     data_ = nullptr;
213   }
214 }
215 
// Maps up to `size` bytes of `file`, starting at byte `offset`, read-only.
// mmap requires a page-aligned file offset, so the mapping starts at the
// enclosing page boundary; offset_ records the sub-page remainder so that
// data_/size_ expose exactly the bytes from `offset` onward, and Clear() can
// undo the adjustment. Returns false on open/stat/mmap failure or when
// `offset` is at or past the end of the file.
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // Split offset into the sub-page remainder (offset_) and the page-aligned
  // part used for mmap. Note ~(getpagesize() - 1) is an int that sign
  // extends, so the mask is correct for the full 64 bit offset.
  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  // NOTE(review): this branch looks unreachable — offset < st_size was
  // already established above and aligned_offset <= offset; confirm before
  // removing.
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // Map from aligned_offset to end of file, then truncate to the requested
  // size (plus the sub-page lead-in) when that total is smaller and does not
  // overflow.
  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  // Hide the lead-in bytes from callers: data_ points at the requested
  // offset and size_ counts only the usable bytes. Clear() reverses this.
  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}
255 
Read(uint64_t addr,void * dst,size_t size)256 size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
257   if (addr >= size_) {
258     return 0;
259   }
260 
261   size_t bytes_left = size_ - static_cast<size_t>(addr);
262   const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
263   size_t actual_len = std::min(bytes_left, size);
264 
265   memcpy(dst, actual_base, actual_len);
266   return actual_len;
267 }
268 
// Reads remote process memory, preferring process_vm_readv and falling back
// to ptrace word reads. Whichever mechanism first returns any data is cached
// in read_redirect_func_ and used directly on subsequent calls.
size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  // read_redirect_func_ stores the chosen reader as a uintptr_t (it is
  // loaded here, so presumably an atomic — confirm against the header);
  // convert it back to a function pointer.
  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}
299 
Read(uint64_t addr,void * dst,size_t size)300 size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
301   return ProcessVmRead(getpid(), addr, dst, size);
302 }
303 
// A view of `memory` exposing the underlying bytes [begin, begin + length)
// at view addresses starting from `offset`.
MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}
307 
Read(uint64_t addr,void * dst,size_t size)308 size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
309   if (addr < offset_) {
310     return 0;
311   }
312 
313   uint64_t read_offset = addr - offset_;
314   if (read_offset >= length_) {
315     return 0;
316   }
317 
318   uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
319   uint64_t read_addr;
320   if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
321     return 0;
322   }
323 
324   return memory_->Read(read_addr, dst, read_length);
325 }
326 
// Registers `memory`, keyed by its exclusive end address (offset + length),
// so Read() can find a candidate range with a single upper_bound() lookup.
// NOTE(review): maps_ stores a raw pointer; ownership/lifetime of `memory`
// is not visible in this file — confirm against the class declaration.
void MemoryRanges::Insert(MemoryRange* memory) {
  maps_.emplace(memory->offset() + memory->length(), memory);
}
330 
Read(uint64_t addr,void * dst,size_t size)331 size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
332   auto entry = maps_.upper_bound(addr);
333   if (entry != maps_.end()) {
334     return entry->second->Read(addr, dst, size);
335   }
336   return 0;
337 }
338 
Init(const std::string & file,uint64_t offset)339 bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
340   auto memory_file = std::make_shared<MemoryFileAtOffset>();
341   if (!memory_file->Init(file, offset)) {
342     return false;
343   }
344 
345   // The first uint64_t value is the start of memory.
346   uint64_t start;
347   if (!memory_file->ReadFully(0, &start, sizeof(start))) {
348     return false;
349   }
350 
351   uint64_t size = memory_file->Size();
352   if (__builtin_sub_overflow(size, sizeof(start), &size)) {
353     return false;
354   }
355 
356   memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
357   return true;
358 }
359 
Read(uint64_t addr,void * dst,size_t size)360 size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
361   if (!memory_) {
362     return 0;
363   }
364 
365   return memory_->Read(addr, dst, size);
366 }
367 
// Wraps an external buffer holding the memory contents for addresses
// [start, end). NOTE(review): data_ appears non-owning (nothing in this file
// frees it) — the caller must keep the buffer alive; confirm in the header.
MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}
370 
// Repoints this object at a new buffer covering addresses [start, end).
void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}
376 
Read(uint64_t addr,void * dst,size_t size)377 size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
378   if (addr < start_ || addr >= end_) {
379     return 0;
380   }
381 
382   size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
383   memcpy(dst, &data_[addr - start_], read_length);
384   return read_length;
385 }
386 
// Deletes every part that was added to memories_ (the container owns the
// raw pointers it holds).
MemoryOfflineParts::~MemoryOfflineParts() {
  for (auto memory : memories_) {
    delete memory;
  }
}
392 
Read(uint64_t addr,void * dst,size_t size)393 size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
394   if (memories_.empty()) {
395     return 0;
396   }
397 
398   // Do a read on each memory object, no support for reading across the
399   // different memory objects.
400   for (MemoryOffline* memory : memories_) {
401     size_t bytes = memory->Read(addr, dst, size);
402     if (bytes != 0) {
403       return bytes;
404     }
405   }
406   return 0;
407 }
408 
// Reads through a cache of fixed-size lines (kCacheSize bytes, indexed by
// addr >> kCacheBits). Reads larger than 64 bytes bypass the cache. A small
// read can touch at most two cache lines; the second-line case is handled by
// deliberately duplicated (not looped) code below.
// NOTE(review): cache_ is mutated without any visible locking — confirm
// callers serialize access if this object is shared across threads.
size_t MemoryCache::Read(uint64_t addr, void* dst, size_t size) {
  // Only bother caching and looking at the cache if this is a small read for now.
  if (size > 64) {
    return impl_->Read(addr, dst, size);
  }

  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache_.find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache_.end()) {
    cache_dst = entry->second;
  } else {
    // Miss: operator[] creates the cache line, then it is filled with a
    // full-line read from the underlying memory. If that fails, drop the
    // line and fall back to an uncached read.
    cache_dst = cache_[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache_.erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  // Bytes available in this line from addr to the line's end.
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry, since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache_.find(addr_page);
  if (entry != cache_.end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = cache_[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache_.erase(addr_page);
      // Partial success: the first line's bytes plus whatever a direct read
      // of the remainder produces.
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
454 
455 }  // namespace unwindstack
456