/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

namespace android {

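// Records an allocation covering [begin, end).  Zero-length allocations are
// recorded as a single byte so they keep a distinct key in the map.  Returns
// false if the range collides with an allocation that was already recorded.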
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

// Sanitizers may consider certain memory inaccessible through certain pointers.
// With MTE this will need to use unchecked instructions or disable tag checking globally.
static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
    __attribute__((no_sanitize("address", "hwaddress"))) {
  return *reinterpret_cast<uintptr_t*>(word_ptr);
}

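// Reads the word at word_ptr and checks whether its value points into a
// recorded allocation.  On a hit, returns the allocation's range and info.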
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page.  If so, it will be
  // caught and handled by mmapping a zero page over the faulting page.
  uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

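// Marks every allocation transitively reachable from root as referenced,
// using an explicit work list instead of recursion.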
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}

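// Widens the range of mapped addresses; Allocation() treats any allocation
// outside this range as a fatal error.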
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}

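// Registers a memory range to be scanned for references during the walk.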
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

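// Registers individual pointer-sized values to be scanned as potential
// references, in addition to the root ranges above.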
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

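// Walks all registered roots and root values, marking every reachable
// allocation; anything left unmarked afterwards is a leak candidate.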
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}

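// Reports every unreferenced allocation: copies at most limit leaked ranges
// into leaked, while num_leaks_out and leak_bytes_out count all leaks.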
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

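// Maps a fresh anonymous (zero-filled) page over the page containing addr so
// that the interrupted read can be retried without faulting again.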
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

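// SIGSEGV handler active during the walk.  A fault at the address currently
// being read is logged once, counted, and papered over with MapOverPage();
// any other fault resets the handler and lets the signal be handled normally.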
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

Allocator<ScopedSignalHandler::SignalFnMap>::unique_ptr ScopedSignalHandler::handler_map_;

}  // namespace android