/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <unistd.h>

#include <bionic/mte.h>

#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

namespace android {
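// UntagAddress: on aarch64, pointer tags (from TBI/MTE) live in the top byte,
// so masking with (1 << 56) - 1 strips the tag; other architectures return the
// address unchanged.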
static inline uintptr_t UntagAddress(uintptr_t addr) {
#if defined(__aarch64__)
  constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
  addr = addr & mask;
#endif
  return addr;
}

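// Records a live allocation [begin, end).  Zero-length allocations are widened
// to one byte so they remain distinct keys in the allocation map.  Returns true
// if the range was newly inserted, false if it duplicates or overlaps an
// existing range (overlaps are logged as errors).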
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  begin = UntagAddress(begin);
  end = UntagAddress(end);
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

// Sanitizers and MTE may consider certain memory inaccessible through certain pointers.
// With MTE we set PSTATE.TCO during the access to suppress tag checks.
static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
    __attribute__((no_sanitize("address", "hwaddress"))) {
  ScopedDisableMTE x;
  return *reinterpret_cast<uintptr_t*>(word_ptr);
}

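// Reads the word at word_ptr and reports whether it points into a recorded
// allocation; on a hit, *range and *info are set to the matching entry.
// walking_ptr_ is set around the read so that HandleSegFault can tell whether
// a fault came from this walk.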
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range,
                                           AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page. If so, it will be
  // caught and handled by mmaping a zero page over the faulting page.
  uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
  value = UntagAddress(value);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

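// Marks every allocation reachable from root, using an explicit work list
// instead of recursion on the C stack.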
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}

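// Widens the range of valid mappings; Allocation() aborts on allocations that
// fall outside this range.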
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}

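// Registers a memory range to be scanned for pointers when detecting leaks.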
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

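// Registers explicit pointer values (e.g. saved register contents) to be
// treated as roots.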
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

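// Walks all registered root ranges, then the buffer of registered root values,
// marking every reachable allocation.  Always returns true; pages skipped due
// to faults are only reported in the log.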
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}

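// Collects allocations that were not marked by DetectLeaks().  Reports the
// total leak count and byte count through the out parameters, but copies at
// most `limit` leaked ranges into `leaked`.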
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

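// Illustrative sketch (not part of this file) of how a caller such as
// MemUnreachable might drive this API; the allocator, ranges, and variable
// names below are hypothetical:
//
//   HeapWalker heap_walker(allocator);
//   heap_walker.Mapping(mapping.begin, mapping.end);  // optional bounds check
//   heap_walker.Allocation(chunk.begin, chunk.end);   // one call per live chunk
//   heap_walker.Root(stack.begin, stack.end);         // scan this range for pointers
//   heap_walker.Root(register_values);                // explicit pointer values
//   heap_walker.DetectLeaks();
//   allocator::vector<Range> leaked(allocator);
//   size_t num_leaks = 0;
//   size_t leak_bytes = 0;
//   heap_walker.Leaked(leaked, 100, &num_leaks, &leak_bytes);
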
// Maps a fresh, zero-filled, read-only page over the page containing addr so
// that the walk can continue past memory it failed to read.
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

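// SIGSEGV handler installed while walking the heap.  Faults at addresses other
// than the word currently being read are re-raised by resetting the handler;
// faults on the walked word are logged once and papered over with MapOverPage
// so the walk can continue.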
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

Allocator<ScopedSignalHandler::SignalFnMap>::unique_ptr ScopedSignalHandler::handler_map_;

}  // namespace android