#include "meminspect.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <algorithm>
#include <iostream>
#include <vector>

#include <android-base/unique_fd.h>

#include "ziparchive/zip_archive.h"

using namespace std;
using namespace android::base;

static const VmaRange VMA_RANGE_EMPTY = VmaRange(0, 0);

uint32_t VmaRange::end_offset() const {
    return offset + length;
}

uint64_t VmaRangeGroup::compute_total_size() {
    uint64_t total_size = 0;
    for (auto&& range : ranges) {
        total_size += range.length;
    }
    return total_size;
}

void VmaRangeGroup::apply_offset(uint64_t offset) {
    for (auto&& range : ranges) {
        range.offset += offset;
    }
}

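// Appends to |out_memres| the intersection of |range| with each range
// in this group, i.e. the portions of |range| that this group covers.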
compute_coverage(const VmaRange & range,VmaRangeGroup & out_memres) const29 void VmaRangeGroup::compute_coverage(const VmaRange& range, VmaRangeGroup& out_memres) const {
30     for (auto&& resident_range : ranges) {
31         VmaRange intersect_res = resident_range.intersect(range);
32         if (!intersect_res.is_empty()) {
33             out_memres.ranges.push_back(intersect_res);
34         }
35     }
36 }

bool VmaRange::is_empty() const {
    return length == 0;
}

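// Returns the intersection of this range with |target|, or an empty
// range if the two do not overlap.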
VmaRange VmaRange::intersect(const VmaRange& target) const {
    // Return an empty range when the target lies entirely before or
    // after this range.
    if (target.end_offset() <= this->offset) {
        return VMA_RANGE_EMPTY;
    }
    if (target.offset >= this->end_offset()) {
        return VMA_RANGE_EMPTY;
    }
    VmaRange result;
    // The ranges overlap, so the intersection spans from the higher
    // start offset to the lower end offset.
    result.offset = std::max(target.offset, this->offset);
    uint32_t res_end = std::min(target.end_offset(), end_offset());
    result.length = res_end - result.offset;

    return result;
}

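// Returns a single range spanning both this range and |target|. Returns
// an empty range when the two are disjoint, since their union cannot be
// represented as one contiguous range.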
VmaRange VmaRange::union_merge(const VmaRange& target) const {
    VmaRange result = intersect(target);
    if (result.is_empty()) {
        // Disjoint ranges, no merge.
        return VMA_RANGE_EMPTY;
    }

    // Since there is an intersection, merge the ranges between the
    // lowest and highest offsets.
    result.offset = std::min(offset, target.offset);
    uint32_t res_end = std::max(target.end_offset(), end_offset());
    result.length = res_end - result.offset;
    return result;
}

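// Aligns the start offset of each range down to the nearest |alignment|
// boundary, growing its length so that the end offset is preserved.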
void align_ranges(std::vector<VmaRange>& vmas_to_align, unsigned int alignment) {
    for (auto&& vma_to_align : vmas_to_align) {
        uint32_t unaligned_offset = vma_to_align.offset % alignment;
        vma_to_align.offset -= unaligned_offset;
        vma_to_align.length += unaligned_offset;
    }
}

// Comparator used to sort ranges in ascending offset order.
bool compare_range(VmaRange& a, VmaRange& b) {
    return a.offset < b.offset;
}

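// Collapses overlapping ranges into single ranges, e.g.
// {[0,100), [50,150), [200,250)} becomes {[0,150), [200,250)}.
// Ranges that merely touch are kept separate because union_merge
// requires a nonempty intersection. The result is sorted by offset.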
std::vector<VmaRange> merge_ranges(const std::vector<VmaRange>& ranges) {
    if (ranges.size() <= 1) {
        // Not enough ranges to perform a merge.
        return ranges;
    }

    std::vector<VmaRange> to_merge_ranges = ranges;
    std::vector<VmaRange> merged_ranges;
    // Sort the ranges so that merging only needs to consider
    // neighboring entries.
    std::sort(to_merge_ranges.begin(), to_merge_ranges.end(), compare_range);

    // The first element always starts as-is; each subsequent element
    // either merges into the range being built or starts a new one.
    merged_ranges.push_back(to_merge_ranges[0]);
    for (size_t iMerged = 0, iTarget = 1; iTarget < to_merge_ranges.size(); ++iTarget) {
        VmaRange merged = merged_ranges[iMerged].union_merge(to_merge_ranges[iTarget]);
        if (!merged.is_empty()) {
            // Merge succeeded, swallow the range.
            merged_ranges[iMerged] = merged;
        } else {
            // Merge failed, add the disjoint range.
            merged_ranges.push_back(to_merge_ranges[iTarget]);
            ++iMerged;
        }
    }

    return merged_ranges;
}

int64_t get_file_size(const std::string& file) {
    unique_fd file_ufd(open(file.c_str(), O_RDONLY));
    int fd = file_ufd.get();
    if (fd == -1) {
        return -1;
    }

    struct stat fstat_res;
    int res = fstat(fd, &fstat_res);
    if (res == -1) {
        return -1;
    }

    return fstat_res.st_size;
}

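// mmaps |probed_file| read-only and walks it with mincore(2), inspecting
// |pages_per_mincore| pages per syscall. Each maximal run of resident
// pages is recorded in |resident_ranges| as a VmaRange whose offset is
// relative to the start of the file. Returns 0 on success or a
// MEMINSPECT_FAIL_* code on failure.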
int probe_resident_memory(string probed_file,
                          /*out*/ VmaRangeGroup& resident_ranges, int pages_per_mincore) {
    unique_fd probed_file_ufd(open(probed_file.c_str(), O_RDONLY));
    int probe_fd = probed_file_ufd.get();
    if (probe_fd == -1) {
        return MEMINSPECT_FAIL_OPEN;
    }

    int64_t total_bytes = get_file_size(probed_file);
    if (total_bytes < 0) {
        return MEMINSPECT_FAIL_FSTAT;
    }

    char* base_address =
            (char*)mmap(0, (uint64_t)total_bytes, PROT_READ, MAP_SHARED, probe_fd, /*offset*/ 0);
    if (base_address == MAP_FAILED) {
        // Without a mapping there is nothing for mincore to inspect.
        return MEMINSPECT_FAIL_MINCORE;
    }

    // Residency vector for one mincore call; this determines how many
    // pages are inspected per syscall. A vector avoids leaking the
    // buffer on early returns.
    std::vector<unsigned char> window(pages_per_mincore);

    unsigned int page_size = sysconf(_SC_PAGESIZE);
    unsigned long bytes_inspected = 0;

    // Total bytes in the inspection window.
    unsigned long window_bytes = page_size * pages_per_mincore;

    char* window_base;
    bool started_vma_range = false;
    uint32_t resident_vma_start_offset = 0;
    for (window_base = base_address; bytes_inspected < (uint64_t)total_bytes;
         window_base += window_bytes, bytes_inspected += window_bytes) {
        int res = mincore(window_base, window_bytes, window.data());
        if (res != 0) {
            if (errno == ENOMEM) {
                // The window is not mapped, e.g. a hole in the file; skip it.
                continue;
            }
            munmap(base_address, (uint64_t)total_bytes);
            return MEMINSPECT_FAIL_MINCORE;
        }
        // Inspect the mincore window sequentially; whenever the
        // residency changes, a range is started or closed.
        for (int iWin = 0; iWin < pages_per_mincore; ++iWin) {
            if ((window[iWin] & (unsigned char)1) != 0) {
                // Page is resident.
                if (!started_vma_range) {
                    // Start of a resident range.
                    started_vma_range = true;
                    uint32_t window_offset = iWin * page_size;
                    resident_vma_start_offset = window_base + window_offset - base_address;
                }
            } else {
                // Page is not resident.
                if (started_vma_range) {
                    // End of the current resident range.
                    started_vma_range = false;
                    uint32_t window_offset = iWin * page_size;
                    uint32_t resident_vma_end_offset = window_base + window_offset - base_address;
                    uint32_t resident_len = resident_vma_end_offset - resident_vma_start_offset;
                    VmaRange vma_range(resident_vma_start_offset, resident_len);
                    resident_ranges.ranges.push_back(vma_range);
                }
            }
        }
    }
    // Past the last window, so close any still-open resident range.
    if (started_vma_range) {
        uint32_t in_memory_vma_end = window_base - base_address;
        uint32_t resident_len = in_memory_vma_end - resident_vma_start_offset;
        VmaRange vma_range(resident_vma_start_offset, resident_len);
        resident_ranges.ranges.push_back(vma_range);
    }

    munmap(base_address, (uint64_t)total_bytes);
    return 0;
}

ZipMemInspector::~ZipMemInspector() {
    CloseArchive(handle_);
    delete probe_resident_;
}

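// Intersects this entry's coverage ranges with |probe|, returning a
// ZipEntryCoverage that holds only the portions of this entry that the
// probe observed as resident.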
ZipEntryCoverage ZipEntryCoverage::compute_coverage(const VmaRangeGroup& probe) const {
    ZipEntryCoverage file_coverage;
    file_coverage.info = info;

    // Compute the coverage of each of this entry's ranges against the
    // probe, which represents a set of resident ranges.
    for (auto&& range : coverage.ranges) {
        probe.compute_coverage(range, file_coverage.coverage);
    }

    return file_coverage;
}

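// Computes the coverage of every entry in |files| against |probe|.
// A null probe yields an empty result, as there is nothing to
// intersect against.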
std::vector<ZipEntryCoverage> ZipMemInspector::compute_coverage(
        const std::vector<ZipEntryCoverage>& files, VmaRangeGroup* probe) {
    if (probe == nullptr) {
        // No probe to calculate coverage against, so coverage is zero.
        return std::vector<ZipEntryCoverage>();
    }

    std::vector<ZipEntryCoverage> file_coverages;
    // Find each file's coverage against the provided probe.
    for (auto&& file : files) {
        // For each file, compute coverage against the probe, which
        // represents a list of resident ranges.
        ZipEntryCoverage file_coverage = file.compute_coverage(*probe);
        file_coverages.push_back(file_coverage);
    }

    return file_coverages;
}

void ZipMemInspector::add_file_info(ZipEntryInfo& file) {
    entry_infos_.push_back(file);
}

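// Computes how much of each zip entry is resident in memory. Entry
// metadata is read from the archive if it has not been read yet; each
// entry starts fully covered and is then intersected with the resident
// probe when one is set.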
int ZipMemInspector::compute_per_file_coverage() {
    if (entry_infos_.empty()) {
        // We haven't read the file information yet, so do it now.
        if (read_files_and_offsets()) {
            cerr << "Could not read zip entries to compute coverages." << endl;
            return 1;
        }
    }

    // By default, every existing entry considers its whole extent covered.
    std::vector<ZipEntryCoverage> entry_coverages;
    for (auto&& entry_info : entry_infos_) {
        ZipEntryCoverage entry_coverage;
        entry_coverage.info = entry_info;
        VmaRange file_vma_range(entry_info.offset_in_zip, entry_info.file_size_bytes);
        entry_coverage.coverage.ranges.push_back(file_vma_range);
        entry_coverages.push_back(entry_coverage);
    }

    if (probe_resident_ != nullptr) {
        // We decided to compute coverage based on a probe.
        entry_coverages_ = compute_coverage(entry_coverages, probe_resident_);
    } else {
        // No probe means whole-file coverage.
        entry_coverages_ = entry_coverages;
    }

    return 0;
}

VmaRangeGroup* ZipMemInspector::get_probe() {
    return probe_resident_;
}

void ZipMemInspector::set_existing_probe(VmaRangeGroup* probe) {
    this->probe_resident_ = probe;
}

std::vector<ZipEntryCoverage>& ZipMemInspector::get_file_coverages() {
    return entry_coverages_;
}

int ZipMemInspector::probe_resident() {
    probe_resident_ = new VmaRangeGroup();
    int res = probe_resident_memory(filename_, *probe_resident_);
    if (res != 0) {
        // Failed to probe.
        return res;
    }

    return 0;
}

std::vector<ZipEntryInfo>& ZipMemInspector::get_file_infos() {
    return entry_infos_;
}

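// Iterates over the zip archive, recording each entry's name, offset
// within the zip, and compressed/uncompressed sizes into entry_infos_.
// Returns 0 on success and 1 if the archive could not be opened or
// iterated.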
int ZipMemInspector::read_files_and_offsets() {
    if (OpenArchive(filename_.c_str(), &handle_) < 0) {
        return 1;
    }
    void* cookie;
    int res = StartIteration(handle_, &cookie);
    if (res != 0) {
        return 1;
    }

    ZipEntry64 entry;
    string name;
    while (Next(cookie, &entry, &name) == 0) {
        ZipEntryInfo file;
        file.name = name;
        file.offset_in_zip = entry.offset;
        file.file_size_bytes = entry.compressed_length;
        file.uncompressed_size = entry.uncompressed_length;
        entry_infos_.push_back(file);
    }
    // Release the iteration cookie allocated by StartIteration.
    EndIteration(cookie);
    return 0;
}