1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stdio.h>
18 #include <stdlib.h>
19
20 #include <fstream>
21 #include <functional>
22 #include <iostream>
23 #include <string>
24 #include <vector>
25 #include <set>
26 #include <map>
27 #include <unordered_set>
28
29 #include "android-base/stringprintf.h"
30
31 #include "art_field-inl.h"
32 #include "art_method-inl.h"
33 #include "base/unix_file/fd_file.h"
34 #include "class_linker.h"
35 #include "gc/space/image_space.h"
36 #include "gc/heap.h"
37 #include "mirror/class-inl.h"
38 #include "mirror/object-inl.h"
39 #include "image.h"
40 #include "oat.h"
41 #include "oat_file.h"
42 #include "oat_file_manager.h"
43 #include "os.h"
44 #include "scoped_thread_state_change-inl.h"
45
46 #include "cmdline.h"
47 #include "backtrace/BacktraceMap.h"
48
49 #include <sys/stat.h>
50 #include <sys/types.h>
51 #include <signal.h>
52
53 namespace art {
54
55 using android::base::StringPrintf;
56
57 namespace {
58
59 constexpr size_t kMaxAddressPrint = 5;
60
61 enum class ProcessType {
62 kZygote,
63 kRemote
64 };
65
66 enum class RemoteProcesses {
67 kImageOnly,
68 kZygoteOnly,
69 kImageAndZygote
70 };
71
72 struct MappingData {
73 // The count of pages that are considered dirty by the OS.
74 size_t dirty_pages = 0;
75 // The count of pages that differ by at least one byte.
76 size_t different_pages = 0;
77 // The count of differing bytes.
78 size_t different_bytes = 0;
79 // The count of differing four-byte units.
80 size_t different_int32s = 0;
81 // The count of pages that have mapping count == 1.
82 size_t private_pages = 0;
83 // The count of private pages that are also dirty.
84 size_t private_dirty_pages = 0;
85 // The count of pages that are marked dirty but do not differ.
86 size_t false_dirty_pages = 0;
87 // Set of the local virtual page indices that are dirty.
88 std::set<size_t> dirty_page_set;
89 };
90
91 static std::string GetClassDescriptor(mirror::Class* klass)
92 REQUIRES_SHARED(Locks::mutator_lock_) {
93 CHECK(klass != nullptr);
94
95 std::string descriptor;
96 const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);
97
98 return std::string(descriptor_str);
99 }
100
101 static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
102 REQUIRES_SHARED(Locks::mutator_lock_) {
103 std::ostringstream oss;
104 switch (field->GetTypeAsPrimitiveType()) {
105 case Primitive::kPrimNot: {
106 oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
107 field->GetOffset());
108 break;
109 }
110 case Primitive::kPrimBoolean: {
111 oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
112 break;
113 }
114 case Primitive::kPrimByte: {
115 oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
116 break;
117 }
118 case Primitive::kPrimChar: {
119 oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
120 break;
121 }
122 case Primitive::kPrimShort: {
123 oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
124 break;
125 }
126 case Primitive::kPrimInt: {
127 oss << object->GetField32<kVerifyNone>(field->GetOffset());
128 break;
129 }
130 case Primitive::kPrimLong: {
131 oss << object->GetField64<kVerifyNone>(field->GetOffset());
132 break;
133 }
134 case Primitive::kPrimFloat: {
135 oss << object->GetField32<kVerifyNone>(field->GetOffset());
136 break;
137 }
138 case Primitive::kPrimDouble: {
139 oss << object->GetField64<kVerifyNone>(field->GetOffset());
140 break;
141 }
142 case Primitive::kPrimVoid: {
143 oss << "void";
144 break;
145 }
146 }
147 return oss.str();
148 }
149
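// Sort the keys of a map by their (mapped) values, descending, returning {value, key} pairs.
// For example, SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_) yields the dirty byte
// offsets ordered from most to least frequently dirtied.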
150 template <typename K, typename V, typename D>
151 static std::vector<std::pair<V, K>> SortByValueDesc(
152 const std::map<K, D> map,
153 std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
154 // Store value->key so that we can use the default sort from pair which
155 // sorts by value first and then key
156 std::vector<std::pair<V, K>> value_key_vector;
157
158 for (const auto& kv_pair : map) {
159 value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
160 }
161
162 // Sort in reverse (descending order)
163 std::sort(value_key_vector.rbegin(), value_key_vector.rend());
164 return value_key_vector;
165 }
166
167 // Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
168 // Returned pointer will point to inside of remote_contents.
169 template <typename T>
170 static T* FixUpRemotePointer(T* remote_ptr,
171 std::vector<uint8_t>& remote_contents,
172 const backtrace_map_t& boot_map) {
173 if (remote_ptr == nullptr) {
174 return nullptr;
175 }
176
177 uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
178
179 CHECK_LE(boot_map.start, remote);
180 CHECK_GT(boot_map.end, remote);
181
182 off_t boot_offset = remote - boot_map.start;
183
184 return reinterpret_cast<T*>(&remote_contents[boot_offset]);
185 }
186
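// Convert a pointer that points into remote_contents into the corresponding address in the
// locally mapped image (based at image_header).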
187 template <typename T>
188 static T* RemoteContentsPointerToLocal(T* remote_ptr,
189 std::vector<uint8_t>& remote_contents,
190 const ImageHeader& image_header) {
191 if (remote_ptr == nullptr) {
192 return nullptr;
193 }
194
195 uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
196 ptrdiff_t boot_offset = remote - &remote_contents[0];
197
198 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
199
200 return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
201 }
202
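// Size in bytes of an image entry; specialized below for mirror::Object and ArtMethod, and used
// by EntriesDiffer() to compare the local and remote copies of an entry byte-for-byte.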
203 template <typename T> size_t EntrySize(T* entry);
204 template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
205 return object->SizeOf();
206 }
207 template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
208 return sizeof(*art_method);
209 }
210
211 template <typename T>
212 static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
213 return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
214 }
215
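// Bookkeeping shared by the mirror::Object and ArtMethod analyses: counts and collects dirty,
// false-dirty and zygote-dirty entries, plus per-offset dirty byte counts.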
216 template <typename T>
217 struct RegionCommon {
218 public:
219 RegionCommon(std::ostream* os,
220 std::vector<uint8_t>* remote_contents,
221 std::vector<uint8_t>* zygote_contents,
222 const backtrace_map_t& boot_map,
223 const ImageHeader& image_header) :
224 os_(*os),
225 remote_contents_(remote_contents),
226 zygote_contents_(zygote_contents),
227 boot_map_(boot_map),
228 image_header_(image_header),
229 different_entries_(0),
230 dirty_entry_bytes_(0),
231 false_dirty_entry_bytes_(0) {
232 CHECK(remote_contents != nullptr);
233 CHECK(zygote_contents != nullptr);
234 }
235
236 void DumpSamplesAndOffsetCount() {
237 os_ << " sample object addresses: ";
238 for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
239 T* entry = dirty_entries_[i];
240 os_ << reinterpret_cast<void*>(entry) << ", ";
241 }
242 os_ << "\n";
243 os_ << " dirty byte +offset:count list = ";
244 std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
245 SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
246 for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
247 off_t offset = pair.second;
248 size_t count = pair.first;
249 os_ << "+" << offset << ":" << count << ", ";
250 }
251 os_ << "\n";
252 }
253
254 size_t GetDifferentEntryCount() const { return different_entries_; }
255 size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
256 size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
257 size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
258 size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }
259
260 protected:
261 bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
262 REQUIRES_SHARED(Locks::mutator_lock_) {
263 size_t size = EntrySize(entry);
264 size_t page_off = 0;
265 size_t current_page_idx;
266 uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
267 // Iterate every page this entry belongs to
268 do {
269 current_page_idx = entry_address / kPageSize + page_off;
270 if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
271 // This entry is on a dirty page
272 return true;
273 }
274 page_off++;
275 } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
276 return false;
277 }
278
279 void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
280 zygote_dirty_entries_.insert(entry);
281 }
282
283 void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
284 image_dirty_entries_.insert(entry);
285 }
286
287 void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
288 false_dirty_entries_.push_back(entry);
289 false_dirty_entry_bytes_ += EntrySize(entry);
290 }
291
292 // The output stream to write to.
293 std::ostream& os_;
294 // The byte contents of the remote (image) process' image.
295 std::vector<uint8_t>* remote_contents_;
296 // The byte contents of the zygote process' image.
297 std::vector<uint8_t>* zygote_contents_;
298 const backtrace_map_t& boot_map_;
299 const ImageHeader& image_header_;
300
301 // Count of entries that are different.
302 size_t different_entries_;
303
304 // Local entries that are dirty (differ in at least one byte).
305 size_t dirty_entry_bytes_;
306 std::vector<T*> dirty_entries_;
307
308 // Local entries that are clean, but located on dirty pages.
309 size_t false_dirty_entry_bytes_;
310 std::vector<T*> false_dirty_entries_;
311
312 // Image dirty entries
313 // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
314 // If zygote_pid_only_ == false, these are private dirty entries in the application.
315 std::set<T*> image_dirty_entries_;
316
317 // Zygote dirty entries (probably private dirty).
318 // We only add entries here if they differed in both the image and the zygote, so
319 // they are probably private dirty.
320 std::set<T*> zygote_dirty_entries_;
321
322 std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;
323
324 private:
325 DISALLOW_COPY_AND_ASSIGN(RegionCommon);
326 };
327
328 template <typename T>
329 class RegionSpecializedBase : public RegionCommon<T> {
330 };
331
332 // Region analysis for mirror::Objects
333 class ImgObjectVisitor : public ObjectVisitor {
334 public:
335 using ComputeDirtyFunc = std::function<void(mirror::Object* object,
336 const uint8_t* begin_image_ptr,
337 const std::set<size_t>& dirty_pages)>;
338 ImgObjectVisitor(ComputeDirtyFunc dirty_func,
339 const uint8_t* begin_image_ptr,
340 const std::set<size_t>& dirty_pages) :
341 dirty_func_(dirty_func),
342 begin_image_ptr_(begin_image_ptr),
343 dirty_pages_(dirty_pages) { }
344
345 virtual ~ImgObjectVisitor() OVERRIDE { }
346
347 virtual void Visit(mirror::Object* object) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
348 // Sanity check that we are reading a real mirror::Object
349 CHECK(object->GetClass() != nullptr) << "Image object at address "
350 << object
351 << " has null class";
352 if (kUseBakerReadBarrier) {
353 object->AssertReadBarrierState();
354 }
355 dirty_func_(object, begin_image_ptr_, dirty_pages_);
356 }
357
358 private:
359 ComputeDirtyFunc dirty_func_;
360 const uint8_t* begin_image_ptr_;
361 const std::set<size_t>& dirty_pages_;
362 };
363
364 template<>
365 class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
366 public:
367 RegionSpecializedBase(std::ostream* os,
368 std::vector<uint8_t>* remote_contents,
369 std::vector<uint8_t>* zygote_contents,
370 const backtrace_map_t& boot_map,
371 const ImageHeader& image_header,
372 bool dump_dirty_objects)
373 : RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
374 os_(*os),
375 dump_dirty_objects_(dump_dirty_objects) { }
376
377 // Define a common public type name for use by RegionData.
378 using VisitorClass = ImgObjectVisitor;
379
380 void VisitEntries(VisitorClass* visitor,
381 uint8_t* base,
382 PointerSize pointer_size)
383 REQUIRES_SHARED(Locks::mutator_lock_) {
384 RegionCommon<mirror::Object>::image_header_.VisitObjects(visitor, base, pointer_size);
385 }
386
387 void VisitEntry(mirror::Object* entry)
388 REQUIRES_SHARED(Locks::mutator_lock_) {
389 // Unconditionally store the class descriptor in case we need it later
390 mirror::Class* klass = entry->GetClass();
391 class_data_[klass].descriptor = GetClassDescriptor(klass);
392 }
393
394 void AddCleanEntry(mirror::Object* entry)
395 REQUIRES_SHARED(Locks::mutator_lock_) {
396 class_data_[entry->GetClass()].AddCleanObject();
397 }
398
399 void AddFalseDirtyEntry(mirror::Object* entry)
400 REQUIRES_SHARED(Locks::mutator_lock_) {
401 RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
402 class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
403 }
404
405 void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
406 REQUIRES_SHARED(Locks::mutator_lock_) {
407 size_t entry_size = EntrySize(entry);
408 ++different_entries_;
409 dirty_entry_bytes_ += entry_size;
410 // Log dirty count and objects for class objects only.
411 mirror::Class* klass = entry->GetClass();
412 if (klass->IsClassClass()) {
413 // Increment counts for the fields that are dirty
414 const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
415 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
416 for (size_t i = 0; i < entry_size; ++i) {
417 if (current[i] != current_remote[i]) {
418 field_dirty_count_[i]++;
419 }
420 }
421 dirty_entries_.push_back(entry);
422 }
423 class_data_[klass].AddDirtyObject(entry, entry_remote);
424 }
425
426 void DiffEntryContents(mirror::Object* entry,
427 uint8_t* remote_bytes,
428 const uint8_t* base_ptr,
429 bool log_dirty_objects)
430 REQUIRES_SHARED(Locks::mutator_lock_) {
431 const char* tabs = " ";
432 // Attempt to find fields for all dirty bytes.
433 mirror::Class* klass = entry->GetClass();
434 if (entry->IsClass()) {
435 os_ << tabs
436 << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
437 } else {
438 os_ << tabs
439 << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
440 }
441
442 std::unordered_set<ArtField*> dirty_instance_fields;
443 std::unordered_set<ArtField*> dirty_static_fields;
444 // Examine the bytes comprising the Object, computing which fields are dirty
445 // and recording them for later display. If the Object is an array object,
446 // compute the dirty entries.
447 mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
448 for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
449 if (base_ptr[i] != remote_bytes[i]) {
450 ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
451 if (field != nullptr) {
452 dirty_instance_fields.insert(field);
453 } else if (entry->IsClass()) {
454 field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
455 if (field != nullptr) {
456 dirty_static_fields.insert(field);
457 }
458 }
459 if (field == nullptr) {
460 if (klass->IsArrayClass()) {
461 mirror::Class* component_type = klass->GetComponentType();
462 Primitive::Type primitive_type = component_type->GetPrimitiveType();
463 size_t component_size = Primitive::ComponentSize(primitive_type);
464 size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
465 if (i >= data_offset) {
466 os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
467 // Skip to next element to prevent spam.
468 i += component_size - 1;
469 continue;
470 }
471 }
472 os_ << tabs << "No field for byte offset " << i << "\n";
473 }
474 }
475 }
476 // Dump different fields.
477 if (!dirty_instance_fields.empty()) {
478 os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
479 for (ArtField* field : dirty_instance_fields) {
480 os_ << tabs << ArtField::PrettyField(field)
481 << " original=" << PrettyFieldValue(field, entry)
482 << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
483 }
484 }
485 if (!dirty_static_fields.empty()) {
486 if (dump_dirty_objects_ && log_dirty_objects) {
487 dirty_objects_.insert(entry);
488 }
489 os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
490 for (ArtField* field : dirty_static_fields) {
491 os_ << tabs << ArtField::PrettyField(field)
492 << " original=" << PrettyFieldValue(field, entry)
493 << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
494 }
495 }
496 os_ << "\n";
497 }
498
499 void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
500 for (mirror::Object* obj : dirty_objects_) {
501 if (obj->IsClass()) {
502 os_ << "Private dirty object: " << obj->AsClass()->PrettyDescriptor() << "\n";
503 }
504 }
505 }
506
507 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
508 // vector of pairs (size_t count, Class*)
509 auto dirty_object_class_values =
510 SortByValueDesc<mirror::Class*, size_t, ClassData>(
511 class_data_,
512 [](const ClassData& d) { return d.dirty_object_count; });
513 os_ << "\n" << " Dirty object count by class:\n";
514 for (const auto& vk_pair : dirty_object_class_values) {
515 size_t dirty_object_count = vk_pair.first;
516 mirror::Class* klass = vk_pair.second;
517 ClassData& class_data = class_data_[klass];
518 size_t object_sizes = class_data.dirty_object_size_in_bytes;
519 float avg_dirty_bytes_per_class =
520 class_data.dirty_object_byte_count * 1.0f / object_sizes;
521 float avg_object_size = object_sizes * 1.0f / dirty_object_count;
522 const std::string& descriptor = class_data.descriptor;
523 os_ << " " << mirror::Class::PrettyClass(klass) << " ("
524 << "objects: " << dirty_object_count << ", "
525 << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
526 << "avg object size: " << avg_object_size << ", "
527 << "class descriptor: '" << descriptor << "'"
528 << ")\n";
529 if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
530 DumpSamplesAndOffsetCount();
531 os_ << " field contents:\n";
532 for (mirror::Object* object : class_data.dirty_objects) {
533 // remote class object
534 auto remote_klass = reinterpret_cast<mirror::Class*>(object);
535 // local class object
536 auto local_klass =
537 RemoteContentsPointerToLocal(remote_klass,
538 *RegionCommon<mirror::Object>::remote_contents_,
539 RegionCommon<mirror::Object>::image_header_);
540 os_ << " " << reinterpret_cast<const void*>(object) << " ";
541 os_ << " class_status (remote): " << remote_klass->GetStatus() << ", ";
542 os_ << " class_status (local): " << local_klass->GetStatus();
543 os_ << "\n";
544 }
545 }
546 }
547 }
548
549 void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
550 // vector of pairs (size_t count, Class*)
551 auto false_dirty_object_class_values =
552 SortByValueDesc<mirror::Class*, size_t, ClassData>(
553 class_data_,
554 [](const ClassData& d) { return d.false_dirty_object_count; });
555 os_ << "\n" << " False-dirty object count by class:\n";
556 for (const auto& vk_pair : false_dirty_object_class_values) {
557 size_t object_count = vk_pair.first;
558 mirror::Class* klass = vk_pair.second;
559 ClassData& class_data = class_data_[klass];
560 size_t object_sizes = class_data.false_dirty_byte_count;
561 float avg_object_size = object_sizes * 1.0f / object_count;
562 const std::string& descriptor = class_data.descriptor;
563 os_ << " " << mirror::Class::PrettyClass(klass) << " ("
564 << "objects: " << object_count << ", "
565 << "avg object size: " << avg_object_size << ", "
566 << "total bytes: " << object_sizes << ", "
567 << "class descriptor: '" << descriptor << "'"
568 << ")\n";
569 }
570 }
571
572 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
573 // vector of pairs (size_t count, Class*)
574 auto clean_object_class_values =
575 SortByValueDesc<mirror::Class*, size_t, ClassData>(
576 class_data_,
577 [](const ClassData& d) { return d.clean_object_count; });
578 os_ << "\n" << " Clean object count by class:\n";
579 for (const auto& vk_pair : clean_object_class_values) {
580 os_ << " " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
581 }
582 }
583
584 private:
585 // Aggregate and detail class data from an image diff.
586 struct ClassData {
587 size_t dirty_object_count = 0;
588 // Track only the byte-per-byte dirtiness (in bytes)
589 size_t dirty_object_byte_count = 0;
590 // Track the object-by-object dirtiness (in bytes)
591 size_t dirty_object_size_in_bytes = 0;
592 size_t clean_object_count = 0;
593 std::string descriptor;
594 size_t false_dirty_byte_count = 0;
595 size_t false_dirty_object_count = 0;
596 std::vector<mirror::Object*> false_dirty_objects;
597 // Remote pointers to dirty objects
598 std::vector<mirror::Object*> dirty_objects;
599
600 void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
601 ++clean_object_count;
602 }
603
604 void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
605 REQUIRES_SHARED(Locks::mutator_lock_) {
606 ++dirty_object_count;
607 dirty_object_byte_count += CountDirtyBytes(object, object_remote);
608 dirty_object_size_in_bytes += EntrySize(object);
609 dirty_objects.push_back(object_remote);
610 }
611
612 void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
613 ++false_dirty_object_count;
614 false_dirty_objects.push_back(object);
615 false_dirty_byte_count += EntrySize(object);
616 }
617
618 private:
619 // Go byte-by-byte and figure out what exactly got dirtied
620 static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
621 REQUIRES_SHARED(Locks::mutator_lock_) {
622 const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
623 const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
624 size_t dirty_bytes = 0;
625 size_t object_size = EntrySize(object1);
626 for (size_t i = 0; i < object_size; ++i) {
627 if (cur1[i] != cur2[i]) {
628 dirty_bytes++;
629 }
630 }
631 return dirty_bytes;
632 }
633 };
634
635 std::ostream& os_;
636 bool dump_dirty_objects_;
637 std::unordered_set<mirror::Object*> dirty_objects_;
638 std::map<mirror::Class*, ClassData> class_data_;
639
640 DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
641 };
642
643 // Region analysis for ArtMethods.
644 class ImgArtMethodVisitor : public ArtMethodVisitor {
645 public:
646 using ComputeDirtyFunc = std::function<void(ArtMethod*,
647 const uint8_t*,
648 const std::set<size_t>&)>;
649 ImgArtMethodVisitor(ComputeDirtyFunc dirty_func,
650 const uint8_t* begin_image_ptr,
651 const std::set<size_t>& dirty_pages) :
652 dirty_func_(dirty_func),
653 begin_image_ptr_(begin_image_ptr),
654 dirty_pages_(dirty_pages) { }
655 virtual ~ImgArtMethodVisitor() OVERRIDE { }
656 virtual void Visit(ArtMethod* method) OVERRIDE {
657 dirty_func_(method, begin_image_ptr_, dirty_pages_);
658 }
659
660 private:
661 ComputeDirtyFunc dirty_func_;
662 const uint8_t* begin_image_ptr_;
663 const std::set<size_t>& dirty_pages_;
664 };
665
666 // Struct and functor for computing offsets of members of ArtMethods.
667 // template <typename RegionType>
668 struct MemberInfo {
669 template <typename T>
670 void operator() (const ArtMethod* method, const T* member_address, const std::string& name) {
671 // Check that member_address is a pointer inside *method.
672 DCHECK(reinterpret_cast<uintptr_t>(method) <= reinterpret_cast<uintptr_t>(member_address));
673 DCHECK(reinterpret_cast<uintptr_t>(member_address) + sizeof(T) <=
674 reinterpret_cast<uintptr_t>(method) + sizeof(ArtMethod));
675 size_t offset =
676 reinterpret_cast<uintptr_t>(member_address) - reinterpret_cast<uintptr_t>(method);
677 offset_to_name_size_.insert({offset, NameAndSize(sizeof(T), name)});
678 }
679
680 struct NameAndSize {
681 size_t size_;
682 std::string name_;
683 NameAndSize(size_t size, const std::string& name) : size_(size), name_(name) { }
684 NameAndSize() : size_(0), name_("INVALID") { }
685 };
686
687 std::map<size_t, NameAndSize> offset_to_name_size_;
688 };
689
690 template<>
691 class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
692 public:
693 RegionSpecializedBase(std::ostream* os,
694 std::vector<uint8_t>* remote_contents,
695 std::vector<uint8_t>* zygote_contents,
696 const backtrace_map_t& boot_map,
697 const ImageHeader& image_header,
698 bool dump_dirty_objects ATTRIBUTE_UNUSED)
699 : RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
700 os_(*os) {
701 // Prepare the table for offset to member lookups.
702 ArtMethod* art_method = reinterpret_cast<ArtMethod*>(&(*remote_contents)[0]);
703 art_method->VisitMembers(member_info_);
704 // Prepare the table for address to symbolic entry point names.
705 BuildEntryPointNames();
706 class_linker_ = Runtime::Current()->GetClassLinker();
707 }
708
709 // Define a common public type name for use by RegionData.
710 using VisitorClass = ImgArtMethodVisitor;
711
712 void VisitEntries(VisitorClass* visitor,
713 uint8_t* base,
714 PointerSize pointer_size)
715 REQUIRES_SHARED(Locks::mutator_lock_) {
716 RegionCommon<ArtMethod>::image_header_.VisitPackedArtMethods(visitor, base, pointer_size);
717 }
718
719 void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
720 REQUIRES_SHARED(Locks::mutator_lock_) {
721 }
722
723 void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
724 }
725
726 void AddFalseDirtyEntry(ArtMethod* method)
727 REQUIRES_SHARED(Locks::mutator_lock_) {
728 RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
729 }
730
731 void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
732 REQUIRES_SHARED(Locks::mutator_lock_) {
733 size_t entry_size = EntrySize(method);
734 ++different_entries_;
735 dirty_entry_bytes_ += entry_size;
736 // Increment counts for the fields that are dirty
737 const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
738 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
739 // ArtMethods always log their dirty count and entries.
740 for (size_t i = 0; i < entry_size; ++i) {
741 if (current[i] != current_remote[i]) {
742 field_dirty_count_[i]++;
743 }
744 }
745 dirty_entries_.push_back(method);
746 }
747
748 void DiffEntryContents(ArtMethod* method,
749 uint8_t* remote_bytes,
750 const uint8_t* base_ptr,
751 bool log_dirty_objects ATTRIBUTE_UNUSED)
752 REQUIRES_SHARED(Locks::mutator_lock_) {
753 const char* tabs = " ";
754 os_ << tabs << "ArtMethod " << ArtMethod::PrettyMethod(method) << "\n";
755
756 std::unordered_set<size_t> dirty_members;
757 // Examine the members comprising the ArtMethod, computing which members are dirty.
758 for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
759 const size_t offset = p.first;
760 if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) {
761 dirty_members.insert(p.first);
762 }
763 }
764 // Dump different fields.
765 if (!dirty_members.empty()) {
766 os_ << tabs << "Dirty members " << dirty_members.size() << "\n";
767 for (size_t offset : dirty_members) {
768 const MemberInfo::NameAndSize& member_info = member_info_.offset_to_name_size_[offset];
769 os_ << tabs << member_info.name_
770 << " original=" << StringFromBytes(base_ptr + offset, member_info.size_)
771 << " remote=" << StringFromBytes(remote_bytes + offset, member_info.size_)
772 << "\n";
773 }
774 }
775 os_ << "\n";
776 }
777
778 void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
779 }
780
781 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
782 DumpSamplesAndOffsetCount();
783 os_ << " offset to field map:\n";
784 for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
785 const size_t offset = p.first;
786 const size_t size = p.second.size_;
787 os_ << StringPrintf(" %zu-%zu: ", offset, offset + size - 1)
788 << p.second.name_
789 << std::endl;
790 }
791
792 os_ << " field contents:\n";
793 for (ArtMethod* method : dirty_entries_) {
794 // remote method
795 auto art_method = reinterpret_cast<ArtMethod*>(method);
796 // remote class
797 mirror::Class* remote_declaring_class =
798 FixUpRemotePointer(art_method->GetDeclaringClass(),
799 *RegionCommon<ArtMethod>::remote_contents_,
800 RegionCommon<ArtMethod>::boot_map_);
801 // local class
802 mirror::Class* declaring_class =
803 RemoteContentsPointerToLocal(remote_declaring_class,
804 *RegionCommon<ArtMethod>::remote_contents_,
805 RegionCommon<ArtMethod>::image_header_);
806 DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
807 }
808 }
809
810 void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
811 os_ << "\n" << " False-dirty ArtMethods\n";
812 os_ << " field contents:\n";
813 for (ArtMethod* method : false_dirty_entries_) {
814 // local class
815 mirror::Class* declaring_class = method->GetDeclaringClass();
816 DumpOneArtMethod(method, declaring_class, nullptr);
817 }
818 }
819
820 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
821 }
822
823 private:
824 std::ostream& os_;
825 MemberInfo member_info_;
826 std::map<const void*, std::string> entry_point_names_;
827 ClassLinker* class_linker_;
828
829 // Compute a map of addresses to names in the boot OAT file(s).
830 void BuildEntryPointNames() {
831 OatFileManager& oat_file_manager = Runtime::Current()->GetOatFileManager();
832 std::vector<const OatFile*> boot_oat_files = oat_file_manager.GetBootOatFiles();
833 for (const OatFile* oat_file : boot_oat_files) {
834 const OatHeader& oat_header = oat_file->GetOatHeader();
835 const void* i2ib = oat_header.GetInterpreterToInterpreterBridge();
836 if (i2ib != nullptr) {
837 entry_point_names_[i2ib] = "InterpreterToInterpreterBridge (from boot oat file)";
838 }
839 const void* i2ccb = oat_header.GetInterpreterToCompiledCodeBridge();
840 if (i2ccb != nullptr) {
841 entry_point_names_[i2ccb] = "InterpreterToCompiledCodeBridge (from boot oat file)";
842 }
843 const void* jdl = oat_header.GetJniDlsymLookup();
844 if (jdl != nullptr) {
845 entry_point_names_[jdl] = "JniDlsymLookup (from boot oat file)";
846 }
847 const void* qgjt = oat_header.GetQuickGenericJniTrampoline();
848 if (qgjt != nullptr) {
849 entry_point_names_[qgjt] = "QuickGenericJniTrampoline (from boot oat file)";
850 }
851 const void* qrt = oat_header.GetQuickResolutionTrampoline();
852 if (qrt != nullptr) {
853 entry_point_names_[qrt] = "QuickResolutionTrampoline (from boot oat file)";
854 }
855 const void* qict = oat_header.GetQuickImtConflictTrampoline();
856 if (qict != nullptr) {
857 entry_point_names_[qict] = "QuickImtConflictTrampoline (from boot oat file)";
858 }
859 const void* q2ib = oat_header.GetQuickToInterpreterBridge();
860 if (q2ib != nullptr) {
861 entry_point_names_[q2ib] = "QuickToInterpreterBridge (from boot oat file)";
862 }
863 }
864 }
865
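// Pretty-print the raw bytes of an ArtMethod member: 4- and 8-byte values that match a known
// stub or trampoline are printed symbolically, everything else is printed as hex.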
866 std::string StringFromBytes(const uint8_t* bytes, size_t size) {
867 switch (size) {
868 case 1:
869 return StringPrintf("%" PRIx8, *bytes);
870 case 2:
871 return StringPrintf("%" PRIx16, *reinterpret_cast<const uint16_t*>(bytes));
872 case 4:
873 case 8: {
874 // Compute an address if the bytes might contain one.
875 uint64_t intval;
876 if (size == 4) {
877 intval = *reinterpret_cast<const uint32_t*>(bytes);
878 } else {
879 intval = *reinterpret_cast<const uint64_t*>(bytes);
880 }
881 const void* addr = reinterpret_cast<const void*>(intval);
882 // Match the address against those that have Is* methods in the ClassLinker.
883 if (class_linker_->IsQuickToInterpreterBridge(addr)) {
884 return "QuickToInterpreterBridge";
885 } else if (class_linker_->IsQuickGenericJniStub(addr)) {
886 return "QuickGenericJniStub";
887 } else if (class_linker_->IsQuickResolutionStub(addr)) {
888 return "QuickResolutionStub";
889 } else if (class_linker_->IsJniDlsymLookupStub(addr)) {
890 return "JniDlsymLookupStub";
891 }
892 // Match the address against those that we saved from the boot OAT files.
893 if (entry_point_names_.find(addr) != entry_point_names_.end()) {
894 return entry_point_names_[addr];
895 }
896 return StringPrintf("%" PRIx64, intval);
897 }
898 default:
899 LOG(WARNING) << "Don't know how to convert " << size << " bytes to integer";
900 return "<UNKNOWN>";
901 }
902 }
903
904 void DumpOneArtMethod(ArtMethod* art_method,
905 mirror::Class* declaring_class,
906 mirror::Class* remote_declaring_class)
907 REQUIRES_SHARED(Locks::mutator_lock_) {
908 PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
909 os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
910 os_ << " entryPointFromJni: "
911 << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
912 os_ << " entryPointFromQuickCompiledCode: "
913 << reinterpret_cast<const void*>(
914 art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
915 << ", ";
916 os_ << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
917 // Null for runtime methods.
918 if (declaring_class != nullptr) {
919 os_ << " class_status (local): " << declaring_class->GetStatus();
920 }
921 if (remote_declaring_class != nullptr) {
922 os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
923 }
924 os_ << "\n";
925 }
926
927 DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
928 };
929
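// Drives the per-region analysis: visits every entry, classifies it as clean, dirty, or
// false-dirty against the remote (and, if given, zygote) contents, and dumps the reports
// provided by RegionSpecializedBase<T>.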
930 template <typename T>
931 class RegionData : public RegionSpecializedBase<T> {
932 public:
933 RegionData(std::ostream* os,
934 std::vector<uint8_t>* remote_contents,
935 std::vector<uint8_t>* zygote_contents,
936 const backtrace_map_t& boot_map,
937 const ImageHeader& image_header,
938 bool dump_dirty_objects)
939 : RegionSpecializedBase<T>(os,
940 remote_contents,
941 zygote_contents,
942 boot_map,
943 image_header,
944 dump_dirty_objects),
945 os_(*os) {
946 CHECK(remote_contents != nullptr);
947 CHECK(zygote_contents != nullptr);
948 }
949
950 // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
951 // collecting and reporting data regarding dirty, difference, etc.
952 void ProcessRegion(const MappingData& mapping_data,
953 RemoteProcesses remotes,
954 const uint8_t* begin_image_ptr)
955 REQUIRES_SHARED(Locks::mutator_lock_) {
956 typename RegionSpecializedBase<T>::VisitorClass visitor(
957 [this](T* entry,
958 const uint8_t* begin_image_ptr,
959 const std::set<size_t>& dirty_page_set) REQUIRES_SHARED(Locks::mutator_lock_) {
960 this->ComputeEntryDirty(entry, begin_image_ptr, dirty_page_set);
961 },
962 begin_image_ptr,
963 mapping_data.dirty_page_set);
964 PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
965 RegionSpecializedBase<T>::VisitEntries(&visitor,
966 const_cast<uint8_t*>(begin_image_ptr),
967 pointer_size);
968
969 // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
970 // TODO: fix this now that there are multiple regions in a mapping.
971 float true_dirtied_percent =
972 RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);
973
974 // Entry specific statistics.
975 os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n "
976 << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n "
977 << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n "
978 << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n "
979 << true_dirtied_percent << " different entries-vs-total in a dirty page;\n "
980 << "\n";
981
982 const uint8_t* base_ptr = begin_image_ptr;
983 switch (remotes) {
984 case RemoteProcesses::kZygoteOnly:
985 os_ << " Zygote shared dirty entries: ";
986 break;
987 case RemoteProcesses::kImageAndZygote:
988 os_ << " Application dirty entries (private dirty): ";
989 // If we are dumping private dirty, diff against the zygote map to make it clearer what
990 // fields caused the page to be private dirty.
991 base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
992 break;
993 case RemoteProcesses::kImageOnly:
994 os_ << " Application dirty entries (unknown whether private or shared dirty): ";
995 break;
996 }
997 DiffDirtyEntries(ProcessType::kRemote,
998 begin_image_ptr,
999 RegionCommon<T>::remote_contents_,
1000 base_ptr,
1001 /*log_dirty_objects*/true);
1002 // Print shared dirty after since it's less important.
1003 if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
1004 // We only reach this point if both pids were specified. Furthermore,
1005 // entries are only displayed here if they differed in both the image
1006 // and the zygote, so they are probably private dirty.
1007 CHECK(remotes == RemoteProcesses::kImageAndZygote);
1008 os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
1009 DiffDirtyEntries(ProcessType::kZygote,
1010 begin_image_ptr,
1011 RegionCommon<T>::zygote_contents_,
1012 begin_image_ptr,
1013 /*log_dirty_objects*/false);
1014 }
1015 RegionSpecializedBase<T>::DumpDirtyObjects();
1016 RegionSpecializedBase<T>::DumpDirtyEntries();
1017 RegionSpecializedBase<T>::DumpFalseDirtyEntries();
1018 RegionSpecializedBase<T>::DumpCleanEntries();
1019 }
1020
1021 private:
1022 std::ostream& os_;
1023
1024 void DiffDirtyEntries(ProcessType process_type,
1025 const uint8_t* begin_image_ptr,
1026 std::vector<uint8_t>* contents,
1027 const uint8_t* base_ptr,
1028 bool log_dirty_objects)
1029 REQUIRES_SHARED(Locks::mutator_lock_) {
1030 os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
1031 const std::set<T*>& entries =
1032 (process_type == ProcessType::kZygote) ?
1033 RegionCommon<T>::zygote_dirty_entries_:
1034 RegionCommon<T>::image_dirty_entries_;
1035 for (T* entry : entries) {
1036 uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
1037 ptrdiff_t offset = entry_bytes - begin_image_ptr;
1038 uint8_t* remote_bytes = &(*contents)[offset];
1039 RegionSpecializedBase<T>::DiffEntryContents(entry,
1040 remote_bytes,
1041 &base_ptr[offset],
1042 log_dirty_objects);
1043 }
1044 }
1045
1046 void ComputeEntryDirty(T* entry,
1047 const uint8_t* begin_image_ptr,
1048 const std::set<size_t>& dirty_pages)
1049 REQUIRES_SHARED(Locks::mutator_lock_) {
1050 // Set up pointers in the remote and the zygote for comparison.
1051 uint8_t* current = reinterpret_cast<uint8_t*>(entry);
1052 ptrdiff_t offset = current - begin_image_ptr;
1053 T* entry_remote =
1054 reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
1055 const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
1056 const uint8_t* current_zygote =
1057 have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
1058 T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
1059 // Visit and classify entries at the current location.
1060 RegionSpecializedBase<T>::VisitEntry(entry);
1061
1062 // Test private dirty first.
1063 bool is_dirty = false;
1064 if (have_zygote) {
1065 bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
1066 if (private_dirty) {
1067 // Private dirty, app vs zygote.
1068 is_dirty = true;
1069 RegionCommon<T>::AddImageDirtyEntry(entry);
1070 }
1071 if (EntriesDiffer(entry_zygote, entry)) {
1072 // Shared dirty, zygote vs image.
1073 is_dirty = true;
1074 RegionCommon<T>::AddZygoteDirtyEntry(entry);
1075 }
1076 } else if (EntriesDiffer(entry_remote, entry)) {
1077 // Shared or private dirty, app vs image.
1078 is_dirty = true;
1079 RegionCommon<T>::AddImageDirtyEntry(entry);
1080 }
1081 if (is_dirty) {
1082 // TODO: Add support for dirty entries in zygote and image.
1083 RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
1084 } else {
1085 RegionSpecializedBase<T>::AddCleanEntry(entry);
1086 if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
1087 // This entry was either never mutated or got mutated back to the same value.
1088 // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
1089 RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
1090 }
1091 }
1092 }
1093
1094 DISALLOW_COPY_AND_ASSIGN(RegionData);
1095 };
1096
1097 } // namespace
1098
1099
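// Top-level driver: locates the writable boot image mapping of the remote process, reads its
// memory and pagemap data, and reports page- and entry-level dirtiness statistics.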
1100 class ImgDiagDumper {
1101 public:
1102 explicit ImgDiagDumper(std::ostream* os,
1103 const ImageHeader& image_header,
1104 const std::string& image_location,
1105 pid_t image_diff_pid,
1106 pid_t zygote_diff_pid,
1107 bool dump_dirty_objects)
1108 : os_(os),
1109 image_header_(image_header),
1110 image_location_(image_location),
1111 image_diff_pid_(image_diff_pid),
1112 zygote_diff_pid_(zygote_diff_pid),
1113 dump_dirty_objects_(dump_dirty_objects),
1114 zygote_pid_only_(false) {}
1115
1116 bool Init() {
1117 std::ostream& os = *os_;
1118
1119 if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
1120 os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
1121 return false;
1122 }
1123
1124 // To avoid the combinations of command-line argument use cases:
1125 // If the user invoked with only --zygote-diff-pid, shuffle that to
1126 // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
1127 // image_diff_pid_ is now special.
1128 if (image_diff_pid_ < 0) {
1129 image_diff_pid_ = zygote_diff_pid_;
1130 zygote_diff_pid_ = -1;
1131 zygote_pid_only_ = true;
1132 }
1133
1134 {
1135 struct stat sts;
1136 std::string proc_pid_str =
1137 StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
1138 if (stat(proc_pid_str.c_str(), &sts) == -1) {
1139 os << "Process does not exist";
1140 return false;
1141 }
1142 }
1143
1144 // Open /proc/$pid/maps to view memory maps
1145 auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
1146 if (tmp_proc_maps == nullptr) {
1147 os << "Could not read backtrace maps";
1148 return false;
1149 }
1150
1151 bool found_boot_map = false;
1152 // Find the memory map only for boot.art
1153 for (const backtrace_map_t& map : *tmp_proc_maps) {
1154 if (EndsWith(map.name, GetImageLocationBaseName())) {
1155 if ((map.flags & PROT_WRITE) != 0) {
1156 boot_map_ = map;
1157 found_boot_map = true;
1158 break;
1159 }
1160 // In actuality there's more than 1 map, but the second one is read-only.
1161 // The one we care about is the write-able map.
1162 // The readonly maps are guaranteed to be identical, so it's not interesting to compare
1163 // them.
1164 }
1165 }
1166
1167 if (!found_boot_map) {
1168 os << "Could not find map for " << GetImageLocationBaseName();
1169 return false;
1170 }
1171 // Sanity check boot_map_.
1172 CHECK(boot_map_.end >= boot_map_.start);
1173 boot_map_size_ = boot_map_.end - boot_map_.start;
1174
1175 // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
1176 std::string image_file_name =
1177 StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
1178 auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
1179 if (image_map_file == nullptr) {
1180 os << "Failed to open " << image_file_name << " for reading";
1181 return false;
1182 }
1183 std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
1184 if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
1185 os << "Could not fully read file " << image_file_name;
1186 return false;
1187 }
1188
1189 // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
1190 std::vector<uint8_t> tmp_zygote_contents;
1191 if (zygote_diff_pid_ != -1) {
1192 std::string zygote_file_name =
1193 StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_)); // NOLINT [runtime/int]
1194 std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
1195 if (zygote_map_file == nullptr) {
1196 os << "Failed to open " << zygote_file_name << " for reading";
1197 return false;
1198 }
1199 // The boot map should be at the same address.
1200 tmp_zygote_contents.resize(boot_map_size_);
1201 if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
1202 LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
1203 return false;
1204 }
1205 }
1206
1207 // Open /proc/<image_diff_pid_>/pagemap.
1208 std::string pagemap_file_name = StringPrintf(
1209 "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
1210 auto tmp_pagemap_file =
1211 std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
1212 if (tmp_pagemap_file == nullptr) {
1213 os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
1214 return false;
1215 }
1216
1217 // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
1218 const char* clean_pagemap_file_name = "/proc/self/pagemap";
1219 auto tmp_clean_pagemap_file = std::unique_ptr<File>(
1220 OS::OpenFileForReading(clean_pagemap_file_name));
1221 if (tmp_clean_pagemap_file == nullptr) {
1222 os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
1223 return false;
1224 }
1225
1226 auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
1227 if (tmp_kpageflags_file == nullptr) {
1228 os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
1229 return false;
1230 }
1231
1232 auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
1233 if (tmp_kpagecount_file == nullptr) {
1234 os << "Failed to open /proc/kpagecount for reading: " << strerror(errno);
1235 return false;
1236 }
1237
1238 // Commit the mappings, etc.
1239 proc_maps_ = std::move(tmp_proc_maps);
1240 remote_contents_ = std::move(tmp_remote_contents);
1241 zygote_contents_ = std::move(tmp_zygote_contents);
1242 pagemap_file_ = std::move(*tmp_pagemap_file.release());
1243 clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
1244 kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
1245 kpagecount_file_ = std::move(*tmp_kpagecount_file.release());
1246
1247 return true;
1248 }
1249
1250 bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
1251 std::ostream& os = *os_;
1252 os << "IMAGE LOCATION: " << image_location_ << "\n\n";
1253
1254 os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
1255
1256 os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
1257
1258 PrintPidLine("IMAGE", image_diff_pid_);
1259 os << "\n\n";
1260 PrintPidLine("ZYGOTE", zygote_diff_pid_);
1261 bool ret = true;
1262 if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
1263 ret = DumpImageDiff();
1264 os << "\n\n";
1265 }
1266
1267 os << std::flush;
1268
1269 return ret;
1270 }
1271
1272 private:
1273 bool DumpImageDiff()
1274 REQUIRES_SHARED(Locks::mutator_lock_) {
1275 return DumpImageDiffMap();
1276 }
1277
1278 bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
1279 std::ostream& os = *os_;
1280
1281 size_t virtual_page_idx = 0; // Virtual page number (for an absolute memory address)
1282 size_t page_idx = 0; // Page index relative to 0
1283 size_t previous_page_idx = 0; // Previous page index relative to 0
1284
1285
1286 // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
1287 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
1288 ptrdiff_t offset = begin - boot_map_.start;
1289
1290 // We treat the image header as part of the memory map for now
1291 // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
1292 // But it might still be interesting to see if any of the ImageHeader data mutated
1293 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1294 uint8_t* remote_ptr = &remote_contents_[offset];
1295
1296 if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
1297 mapping_data->different_pages++;
1298
1299 // Count the number of 32-bit integers that are different.
1300 for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
1301 uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
1302 const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
1303
1304 if (remote_ptr_int32[i] != local_ptr_int32[i]) {
1305 mapping_data->different_int32s++;
1306 }
1307 }
1308 }
1309 }
1310
1311 std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);
1312
1313 // Iterate through one byte at a time.
1314 ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
1315 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
1316 previous_page_idx = page_idx;
1317 ptrdiff_t offset = begin - boot_map_.start;
1318
1319 // We treat the image header as part of the memory map for now
1320 // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
1321 // But it might still be interesting to see if any of the ImageHeader data mutated
1322 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1323 uint8_t* remote_ptr = &remote_contents_[offset];
1324
1325 virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
1326
1327 // Calculate the page index, relative to the 0th page where the image begins
1328 page_idx = (offset + page_off_begin) / kPageSize;
1329 if (*local_ptr != *remote_ptr) {
1330 // Track number of bytes that are different
1331 mapping_data->different_bytes++;
1332 }
1333
1334 // Independently count the # of dirty pages on the remote side
1335 size_t remote_virtual_page_idx = begin / kPageSize;
1336 if (previous_page_idx != page_idx) {
1337 uint64_t page_count = 0xC0FFEE;
1338 // TODO: virtual_page_idx needs to be from the same process
1339 std::string error_msg;
1340 int dirtiness = (IsPageDirty(&pagemap_file_, // Image-diff-pid procmap
1341 &clean_pagemap_file_, // Self procmap
1342 &kpageflags_file_,
1343 &kpagecount_file_,
1344 remote_virtual_page_idx, // potentially "dirty" page
1345 virtual_page_idx, // true "clean" page
1346 &page_count,
1347 &error_msg));
1348 if (dirtiness < 0) {
1349 os << error_msg;
1350 return false;
1351 } else if (dirtiness > 0) {
1352 mapping_data->dirty_pages++;
1353 mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
1354 }
1355
1356 bool is_dirty = dirtiness > 0;
1357 bool is_private = page_count == 1;
1358
1359 if (page_count == 1) {
1360 mapping_data->private_pages++;
1361 }
1362
1363 if (is_dirty && is_private) {
1364 mapping_data->private_dirty_pages++;
1365 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1366 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1367 if (image_header_.GetImageSection(section).Contains(offset)) {
1368 ++private_dirty_pages_for_section[i];
1369 }
1370 }
1371 }
1372 }
1373 }
1374 mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
1375 // Print low-level (bytes, int32s, pages) statistics.
1376 os << mapping_data->different_bytes << " differing bytes,\n "
1377 << mapping_data->different_int32s << " differing int32s,\n "
1378 << mapping_data->different_pages << " differing pages,\n "
1379 << mapping_data->dirty_pages << " pages are dirty;\n "
1380 << mapping_data->false_dirty_pages << " pages are false dirty;\n "
1381 << mapping_data->private_pages << " pages are private;\n "
1382 << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
1383 << "\n";
1384
1385 size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
1386 private_dirty_pages_for_section.end(),
1387 0u);
1388 os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
1389 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1390 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1391 os << section << " " << image_header_.GetImageSection(section)
1392 << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
1393 }
1394 os << "\n";
1395
1396 return true;
1397 }
1398
1399 // Look at /proc/$pid/mem and only diff the things from there
1400 bool DumpImageDiffMap()
1401 REQUIRES_SHARED(Locks::mutator_lock_) {
1402 std::ostream& os = *os_;
1403 std::string error_msg;
1404
1405 // Walk the bytes and diff against our boot image
1406 os << "\nObserving boot image header at address "
1407 << reinterpret_cast<const void*>(&image_header_)
1408 << "\n\n";
1409
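// image_header_ sits at the very start of the locally mapped boot image, so a byte offset
// into it is also a byte offset into the remote process's boot image mapping.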
1410 const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
1411 const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();
1412
1413 // Adjust range to nearest page
1414 const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
1415 const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
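// The remote mapping reported by /proc/<pid>/maps is page-granular, so the local image
// range is widened to whole pages before being compared against the remote map bounds.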
1416
1417 if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
1418 reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
1419 // Sanity check that we aren't trying to read a completely different boot image
1420 os << "Remote boot map is out of range of local boot map: " <<
1421 "local begin " << reinterpret_cast<const void*>(image_begin) <<
1422 ", local end " << reinterpret_cast<const void*>(image_end) <<
1423 ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
1424 ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
1425 return false;
1426 // If we wanted even more validation we could map the ImageHeader from the file
1427 }
1428
1429 MappingData mapping_data;
1430
1431 os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
1432 << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
1433 if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
1434 return false;
1435 }
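// Record which remote processes we actually have contents for: only the zygote, only the
// process given via --image-diff-pid, or both; the region passes below use this to know
// which remote copies they can diff against.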
1436 RemoteProcesses remotes;
1437 if (zygote_pid_only_) {
1438 remotes = RemoteProcesses::kZygoteOnly;
1439 } else if (zygote_diff_pid_ > 0) {
1440 remotes = RemoteProcesses::kImageAndZygote;
1441 } else {
1442 remotes = RemoteProcesses::kImageOnly;
1443 }
1444
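// The two region passes below attribute the page-level dirtiness to individual
// mirror::Object and ArtMethod entries in the image; --dump-dirty-objects additionally
// lists the dirty objects of interest (see GetUsage()).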
1445 // Check all the mirror::Object entries in the image.
1446 RegionData<mirror::Object> object_region_data(os_,
1447 &remote_contents_,
1448 &zygote_contents_,
1449 boot_map_,
1450 image_header_,
1451 dump_dirty_objects_);
1452 object_region_data.ProcessRegion(mapping_data,
1453 remotes,
1454 image_begin_unaligned);
1455
1456 // Check all the ArtMethod entries in the image.
1457 RegionData<ArtMethod> artmethod_region_data(os_,
1458 &remote_contents_,
1459 &zygote_contents_,
1460 boot_map_,
1461 image_header_,
1462 dump_dirty_objects_);
1463 artmethod_region_data.ProcessRegion(mapping_data,
1464 remotes,
1465 image_begin_unaligned);
1466 return true;
1467 }
1468
1469 static bool GetPageFrameNumber(File* page_map_file,
1470 size_t virtual_page_index,
1471 uint64_t* page_frame_number,
1472 std::string* error_msg) {
1473 CHECK(page_map_file != nullptr);
1474 CHECK(page_frame_number != nullptr);
1475 CHECK(error_msg != nullptr);
1476
1477 constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
1478 constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1; // bits 0-54 [in /proc/$pid/pagemap]
1479 constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55); // bit 55 [in /proc/$pid/pagemap]
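// Layout of a /proc/<pid>/pagemap entry (one uint64_t per virtual page), per
// Documentation/vm/pagemap.txt:
//   bits 0-54: page frame number (PFN) if the page is present
//   bit 55:    soft-dirty
//   bit 63:    page present
// Hence the entry for virtual page index i lives at byte offset i * sizeof(uint64_t),
// which is the offset passed to PreadFully() below.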
1480
1481 uint64_t page_map_entry = 0;
1482
1483 // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
1484 if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
1485 virtual_page_index * kPageMapEntrySize)) {
1486 *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
1487 page_map_file->GetPath().c_str());
1488 return false;
1489 }
1490
1491 // TODO: seems useless, remove this.
1492 bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
1493 if ((false)) {
1494 LOG(VERBOSE) << soft_dirty; // Suppress unused warning
1495 UNREACHABLE();
1496 }
1497
1498 *page_frame_number = page_map_entry & kPageFrameNumberMask;
1499
1500 return true;
1501 }
1502
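// Decides whether the remote page has diverged from the clean local copy of the same image
// page. Both processes map the same boot image file, so a page neither of them has written
// still shares one physical frame; once the remote process dirties the page it gets a
// private copy with a different page frame number.
// Returns 1 if the two PFNs differ (remote page is dirty), 0 if they are identical, and -1
// on error with *error_msg set. *page_count receives the remote frame's mapping count from
// /proc/kpagecount.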
1503 static int IsPageDirty(File* page_map_file,
1504 File* clean_pagemap_file,
1505 File* kpageflags_file,
1506 File* kpagecount_file,
1507 size_t virtual_page_idx,
1508 size_t clean_virtual_page_idx,
1509 // Out parameters:
1510 uint64_t* page_count, std::string* error_msg) {
1511 CHECK(page_map_file != nullptr);
1512 CHECK(clean_pagemap_file != nullptr);
1513 CHECK_NE(page_map_file, clean_pagemap_file);
1514 CHECK(kpageflags_file != nullptr);
1515 CHECK(kpagecount_file != nullptr);
1516 CHECK(page_count != nullptr);
1517 CHECK(error_msg != nullptr);
1518
1519 // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt
1520
1521 constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
1522 constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
1523 constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4); // in /proc/kpageflags
1524 constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20); // in /proc/kpageflags
1525 constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11); // in /proc/kpageflags
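// /proc/kpageflags and /proc/kpagecount are indexed by physical page frame number rather
// than by virtual address, which is why the PFNs are looked up first via the pagemap files.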
1526
1527 uint64_t page_frame_number = 0;
1528 if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
1529 return -1;
1530 }
1531
1532 uint64_t page_frame_number_clean = 0;
1533 if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
1534 error_msg)) {
1535 return -1;
1536 }
1537
1538 // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
1539 uint64_t kpage_flags_entry = 0;
1540 if (!kpageflags_file->PreadFully(&kpage_flags_entry,
1541 kPageFlagsEntrySize,
1542 page_frame_number * kPageFlagsEntrySize)) {
1543 *error_msg = StringPrintf("Failed to read the page flags from %s",
1544 kpageflags_file->GetPath().c_str());
1545 return -1;
1546 }
1547
1548 // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
1549 if (!kpagecount_file->PreadFully(page_count /*out*/,
1550 kPageCountEntrySize,
1551 page_frame_number * kPageCountEntrySize)) {
1552 *error_msg = StringPrintf("Failed to read the page count from %s",
1553 kpagecount_file->GetPath().c_str());
1554 return -1;
1555 }
1556
1557 // There must be a page frame at the requested address.
1558 CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
1559 // The page frame must be memory mapped
1560 CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);
1561
1562 // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
1563 bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;
1564
1565 // page_frame_number_clean must come from the *same* process
1566 // but a *different* mmap than page_frame_number
1567 if (flags_dirty) {
1568 CHECK_NE(page_frame_number, page_frame_number_clean);
1569 }
1570
1571 return page_frame_number != page_frame_number_clean;
1572 }
1573
1574 void PrintPidLine(const std::string& kind, pid_t pid) {
1575 if (pid < 0) {
1576 *os_ << kind << " DIFF PID: disabled\n\n";
1577 } else {
1578 *os_ << kind << " DIFF PID (" << pid << "): ";
1579 }
1580 }
1581
1582 static bool EndsWith(const std::string& str, const std::string& suffix) {
1583 return str.size() >= suffix.size() &&
1584 str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
1585 }
1586
1587 // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
1588 static std::string BaseName(const std::string& str) {
1589 size_t idx = str.rfind('/');
1590 if (idx == std::string::npos) {
1591 return str;
1592 }
1593
1594 return str.substr(idx + 1);
1595 }
1596
1597 // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
1598 std::string GetImageLocationBaseName() const {
1599 return BaseName(std::string(image_location_));
1600 }
1601
1602 std::ostream* os_;
1603 const ImageHeader& image_header_;
1604 const std::string image_location_;
1605 pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
1606 pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
1607 bool dump_dirty_objects_; // Adds dumping of objects that are dirty.
1608 bool zygote_pid_only_; // The user only specified a pid for the zygote.
1609
1610 // BacktraceMap used for finding the memory mapping of the image file.
1611 std::unique_ptr<BacktraceMap> proc_maps_;
1612 // Boot image mapping.
1613 backtrace_map_t boot_map_{}; // NOLINT
1614 // The size of the boot image mapping.
1615 size_t boot_map_size_;
1616 // The remote process's mapped boot image contents (read from /proc/<image_diff_pid_>/mem).
1617 std::vector<uint8_t> remote_contents_;
1618 // The zygote's mapped boot image contents (read from /proc/<zygote_diff_pid_>/mem).
1619 std::vector<uint8_t> zygote_contents_;
1620 // A File for reading /proc/<image_diff_pid_>/pagemap.
1621 File pagemap_file_;
1622 // A File for reading /proc/self/pagemap.
1623 File clean_pagemap_file_;
1624 // A File for reading /proc/kpageflags.
1625 File kpageflags_file_;
1626 // A File for reading /proc/kpagecount.
1627 File kpagecount_file_;
1628
1629 DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
1630 };
1631
1632 static int DumpImage(Runtime* runtime,
1633 std::ostream* os,
1634 pid_t image_diff_pid,
1635 pid_t zygote_diff_pid,
1636 bool dump_dirty_objects) {
1637 ScopedObjectAccess soa(Thread::Current());
1638 gc::Heap* heap = runtime->GetHeap();
1639 std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
1640 CHECK(!image_spaces.empty());
1641 for (gc::space::ImageSpace* image_space : image_spaces) {
1642 const ImageHeader& image_header = image_space->GetImageHeader();
1643 if (!image_header.IsValid()) {
1644 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
1645 return EXIT_FAILURE;
1646 }
1647
1648 ImgDiagDumper img_diag_dumper(os,
1649 image_header,
1650 image_space->GetImageLocation(),
1651 image_diff_pid,
1652 zygote_diff_pid,
1653 dump_dirty_objects);
1654 if (!img_diag_dumper.Init()) {
1655 return EXIT_FAILURE;
1656 }
1657 if (!img_diag_dumper.Dump()) {
1658 return EXIT_FAILURE;
1659 }
1660 }
1661 return EXIT_SUCCESS;
1662 }
1663
1664 struct ImgDiagArgs : public CmdlineArgs {
1665 protected:
1666 using Base = CmdlineArgs;
1667
1668 virtual ParseStatus ParseCustom(const StringPiece& option,
1669 std::string* error_msg) OVERRIDE {
1670 {
1671 ParseStatus base_parse = Base::ParseCustom(option, error_msg);
1672 if (base_parse != kParseUnknownArgument) {
1673 return base_parse;
1674 }
1675 }
1676
1677 if (option.starts_with("--image-diff-pid=")) {
1678 const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
1679
1680 if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
1681 *error_msg = "Image diff pid out of range";
1682 return kParseError;
1683 }
1684 } else if (option.starts_with("--zygote-diff-pid=")) {
1685 const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
1686
1687 if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
1688 *error_msg = "Zygote diff pid out of range";
1689 return kParseError;
1690 }
1691 } else if (option == "--dump-dirty-objects") {
1692 dump_dirty_objects_ = true;
1693 } else {
1694 return kParseUnknownArgument;
1695 }
1696
1697 return kParseOk;
1698 }
1699
1700 virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
1701 // Perform the parent checks.
1702 ParseStatus parent_checks = Base::ParseChecks(error_msg);
1703 if (parent_checks != kParseOk) {
1704 return parent_checks;
1705 }
1706
1707 // Perform our own checks.
1708
1709 if (kill(image_diff_pid_,
1710 /*sig*/0) != 0) { // No signal is sent, perform error-checking only.
1711 // Check if the pid exists before proceeding.
1712 if (errno == ESRCH) {
1713 *error_msg = "Process specified does not exist";
1714 } else {
1715 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
1716 }
1717 return kParseError;
1718 } else if (instruction_set_ != kRuntimeISA) {
1719 // Don't allow different ISAs since the images are ISA-specific.
1720 // Right now the code assumes both the runtime ISA and the remote ISA are identical.
1721 *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
1722 return kParseError;
1723 }
1724
1725 return kParseOk;
1726 }
1727
1728 virtual std::string GetUsage() const {
1729 std::string usage;
1730
1731 usage +=
1732 "Usage: imgdiag [options] ...\n"
1733 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
1734 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
1735 "\n";
1736
1737 usage += Base::GetUsage();
1738
1739 usage += // Optional.
1740 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
1741 " Example: --image-diff-pid=$(pid zygote)\n"
1742 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
1743 "against.\n"
1744 " Example: --zygote-diff-pid=$(pid zygote)\n"
1745 " --dump-dirty-objects: additionally output dirty objects of interest.\n"
1746 "\n";
1747
1748 return usage;
1749 }
1750
1751 public:
1752 pid_t image_diff_pid_ = -1;
1753 pid_t zygote_diff_pid_ = -1;
1754 bool dump_dirty_objects_ = false;
1755 };
1756
1757 struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
1758 virtual bool ExecuteWithRuntime(Runtime* runtime) {
1759 CHECK(args_ != nullptr);
1760
1761 return DumpImage(runtime,
1762 args_->os_,
1763 args_->image_diff_pid_,
1764 args_->zygote_diff_pid_,
1765 args_->dump_dirty_objects_) == EXIT_SUCCESS;
1766 }
1767 };
1768
1769 } // namespace art
1770
1771 int main(int argc, char** argv) {
1772 art::ImgDiagMain main;
1773 return main.Main(argc, argv);
1774 }
1775