/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "verification-inl.h"

#include <unistd.h>

#include <deque>
#include <iomanip>
#include <set>
#include <sstream>
#include <string>
#include <utility>

#include <android-base/unique_fd.h>

#include "art_field-inl.h"
#include "base/file_utils.h"
#include "base/logging.h"
#include "mirror/class-inl.h"
#include "mirror/object-refvisitor-inl.h"

30 namespace art HIDDEN {
31 namespace gc {
32 
DumpRAMAroundAddress(uintptr_t addr,uintptr_t bytes) const33 std::string Verification::DumpRAMAroundAddress(uintptr_t addr, uintptr_t bytes) const {
34   uintptr_t* dump_start = reinterpret_cast<uintptr_t*>(addr - bytes);
35   uintptr_t* dump_end = reinterpret_cast<uintptr_t*>(addr + bytes);
36   std::ostringstream oss;
37   oss << " adjacent_ram=";
38 
39   {
40     // Check if the RAM is accessible.
41     android::base::unique_fd read_fd, write_fd;
42     if (!android::base::Pipe(&read_fd, &write_fd)) {
43       LOG(WARNING) << "Could not create pipe, RAM being dumped may be unaccessible";
44     } else {
45       size_t count = 2 * bytes;
46       if (write(write_fd.get(), dump_start, count) != static_cast<ssize_t>(count)) {
47         oss << "unaccessible";
48         dump_start = dump_end;
49       }
50     }
51   }
52 
53   for (const uintptr_t* p = dump_start; p < dump_end; ++p) {
54     if (p == reinterpret_cast<uintptr_t*>(addr)) {
55       // Marker of where the address is.
56       oss << "|";
57     }
58     oss << std::hex << std::setfill('0') << std::setw(sizeof(uintptr_t) * 2) << *p << " ";
59   }
60   return oss.str();
61 }
62 
DumpObjectInfo(const void * addr,const char * tag) const63 std::string Verification::DumpObjectInfo(const void* addr, const char* tag) const {
64   std::ostringstream oss;
65   oss << tag << "=" << addr;
66   if (IsValidHeapObjectAddress(addr)) {
67     mirror::Object* obj = reinterpret_cast<mirror::Object*>(const_cast<void*>(addr));
68     mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
69     oss << " klass=" << klass;
70     if (IsValidClass(klass)) {
71       oss << "(" << klass->PrettyClass() << ")";
72       if (klass->IsArrayClass<kVerifyNone>()) {
73         oss << " length=" << obj->AsArray<kVerifyNone>()->GetLength();
74       }
75     } else {
76       oss << " <invalid address>";
77     }
78     space::Space* const space = heap_->FindSpaceFromAddress(addr);
79     if (space != nullptr) {
80       oss << " space=" << *space;
81     }
82     accounting::CardTable* card_table = heap_->GetCardTable();
83     if (card_table->AddrIsInCardTable(addr)) {
84       oss << " card=" << static_cast<size_t>(
85           card_table->GetCard(reinterpret_cast<const mirror::Object*>(addr)));
86     }
87     // Dump adjacent RAM.
88     oss << DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(addr), 4 * kObjectAlignment);
89   } else {
90     oss << " <invalid address>";
91   }
92   return oss.str();
93 }
94 
LogHeapCorruption(ObjPtr<mirror::Object> holder,MemberOffset offset,mirror::Object * ref,bool fatal) const95 void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder,
96                                      MemberOffset offset,
97                                      mirror::Object* ref,
98                                      bool fatal) const {
99   // Highest priority logging first.
100   // Buffer the output in the string stream since it is more important than the stack traces
101   // and we want it to have log priority. The stack traces are printed from Runtime::Abort
102   // which is called from LOG(FATAL) but before the abort message.
103   std::ostringstream oss;
104   oss << "GC tried to mark invalid reference " << ref << std::endl;
105   oss << DumpObjectInfo(ref, "ref") << "\n";
106   oss << DumpObjectInfo(holder.Ptr(), "holder") << "\n";
107   if (holder != nullptr) {
108     mirror::Class* holder_klass = holder->GetClass<kVerifyNone, kWithoutReadBarrier>();
109     if (IsValidClass(holder_klass)) {
110       oss << " field_offset=" << offset.Uint32Value();
111       ArtField* field = holder->FindFieldByOffset(offset);
112       if (field != nullptr) {
113         oss << " name=" << field->GetName();
114       }
115     }
116     mirror::HeapReference<mirror::Object>* addr = holder->GetFieldObjectReferenceAddr(offset);
117     oss << " reference addr"
118         << DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(addr), 4 * kObjectAlignment);
119   }
120   Runtime::Current()->GetHeap()->DumpSpaces(oss);
121   MemMap::DumpMaps(oss, /* terse= */ true);
122 
123   if (fatal) {
124     LOG(FATAL) << oss.str();
125   } else {
126     LOG(FATAL_WITHOUT_ABORT) << oss.str();
127   }
128 }
129 
IsAddressInHeapSpace(const void * addr,space::Space ** out_space) const130 bool Verification::IsAddressInHeapSpace(const void* addr, space::Space** out_space) const {
131   space::Space* const space = heap_->FindSpaceFromAddress(addr);
132   if (space != nullptr) {
133     if (out_space != nullptr) {
134       *out_space = space;
135     }
136     return true;
137   }
138   return false;
139 }
140 
IsValidHeapObjectAddress(const void * addr,space::Space ** out_space) const141 bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out_space) const {
142   return IsAligned<kObjectAlignment>(addr) && IsAddressInHeapSpace(addr, out_space);
143 }
144 
145 using ObjectSet = std::set<mirror::Object*>;
146 using WorkQueue = std::deque<std::pair<mirror::Object*, std::string>>;
147 
148 // Use for visiting the GcRoots held live by ArtFields, ArtMethods, and ClassLoaders.
149 class Verification::BFSFindReachable {
150  public:
BFSFindReachable(ObjectSet * visited)151   explicit BFSFindReachable(ObjectSet* visited) : visited_(visited) {}
152 
operator ()(mirror::Object * obj,MemberOffset offset,bool is_static) const153   void operator()(mirror::Object* obj, MemberOffset offset, [[maybe_unused]] bool is_static) const
154       REQUIRES_SHARED(Locks::mutator_lock_) {
155     ArtField* field = obj->FindFieldByOffset(offset);
156     Visit(obj->GetFieldObject<mirror::Object>(offset),
157           field != nullptr ? field->GetName() : "");
158   }
159 
VisitRootIfNonNull(mirror::CompressedReference<mirror::Object> * root) const160   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
161       REQUIRES_SHARED(Locks::mutator_lock_) {
162     if (!root->IsNull()) {
163       VisitRoot(root);
164     }
165   }
166 
VisitRoot(mirror::CompressedReference<mirror::Object> * root) const167   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
168       REQUIRES_SHARED(Locks::mutator_lock_) {
169     Visit(root->AsMirrorPtr(), "!nativeRoot");
170   }
171 
Visit(mirror::Object * ref,const std::string & field_name) const172   void Visit(mirror::Object* ref, const std::string& field_name) const
173       REQUIRES_SHARED(Locks::mutator_lock_) {
174     if (ref != nullptr && visited_->insert(ref).second) {
175       new_visited_.emplace_back(ref, field_name);
176     }
177   }
178 
NewlyVisited() const179   const WorkQueue& NewlyVisited() const {
180     return new_visited_;
181   }
182 
183  private:
184   ObjectSet* visited_;
185   mutable WorkQueue new_visited_;
186 };
187 
188 class Verification::CollectRootVisitor : public SingleRootVisitor {
189  public:
CollectRootVisitor(ObjectSet * visited,WorkQueue * work)190   CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
191 
VisitRoot(mirror::Object * obj,const RootInfo & info)192   void VisitRoot(mirror::Object* obj, const RootInfo& info)
193       override REQUIRES_SHARED(Locks::mutator_lock_) {
194     if (obj != nullptr && visited_->insert(obj).second) {
195       std::ostringstream oss;
196       oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
197       work_->emplace_back(obj, oss.str());
198     }
199   }
200 
201  private:
202   ObjectSet* const visited_;
203   WorkQueue* const work_;
204 };
205 
FirstPathFromRootSet(ObjPtr<mirror::Object> target) const206 std::string Verification::FirstPathFromRootSet(ObjPtr<mirror::Object> target) const {
207   Runtime* const runtime =  Runtime::Current();
208   std::set<mirror::Object*> visited;
209   std::deque<std::pair<mirror::Object*, std::string>> work;
210   {
211     CollectRootVisitor root_visitor(&visited, &work);
212     runtime->VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
213   }
214   while (!work.empty()) {
215     auto pair = work.front();
216     work.pop_front();
217     if (pair.first == target) {
218       return pair.second;
219     }
220     BFSFindReachable visitor(&visited);
221     pair.first->VisitReferences(visitor, VoidFunctor());
222     for (auto&& pair2 : visitor.NewlyVisited()) {
223       std::ostringstream oss;
224       mirror::Object* obj = pair2.first;
225       oss << pair.second << " -> " << obj << "(" << obj->PrettyTypeOf() << ")." << pair2.second;
226       work.emplace_back(obj, oss.str());
227     }
228   }
229   return "<no path found>";
230 }
231 
}  // namespace gc
}  // namespace art