/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fault_handler.h"

#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>

#include <atomic>

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG
#include "base/membarrier.h"
#include "base/stl_util.h"
#include "dex/dex_file_types.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class.h"
#include "mirror/object_reference.h"
#include "oat/oat_file.h"
#include "oat/oat_quick_method_header.h"
#include "sigchain.h"
#include "thread-current-inl.h"
#include "verify_object-inl.h"

namespace art HIDDEN {
// Static fault manager object accessed by signal handler.
FaultManager fault_manager;

// These need to be NO_INLINE since some debuggers do not read the inline-info needed to set a
// breakpoint on functions that have been inlined.
extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigsegv_fault() {
  // Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
  VLOG(signals) << "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
}
extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigbus_fault() {
  // Set a breakpoint here to be informed when a SIGBUS is unhandled by ART.
  VLOG(signals) << "Caught unknown SIGBUS in ART fault handler - chaining to next handler.";
}

// Signal handler called on SIGSEGV.
static bool art_sigsegv_handler(int sig, siginfo_t* info, void* context) {
  return fault_manager.HandleSigsegvFault(sig, info, context);
}

// Signal handler called on SIGBUS.
static bool art_sigbus_handler(int sig, siginfo_t* info, void* context) {
  return fault_manager.HandleSigbusFault(sig, info, context);
}

FaultManager::FaultManager()
    : generated_code_ranges_lock_("FaultHandler generated code ranges lock",
                                  LockLevel::kGenericBottomLock),
      initialized_(false) {}

FaultManager::~FaultManager() {
}

static const char* SignalCodeName(int sig, int code) {
  if (sig == SIGSEGV) {
    switch (code) {
      case SEGV_MAPERR: return "SEGV_MAPERR";
      case SEGV_ACCERR: return "SEGV_ACCERR";
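      // MTE (Memory Tagging Extension) tag check faults; raw values are used because
      // SEGV_MTEAERR/SEGV_MTESERR may not be defined by all headers.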
      case 8:           return "SEGV_MTEAERR";
      case 9:           return "SEGV_MTESERR";
      default:          return "SEGV_UNKNOWN";
    }
  } else if (sig == SIGBUS) {
    switch (code) {
      case BUS_ADRALN: return "BUS_ADRALN";
      case BUS_ADRERR: return "BUS_ADRERR";
      case BUS_OBJERR: return "BUS_OBJERR";
      default:         return "BUS_UNKNOWN";
    }
  } else {
    return "UNKNOWN";
  }
}

static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
  os << "  si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
     << "  si_code: " << info->si_code
     << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
  if (info->si_signo == SIGSEGV || info->si_signo == SIGBUS) {
    os << "\n" << "  si_addr: " << info->si_addr;
  }
  return os;
}

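// Returns whether a dedicated SIGBUS handler is needed, i.e. whether the userfaultfd-based
// mark-compact collector reports faults to mutator threads via SIGBUS.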
static bool InstallSigbusHandler() {
  return gUseUserfaultfd &&
         Runtime::Current()->GetHeap()->MarkCompactCollector()->IsUsingSigbusFeature();
}

void FaultManager::Init(bool use_sig_chain) {
  CHECK(!initialized_);
  if (use_sig_chain) {
    sigset_t mask;
    sigfillset(&mask);
    sigdelset(&mask, SIGABRT);
    sigdelset(&mask, SIGBUS);
    sigdelset(&mask, SIGFPE);
    sigdelset(&mask, SIGILL);
    sigdelset(&mask, SIGSEGV);

    SigchainAction sa = {
        .sc_sigaction = art_sigsegv_handler,
        .sc_mask = mask,
        .sc_flags = 0UL,
    };

    AddSpecialSignalHandlerFn(SIGSEGV, &sa);
    if (InstallSigbusHandler()) {
      sa.sc_sigaction = art_sigbus_handler;
      AddSpecialSignalHandlerFn(SIGBUS, &sa);
    }

    // Notify the kernel that we intend to use a specific `membarrier()` command.
    int result = art::membarrier(MembarrierCommand::kRegisterPrivateExpedited);
    if (result != 0) {
      LOG(WARNING) << "FaultHandler: MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED failed: "
                   << errno << " " << strerror(errno);
    }

    {
      MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
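      // Link the preallocated storage entries into a singly-linked free list,
      // terminated by a null `next` pointer.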
      for (size_t i = 0; i != kNumLocalGeneratedCodeRanges; ++i) {
        GeneratedCodeRange* next = (i + 1u != kNumLocalGeneratedCodeRanges)
            ? &generated_code_ranges_storage_[i + 1u]
            : nullptr;
        generated_code_ranges_storage_[i].next.store(next, std::memory_order_relaxed);
        generated_code_ranges_storage_[i].start = nullptr;
        generated_code_ranges_storage_[i].size = 0u;
      }
      free_generated_code_ranges_ = generated_code_ranges_storage_;
    }

    initialized_ = true;
  } else if (InstallSigbusHandler()) {
    struct sigaction act;
    std::memset(&act, '\0', sizeof(act));
    act.sa_flags = SA_SIGINFO | SA_RESTART;
    act.sa_sigaction = [](int sig, siginfo_t* info, void* context) {
      if (!art_sigbus_handler(sig, info, context)) {
        std::ostringstream oss;
        PrintSignalInfo(oss, info);
        LOG(FATAL) << "Couldn't handle SIGBUS fault:"
                   << "\n"
                   << oss.str();
      }
    };
    if (sigaction(SIGBUS, &act, nullptr)) {
      LOG(FATAL) << "Fault handler for SIGBUS couldn't be set up: " << strerror(errno);
    }
  }
}

void FaultManager::Release() {
  if (initialized_) {
    RemoveSpecialSignalHandlerFn(SIGSEGV, art_sigsegv_handler);
    if (InstallSigbusHandler()) {
      RemoveSpecialSignalHandlerFn(SIGBUS, art_sigbus_handler);
    }
    initialized_ = false;
  }
}

void FaultManager::Shutdown() {
  if (initialized_) {
    Release();

    // Free all handlers.
    STLDeleteElements(&generated_code_handlers_);
    STLDeleteElements(&other_handlers_);

    // Delete remaining code ranges if any (such as nterp code or oat code from
    // oat files that have not been unloaded, including boot image oat files).
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
    generated_code_ranges_.store(nullptr, std::memory_order_release);
    while (range != nullptr) {
      GeneratedCodeRange* next_range = range->next.load(std::memory_order_relaxed);
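      // `std::less` provides a total order over pointers, so this comparison is well-defined
      // even when `range` does not point into `generated_code_ranges_storage_`.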
      std::less<GeneratedCodeRange*> less;
      if (!less(range, generated_code_ranges_storage_) &&
          less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges)) {
        // Nothing to do - not adding `range` to the `free_generated_code_ranges_` anymore.
      } else {
        // Range is not in the `generated_code_ranges_storage_`.
        delete range;
      }
      range = next_range;
    }
  }
}

bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* context) {
  if (other_handlers_.empty()) {
    return false;
  }

  Thread* self = Thread::Current();

  DCHECK(self != nullptr);
  DCHECK(Runtime::Current() != nullptr);
  DCHECK(Runtime::Current()->IsStarted());
  for (const auto& handler : other_handlers_) {
    if (handler->Action(sig, info, context)) {
      return true;
    }
  }
  return false;
}

bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, [[maybe_unused]] void* context) {
  DCHECK_EQ(sig, SIGBUS);
  if (VLOG_IS_ON(signals)) {
    PrintSignalInfo(VLOG_STREAM(signals) << "Handling SIGBUS fault:\n", info);
  }

#ifdef TEST_NESTED_SIGNAL
  // Simulate a crash in a handler.
  raise(SIGBUS);
#endif
  if (Runtime::Current()->GetHeap()->MarkCompactCollector()->SigbusHandler(info)) {
    return true;
  }

  // Set a breakpoint in this function to catch unhandled signals.
  art_sigbus_fault();
  return false;
}

inline void FaultManager::CheckForUnrecognizedImplicitSuspendCheckInBootImage(
    siginfo_t* siginfo, void* context) {
  CHECK_EQ(kRuntimeISA, InstructionSet::kArm64);
  uintptr_t fault_pc = GetFaultPc(siginfo, context);
  if (fault_pc == 0u || !IsUint<32>(fault_pc) || !IsAligned<4u>(fault_pc)) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    return;
  }
  gc::Heap* heap = runtime->GetHeap();
  if (heap == nullptr ||
      fault_pc < heap->GetBootImagesStartAddress() ||
      fault_pc - heap->GetBootImagesStartAddress() >= heap->GetBootImagesSize() ||
      reinterpret_cast<uint32_t*>(fault_pc)[0] != /*LDR x21, [x21]*/ 0xf94002b5u) {
    return;
  }
  std::ostringstream oss;
  oss << "Failed to recognize implicit suspend check at 0x" << std::hex << fault_pc << "; ";
  Thread* thread = Thread::Current();
  if (thread == nullptr) {
    oss << "null thread";
  } else {
    oss << "thread state = " << thread->GetState() << std::boolalpha
        << "; mutator lock shared held = " << Locks::mutator_lock_->IsSharedHeld(thread);
  }
  oss << "; code ranges = {";
  GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
  const char* s = "";
  while (range != nullptr) {
    oss << s << "{" << range->start << ", " << range->size << "}";
    s = ", ";
    range = range->next.load(std::memory_order_relaxed);
  }
  oss << "}";
  LOG(FATAL) << oss.str();
  UNREACHABLE();
}


bool FaultManager::HandleSigsegvFault(int sig, siginfo_t* info, void* context) {
  if (VLOG_IS_ON(signals)) {
    PrintSignalInfo(VLOG_STREAM(signals) << "Handling SIGSEGV fault:\n", info);
  }

#ifdef TEST_NESTED_SIGNAL
  // Simulate a crash in a handler.
  raise(SIGSEGV);
#endif

  if (IsInGeneratedCode(info, context)) {
    VLOG(signals) << "in generated code, looking for handler";
    for (const auto& handler : generated_code_handlers_) {
      VLOG(signals) << "invoking Action on handler " << handler;
      if (handler->Action(sig, info, context)) {
        // We have handled a signal so it's time to return from the
        // signal handler to the appropriate place.
        return true;
      }
    }
  } else if (kRuntimeISA == InstructionSet::kArm64) {
    CheckForUnrecognizedImplicitSuspendCheckInBootImage(info, context);
  }

  // We hit a signal we didn't handle. This might be something we can give
  // more information about, so call all registered handlers to see if it is.
  if (HandleFaultByOtherHandlers(sig, info, context)) {
    return true;
  }

  // Set a breakpoint in this function to catch unhandled signals.
  art_sigsegv_fault();
  return false;
}

void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
  DCHECK(initialized_);
  if (generated_code) {
    generated_code_handlers_.push_back(handler);
  } else {
    other_handlers_.push_back(handler);
  }
}

void FaultManager::RemoveHandler(FaultHandler* handler) {
  auto it = std::find(generated_code_handlers_.begin(), generated_code_handlers_.end(), handler);
  if (it != generated_code_handlers_.end()) {
    generated_code_handlers_.erase(it);
    return;
  }
  auto it2 = std::find(other_handlers_.begin(), other_handlers_.end(), handler);
  if (it2 != other_handlers_.end()) {
    other_handlers_.erase(it2);
    return;
  }
  LOG(FATAL) << "Attempted to remove non-existent handler " << handler;
}

inline FaultManager::GeneratedCodeRange* FaultManager::CreateGeneratedCodeRange(
    const void* start, size_t size) {
  GeneratedCodeRange* range = free_generated_code_ranges_;
  if (range != nullptr) {
    std::less<GeneratedCodeRange*> less;
    DCHECK(!less(range, generated_code_ranges_storage_));
    DCHECK(less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges));
    range->start = start;
    range->size = size;
    free_generated_code_ranges_ = range->next.load(std::memory_order_relaxed);
    range->next.store(nullptr, std::memory_order_relaxed);
    return range;
  } else {
    return new GeneratedCodeRange{nullptr, start, size};
  }
}

inline void FaultManager::FreeGeneratedCodeRange(GeneratedCodeRange* range) {
  std::less<GeneratedCodeRange*> less;
  if (!less(range, generated_code_ranges_storage_) &&
      less(range, generated_code_ranges_storage_ + kNumLocalGeneratedCodeRanges)) {
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    range->start = nullptr;
    range->size = 0u;
    range->next.store(free_generated_code_ranges_, std::memory_order_relaxed);
    free_generated_code_ranges_ = range;
  } else {
    // Range is not in the `generated_code_ranges_storage_`.
    delete range;
  }
}

void FaultManager::AddGeneratedCodeRange(const void* start, size_t size) {
  GeneratedCodeRange* new_range = nullptr;
  {
    MutexLock lock(Thread::Current(), generated_code_ranges_lock_);
    new_range = CreateGeneratedCodeRange(start, size);
    GeneratedCodeRange* old_head = generated_code_ranges_.load(std::memory_order_relaxed);
    new_range->next.store(old_head, std::memory_order_relaxed);
    generated_code_ranges_.store(new_range, std::memory_order_release);
  }

  // The above release operation on `generated_code_ranges_`, paired with an acquire
  // operation on the same atomic object in `IsInGeneratedCode()`, ensures the correct
  // memory visibility for the contents of `*new_range` for any thread that loads the
  // value written above (or a value written by a release sequence headed by that write).
  //
  // However, we also need to ensure that any thread that encounters a segmentation
  // fault in the provided range shall actually see the written value. For JIT code
  // cache and nterp, the registration happens while the process is single-threaded
  // but the synchronization is more complicated for code in oat files.
  //
  // Threads that load classes register dex files under the `Locks::dex_lock_` and
  // the first one to register a dex file with a given oat file shall add the oat
  // code range; the memory visibility for these threads is guaranteed by the lock.
  // However, a thread that did not try to load a class with oat code can execute the
  // code if a direct or indirect reference to such a class escapes from one of the
  // threads that loaded it. Use `membarrier()` for memory visibility in this case.
  art::membarrier(MembarrierCommand::kPrivateExpedited);
}

void FaultManager::RemoveGeneratedCodeRange(const void* start, size_t size) {
  Thread* self = Thread::Current();
  GeneratedCodeRange* range = nullptr;
  {
    MutexLock lock(self, generated_code_ranges_lock_);
    std::atomic<GeneratedCodeRange*>* before = &generated_code_ranges_;
    range = before->load(std::memory_order_relaxed);
    while (range != nullptr && range->start != start) {
      before = &range->next;
      range = before->load(std::memory_order_relaxed);
    }
    if (range != nullptr) {
      GeneratedCodeRange* next = range->next.load(std::memory_order_relaxed);
      if (before == &generated_code_ranges_) {
        // A relaxed store directly to `generated_code_ranges_` would not satisfy the
        // conditions for a release sequence, so we need to use a store-release.
        before->store(next, std::memory_order_release);
      } else {
        // In the middle of the list, we can use a relaxed store as we're not
        // publishing any newly written memory to potential reader threads.
        // Whether they see the removed node or not is unimportant as we should
        // not execute that code anymore. We're keeping the `next` link of the
        // removed node, so that a concurrent walk can use it to reach remaining
        // retained nodes, if any.
        before->store(next, std::memory_order_relaxed);
      }
    }
  }
  CHECK(range != nullptr);
  DCHECK_EQ(range->start, start);
  CHECK_EQ(range->size, size);

  Runtime* runtime = Runtime::Current();
  CHECK(runtime != nullptr);
  if (runtime->IsStarted() && runtime->GetThreadList() != nullptr) {
    // Run a checkpoint before deleting the range to ensure that no thread holds a
    // pointer to the removed range while walking the list in `IsInGeneratedCode()`.
    // That walk is guarded by checking that the thread is `Runnable`, so any walk
    // started before the removal shall be done when running the checkpoint and the
    // checkpoint also ensures the correct memory visibility of `next` links,
    // so the thread shall not see the pointer during future walks.

    // This function is currently called in different mutex and thread states.
    // Semi-space GC performs the cleanup during its `MarkingPhase()` while holding
    // the mutator lock exclusively, so we do not need a checkpoint. All other GCs
    // perform the cleanup in their `ReclaimPhase()` while holding the mutator lock as
    // shared and it's safe to release and re-acquire the mutator lock. Despite holding
    // the mutator lock as shared, the thread is not always marked as `Runnable`.
    // TODO: Clean up state transitions in different GC implementations. b/259440389
    if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
      // We do not need a checkpoint because no other thread is Runnable.
    } else {
      DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
      // Use explicit state transitions or unlock/lock.
      bool runnable = (self->GetState() == ThreadState::kRunnable);
      if (runnable) {
        self->TransitionFromRunnableToSuspended(ThreadState::kNative);
      } else {
        Locks::mutator_lock_->SharedUnlock(self);
      }
      DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
      runtime->GetThreadList()->RunEmptyCheckpoint();
      if (runnable) {
        self->TransitionFromSuspendedToRunnable();
      } else {
        Locks::mutator_lock_->SharedLock(self);
      }
    }
  }
  FreeGeneratedCodeRange(range);
}

// This function is called within the signal handler. It checks that the thread
// is `Runnable`, the `mutator_lock_` is held (shared) and the fault PC is in one
// of the registered generated code ranges. No annotalysis is done.
bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context) {
  // We can only be running Java code in the current thread if it
  // is in Runnable state.
  VLOG(signals) << "Checking for generated code";
  Thread* thread = Thread::Current();
  if (thread == nullptr) {
    VLOG(signals) << "no current thread";
    return false;
  }

  ThreadState state = thread->GetState();
  if (state != ThreadState::kRunnable) {
    VLOG(signals) << "not runnable";
    return false;
  }

  // Current thread is runnable.
  // Make sure it has the mutator lock.
  if (!Locks::mutator_lock_->IsSharedHeld(thread)) {
    VLOG(signals) << "no lock";
    return false;
  }

  uintptr_t fault_pc = GetFaultPc(siginfo, context);
  if (fault_pc == 0u) {
    VLOG(signals) << "no fault PC";
    return false;
  }

  // Walk over the list of registered code ranges.
  GeneratedCodeRange* range = generated_code_ranges_.load(std::memory_order_acquire);
  while (range != nullptr) {
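    // Unsigned arithmetic: if `fault_pc` is below `range->start`, the subtraction wraps
    // around to a large value, so this single comparison checks both bounds.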
    if (fault_pc - reinterpret_cast<uintptr_t>(range->start) < range->size) {
      return true;
    }
    // We may or may not see ranges that were concurrently removed, depending
    // on when the relaxed writes of the `next` links become visible. However,
    // even if we're currently at a node that is being removed, we shall visit
    // all remaining ranges that are not being removed as the removed nodes
    // retain the `next` link at the time of removal (which may lead to other
    // removed nodes before reaching remaining retained nodes, if any). Correct
    // memory visibility of `start` and `size` fields of the visited ranges is
    // ensured by the release and acquire operations on `generated_code_ranges_`.
    range = range->next.load(std::memory_order_relaxed);
  }
  return false;
}

FaultHandler::FaultHandler(FaultManager* manager) : manager_(manager) {
}

//
// Null pointer fault handler
//
NullPointerHandler::NullPointerHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

bool NullPointerHandler::IsValidMethod(ArtMethod* method) {
  // At this point we know that the thread is `Runnable` and the PC is in one of
  // the registered code ranges. The `method` was read from the top of the stack
  // and should really point to an actual `ArtMethod`, unless we're crashing during
  // prologue or epilogue, or somehow managed to jump to the compiled code by some
  // unexpected path, other than method invoke or exception delivery. We do a few
  // quick checks without guarding from another fault.
  VLOG(signals) << "potential method: " << method;

  static_assert(IsAligned<sizeof(void*)>(ArtMethod::Size(kRuntimePointerSize)));
  if (method == nullptr || !IsAligned<sizeof(void*)>(method)) {
    VLOG(signals) << ((method == nullptr) ? "null method" : "unaligned method");
    return false;
  }

  // Check that the presumed method actually points to a class. Read barriers
  // are not needed (and would be undesirable in a signal handler) when reading
  // a chain of constant references to get to a non-movable `Class.class` object.

  // Note: Allowing nested faults. Checking that the method is in one of the
  // `LinearAlloc` spaces, or that objects we look at are in the `Heap` would be
  // slow and require locking a mutex, which is undesirable in a signal handler.
  // (Though we could register valid ranges similarly to the generated code ranges.)

  mirror::Object* klass =
      method->GetDeclaringClassAddressWithoutBarrier()->AsMirrorPtr();
  if (klass == nullptr || !IsAligned<kObjectAlignment>(klass)) {
    VLOG(signals) << ((klass == nullptr) ? "null class" : "unaligned class");
    return false;
  }

  mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
  if (class_class == nullptr || !IsAligned<kObjectAlignment>(class_class)) {
    VLOG(signals) << ((class_class == nullptr) ? "null class_class" : "unaligned class_class");
    return false;
  }

  if (class_class != class_class->GetClass<kVerifyNone, kWithoutReadBarrier>()) {
    VLOG(signals) << "invalid class_class";
    return false;
  }

  return true;
}
586 
IsValidReturnPc(ArtMethod ** sp,uintptr_t return_pc)587 bool NullPointerHandler::IsValidReturnPc(ArtMethod** sp, uintptr_t return_pc) {
588   // Check if we can associate a dex PC with the return PC, whether from Nterp,
589   // or with an existing stack map entry for a compiled method.
590   // Note: Allowing nested faults if `IsValidMethod()` returned a false positive.
591   // Note: The `ArtMethod::GetOatQuickMethodHeader()` can acquire locks (at least
592   // `Locks::jit_lock_`) and if the thread already held such a lock, the signal
593   // handler would deadlock. However, if a thread is holding one of the locks
594   // below the mutator lock, the PC should be somewhere in ART code and should
595   // not match any registered generated code range, so such as a deadlock is
596   // unlikely. If it happens anyway, the worst case is that an internal ART crash
597   // would be reported as ANR.
598   ArtMethod* method = *sp;
599   const OatQuickMethodHeader* method_header = method->GetOatQuickMethodHeader(return_pc);
600   if (method_header == nullptr) {
601     VLOG(signals) << "No method header.";
602     return false;
603   }
604   VLOG(signals) << "looking for dex pc for return pc 0x" << std::hex << return_pc
605                 << " pc offset: 0x" << std::hex
606                 << (return_pc - reinterpret_cast<uintptr_t>(method_header->GetEntryPoint()));
607   uint32_t dexpc = method_header->ToDexPc(reinterpret_cast<ArtMethod**>(sp), return_pc, false);
608   VLOG(signals) << "dexpc: " << dexpc;
609   return dexpc != dex::kDexNoIndex;
610 }
611 
//
// Suspension fault handler
//
SuspensionHandler::SuspensionHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

//
// Stack overflow fault handler
//
StackOverflowHandler::StackOverflowHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, true);
}

//
// Stack trace handler, used to help get a stack trace from SIGSEGV inside of compiled code.
//
JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandler(manager) {
  manager_->AddHandler(this, false);
}

bool JavaStackTraceHandler::Action([[maybe_unused]] int sig, siginfo_t* siginfo, void* context) {
  // Make sure that we are in the generated code, but we may not have a dex pc.
  bool in_generated_code = manager_->IsInGeneratedCode(siginfo, context);
  if (in_generated_code) {
    LOG(ERROR) << "Dumping java stack trace for crash in generated code";
    Thread* self = Thread::Current();

    uintptr_t sp = FaultManager::GetFaultSp(context);
    CHECK_NE(sp, 0u);  // Otherwise we should not have reached this handler.
    // Inside of generated code, sp[0] is the method, so sp is the frame.
    self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
    self->DumpJavaStack(LOG_STREAM(ERROR));
  }

  return false;  // Return false since we want to propagate the fault to the main signal handler.
}

}   // namespace art