/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <atomic>
#include <csignal>
#include <cstring>
#include <memory>
#include <mutex>

#if defined(__BIONIC__)
#include <platform/bionic/reserved_signals.h>
#endif

#include "berberis/base/bit_util.h"
#include "berberis/base/checks.h"
#include "berberis/base/config_globals.h"
#include "berberis/base/forever_alloc.h"
#include "berberis/base/tracing.h"
#include "berberis/guest_os_primitives/guest_signal.h"
#include "berberis/guest_os_primitives/guest_thread.h"
#include "berberis/guest_os_primitives/guest_thread_manager.h"
#include "berberis/guest_os_primitives/syscall_numbers.h"
#include "berberis/guest_state/guest_addr.h"
#include "berberis/guest_state/guest_state_opaque.h"
#include "berberis/runtime_primitives/crash_reporter.h"
#include "berberis/runtime_primitives/recovery_code.h"

#include "guest_signal_action.h"
#include "guest_thread_manager_impl.h"  // AttachCurrentThread, DetachCurrentThread
#include "scoped_signal_blocker.h"

// Glibc did not define this macro for i386 and x86_64 when its use below was
// added. That is still the case, so define it here when it is missing.
#ifndef SI_FROMKERNEL
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
#endif

namespace berberis {

namespace {

// For _kernel_ sent synchronous signals execution cannot proceed until the next
// pending-signals check: the faulty instruction would be executed again, leading
// to infinite recursion. So crash immediately to simplify debugging.
//
// Note that for a _user_ sent signal that is typically synchronous, such as SIGSEGV,
// execution can continue until the pending-signals check.
bool IsPendingSignalWithoutRecoveryCodeFatal(siginfo_t* info) {
  switch (info->si_signo) {
    case SIGSEGV:
    case SIGBUS:
    case SIGILL:
    case SIGFPE:
      return SI_FROMKERNEL(info);
    default:
      return false;
  }
}

// Technically guest threads may work with different signal action tables, so it's possible to
// optimize by using different mutexes. But it's rather an exotic corner case, so we keep it simple.
std::mutex* GetSignalActionsGuardMutex() {
  static auto* g_mutex = NewForever<std::mutex>();
  return g_mutex;
}

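// Returns the claimed guest action for `signal` (1-based) from the given table,
// holding the signal actions guard mutex to avoid racing with concurrent updates.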
const Guest_sigaction* FindSignalHandler(const GuestSignalActionsTable& signal_actions,
                                         int signal) {
  CHECK_GT(signal, 0);
  CHECK_LE(signal, Guest__KERNEL__NSIG);
  std::lock_guard<std::mutex> lock(*GetSignalActionsGuardMutex());
  return &signal_actions.at(signal - 1).GetClaimedGuestAction();
}

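// Returns the host instruction pointer saved in the signal ucontext.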
uintptr_t GetHostRegIP(const ucontext_t* ucontext) {
#if defined(__i386__)
  return ucontext->uc_mcontext.gregs[REG_EIP];
#elif defined(__x86_64__)
  return ucontext->uc_mcontext.gregs[REG_RIP];
#elif defined(__riscv)
  return ucontext->uc_mcontext.__gregs[REG_PC];
#elif defined(__aarch64__)
  return ucontext->uc_mcontext.pc;
#else
#error "Unknown host arch"
#endif
}

void SetHostRegIP(ucontext* ucontext, uintptr_t addr) {
#if defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_EIP] = addr;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RIP] = addr;
#elif defined(__riscv)
  ucontext->uc_mcontext.__gregs[REG_PC] = addr;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.pc = addr;
#else
#error "Unknown host arch"
#endif
}

// Can be interrupted by another HandleHostSignal!
void HandleHostSignal(int sig, siginfo_t* info, void* context) {
  ucontext_t* ucontext = bit_cast<ucontext_t*>(context);
  TRACE("Handle host signal %s (%d) at pc=%p si_addr=%p",
        strsignal(sig),
        sig,
        bit_cast<void*>(GetHostRegIP(ucontext)),
        info->si_addr);

  bool attached;
  GuestThread* thread = AttachCurrentThread(false, &attached);

  // If pending signals are enabled, just add this signal to the currently pending ones.
  // If pending signals are disabled, run handlers for the currently pending signals
  // and for this signal now. While running the handlers, enable nested signals
  // to become pending.
  bool prev_pending_signals_enabled = thread->TestAndEnablePendingSignals();
  thread->SetSignalFromHost(*info);
  if (!prev_pending_signals_enabled) {
    CHECK_EQ(GetResidence(*thread->state()), kOutsideGeneratedCode);
    thread->ProcessAndDisablePendingSignals();
    if (attached) {
      DetachCurrentThread();
    }
  } else {
    // We can't leave signals pending when we need to detach the thread!
    CHECK(!attached);

    // Run recovery code to restore precise context and exit generated code.
    uintptr_t addr = GetHostRegIP(ucontext);
    uintptr_t recovery_addr = FindRecoveryCode(addr, thread->state());

    if (recovery_addr) {
      if (!IsConfigFlagSet(kAccurateSigsegv)) {
        // We often get asynchronous signals at instructions with recovery code.
        // This is okay when the recovery is accurate, but highly fragile with inaccurate recovery.
        if (!IsPendingSignalWithoutRecoveryCodeFatal(info)) {
          TRACE("Skipping imprecise context recovery for non-fatal signal");
          TRACE("Guest signal handler suspended, continue");
          return;
        }
        TRACE(
            "Imprecise context at recovery, only guest pc is in sync."
            " Other registers may be stale.");
      }
      SetHostRegIP(ucontext, recovery_addr);
      TRACE("guest signal handler suspended, run recovery for host pc %p at host pc %p",
            bit_cast<void*>(addr),
            bit_cast<void*>(recovery_addr));
    } else {
      // Failed to find recovery code.
      // Translated code should be arranged to continue till
      // the next pending signals check unless it's fatal.
      if (IsPendingSignalWithoutRecoveryCodeFatal(info)) {
        HandleFatalSignal(sig, info, context);
        // If the raised signal is blocked we may need to return from the handler to unblock it.
        TRACE("Detected return from HandleFatalSignal, continue");
        return;
      }
      TRACE("guest signal handler suspended, continue");
    }
  }
}

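// Returns true for signals whose guest actions must not be installed.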
bool IsReservedSignal(int signal) {
  switch (signal) {
    // Disallow guest action for SIGABRT to simplify debugging (b/32167022).
    case SIGABRT:
#if defined(__BIONIC__)
    // Disallow overwriting the host profiler handler from guest code. Otherwise
    // guest __libc_init_profiling_handlers() would install its own handler, which
    // is not yet supported for guest code (at least need a proxy for
    // heapprofd_client.so) and fundamentally cannot be supported for host code.
    // TODO(b/167966989): Instead intercept __libc_init_profiling_handlers.
    case BIONIC_SIGNAL_PROFILER:
#endif
      return true;
  }
  return false;
}

}  // namespace

void GuestThread::SetDefaultSignalActionsTable() {
  static auto* g_signal_actions = NewForever<GuestSignalActionsTable>();
  // We need to initialize shared_ptr, but we don't want to attempt to delete the default
  // signal actions when the guest thread terminates. Hence we specify a no-op deleter.
  signal_actions_ = std::shared_ptr<GuestSignalActionsTable>(g_signal_actions, [](auto) {});
}

void GuestThread::CloneSignalActionsTableFrom(GuestSignalActionsTable* from_table) {
  // Need lock to make sure from_table isn't changed concurrently.
  std::lock_guard<std::mutex> lock(*GetSignalActionsGuardMutex());
  signal_actions_ = std::make_shared<GuestSignalActionsTable>(*from_table);
}

// Can be interrupted by another SetSignal!
void GuestThread::SetSignalFromHost(const siginfo_t& host_info) {
  siginfo_t* guest_info = pending_signals_.AllocSignal();

  // Convert host siginfo to guest.
  *guest_info = host_info;
  switch (host_info.si_signo) {
    case SIGILL:
    case SIGFPE: {
      guest_info->si_addr = ToHostAddr<void>(GetInsnAddr(GetCPUState(*state_)));
      break;
    }
    case SIGSYS: {
      guest_info->si_syscall = ToGuestSyscallNumber(host_info.si_syscall);
      break;
    }
  }

  // This is never interrupted by code that clears queue or status,
  // so the order in which to set them is not important.
  pending_signals_.EnqueueSignal(guest_info);
  // Check that pending signals are not disabled and mark them as present.
  uint8_t old_status = GetPendingSignalsStatusAtomic(*state_).exchange(kPendingSignalsPresent,
                                                                       std::memory_order_relaxed);
  CHECK_NE(kPendingSignalsDisabled, old_status);
}

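// Implements sigaltstack(2) semantics for the guest thread. Returns false and
// sets *error to an errno value when the request is invalid.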
bool GuestThread::SigAltStack(const stack_t* ss, stack_t* old_ss, int* error) {
  // The following code is not reentrant!
  ScopedSignalBlocker signal_blocker;

  if (old_ss) {
    if (sig_alt_stack_) {
      old_ss->ss_sp = sig_alt_stack_;
      old_ss->ss_size = sig_alt_stack_size_;
      old_ss->ss_flags = IsOnSigAltStack() ? SS_ONSTACK : 0;
    } else {
      old_ss->ss_sp = nullptr;
      old_ss->ss_size = 0;
      old_ss->ss_flags = SS_DISABLE;
    }
  }
  if (ss) {
    if (sig_alt_stack_ && IsOnSigAltStack()) {
      *error = EPERM;
      return false;
    }
    if (ss->ss_flags == SS_DISABLE) {
      sig_alt_stack_ = nullptr;
      sig_alt_stack_size_ = 0;
      return true;
    }
    if (ss->ss_flags != 0) {
      *error = EINVAL;
      return false;
    }
    if (ss->ss_size < GetGuest_MINSIGSTKSZ()) {
      *error = ENOMEM;
      return false;
    }
    sig_alt_stack_ = ss->ss_sp;
    sig_alt_stack_size_ = ss->ss_size;
  }
  return true;
}

void GuestThread::SwitchToSigAltStack() {
  if (sig_alt_stack_ && !IsOnSigAltStack()) {
    // TODO(b/289563835): Try removing `- 16` while ensuring app compatibility.
    // Reliable context on why we use `- 16` here seems to be lost.
    SetStackRegister(GetCPUState(*state_), ToGuestAddr(sig_alt_stack_) + sig_alt_stack_size_ - 16);
  }
}

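// Returns true if the guest stack pointer currently lies within the configured
// alternate signal stack. Must only be called when an alternate stack is set.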
bool GuestThread::IsOnSigAltStack() const {
  CHECK_NE(sig_alt_stack_, nullptr);
  const char* ss_start = static_cast<const char*>(sig_alt_stack_);
  const char* ss_curr = ToHostAddr<const char>(GetStackRegister(GetCPUState(*state_)));
  return ss_curr >= ss_start && ss_curr < ss_start + sig_alt_stack_size_;
}

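// Runs guest handlers for queued signals until no more are pending.
// Pending signals must not be disabled when this is called.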
void GuestThread::ProcessPendingSignals() {
  for (;;) {
    // Process pending signals while present.
    uint8_t status = GetPendingSignalsStatusAtomic(*state_).load(std::memory_order_acquire);
    CHECK_NE(kPendingSignalsDisabled, status);
    if (status == kPendingSignalsEnabled) {
      return;
    }
    ProcessPendingSignalsImpl();
  }
}

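// Processes queued signals (if any) and then disables pending signals.
// Returns true if pending signals were enabled before the call,
// false if they were already disabled.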
bool GuestThread::ProcessAndDisablePendingSignals() {
  for (;;) {
    // If pending signals are not present, CAS should disable them.
    // Otherwise, process pending signals and try again.
    uint8_t old_status = kPendingSignalsEnabled;
    if (GetPendingSignalsStatusAtomic(*state_).compare_exchange_weak(
            old_status, kPendingSignalsDisabled, std::memory_order_acq_rel)) {
      return true;
    }
    if (old_status == kPendingSignalsDisabled) {
      return false;
    }
    ProcessPendingSignalsImpl();
  }
}

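// Enables pending signals. Returns true if they were already enabled
// (or present), false if they were disabled before the call.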
bool GuestThread::TestAndEnablePendingSignals() {
  // If pending signals are disabled, CAS should mark them enabled.
  // Otherwise, pending signals are already enabled.
  uint8_t old_status = kPendingSignalsDisabled;
  return !GetPendingSignalsStatusAtomic(*state_).compare_exchange_strong(
      old_status, kPendingSignalsEnabled, std::memory_order_acq_rel);
}

// The callers loop around this to decide whether another iteration is needed.
// ATTENTION: Can be interrupted by SetSignal!
void GuestThread::ProcessPendingSignalsImpl() {
  // Clear pending signals status and queue.
  // ATTENTION: It is important to change the status before the queue!
  // Otherwise, if interrupted by SetSignal, we might end up with
  // no pending signals status but with a non-empty queue!
  GetPendingSignalsStatusAtomic(*state_).store(kPendingSignalsEnabled, std::memory_order_relaxed);

  siginfo_t* signal_info;
  while ((signal_info = pending_signals_.DequeueSignalUnsafe())) {
    const Guest_sigaction* sa = FindSignalHandler(*signal_actions_.get(), signal_info->si_signo);
    ProcessGuestSignal(this, sa, signal_info);
    pending_signals_.FreeSignal(signal_info);
  }
}

bool SetGuestSignalHandler(int signal,
                           const Guest_sigaction* act,
                           Guest_sigaction* old_act,
                           int* error) {
#if defined(__riscv)
  TRACE("ATTENTION: SetGuestSignalHandler is unimplemented - skipping it without raising an error");
  return true;
#endif
  if (signal < 1 || signal > Guest__KERNEL__NSIG) {
    *error = EINVAL;
    return false;
  }

  if (act && IsReservedSignal(signal)) {
    TRACE("sigaction for reserved signal %d not set", signal);
    act = nullptr;
  }

  std::lock_guard<std::mutex> lock(*GetSignalActionsGuardMutex());
  GuestSignalAction& action = GetCurrentGuestThread()->GetSignalActionsTable()->at(signal - 1);
  return action.Change(signal, act, HandleHostSignal, old_act, error);
}

}  // namespace berberis