
/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: ie, the signal
   will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.


   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.


   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).
*/

/*
   Some more details...

   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.

   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice
   w.r.t signals) is that there's a pure function to calculate SKSS
   from SCSS, calculate_SKSS_from_SCSS.  So when the client changes
   SCSS then we recompute the associated SKSS and apply any changes
   from the previous SKSS through to the kernel.

   Now, that said, the general scheme we have now is, that regardless
   of what the client puts into the SCSS (viz, asks for), what we
   would like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
       control is back with the scheduler), ask the kernel "do you have
       any signals for me?"  and if it does, collect up the info, and
       deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work (of which more later).

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.

   Now unfortunately .. the above scheme only works for so-called async
   signals.  An async signal is one which isn't associated with any
   particular instruction, eg Control-C (SIGINT).  For those, it
   doesn't matter if we don't deliver the signal to the client
   immediately; it only matters that we deliver it eventually.  Hence
   polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for
       async signals.

   (1) and (2) together imply that if the host does deliver a signal to
       async_signalhandler while the VCPU is running, something's
       seriously wrong.

   (3) when running code on vcpu, don't block sync signals.  Instead
       register sync_signalhandler and catch any such via that.  Of
       course, that means an ugly recovery path if we do -- the
       sync_signalhandler has to longjmp, exiting out of the generated
       code and the assembly dispatcher that runs it; the longjmp is
       caught in m_scheduler, which then tells m_signals to deliver the
       signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really
   run with) is way different from SCSS's signal mask (viz, what the
   client thread thought it asked for).  For example, it may well be
   that the client did not block control-C, so that it just expects to
   drop dead if it receives ^C whilst blocked in a syscall, but by
   default we are running with all async signals blocked, and so that
   signal could be arbitrarily delayed, or perhaps even lost (not
   sure).

   So what we have to do, when doing any syscall which SfMayBlock, is
   to quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S

   -----------

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_signalhandler, since we
             temporarily unblocked them for the duration of the
             syscall, by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc).  As in the normal
             case, these signals are blocked, but we can and do poll
             for them using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just a
   syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, long-winded and probably error-prone
   way.

   Finally, if gdb is debugging the process under valgrind, gdb can
   tell us that a signal should be ignored.  So, before resuming the
   scheduler/delivering the signal, we call
   VG_(gdbserver_report_signal); the signal is delivered only if this
   returns True.
*/
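
/* A minimal, self-contained sketch (NOT Valgrind code) of the polling
   idea described above: block a signal, then collect it synchronously
   via sigtimedwait with a zero timeout, at a moment of our choosing.
   On Linux this is essentially what VG_(sigtimedwait_zero) wraps; all
   names below are illustrative only.

      #include <signal.h>
      #include <stdio.h>
      #include <time.h>

      int main ( void )
      {
         sigset_t        block;
         siginfo_t       info;
         struct timespec zero = { 0, 0 };

         // Keep SIGUSR1 pending instead of letting it interrupt us.
         sigemptyset(&block);
         sigaddset(&block, SIGUSR1);
         sigprocmask(SIG_BLOCK, &block, NULL);

         raise(SIGUSR1);   // now pending, not delivered

         // Poll: returns the signal number if one is pending, or
         // fails immediately with EAGAIN if none is.
         if (sigtimedwait(&block, &info, &zero) == SIGUSR1)
            printf("collected SIGUSR1 from pid %d\n", (int)info.si_pid);
         return 0;
      }
*/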

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"

/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS    8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (Or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */

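/* Illustrative use (a sketch, not a verbatim call site): when a thread
   known to be stopped in a syscall (status VgTs_WaitSys) takes an
   async signal, the interrupted context does satisfy that constraint,
   so

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);

   recovers the syscall's result; for a thread interrupted anywhere
   else, the same expression would be garbage and must not be used. */
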
#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an altivec instruction.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                      \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);    \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);     \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK];  \
      }

#elif defined(VGP_ppc64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
{
   ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
   ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
   if (err) r3 &= 0xFF;
   return VG_(mk_SysRes_ppc64_linux)( r3, err );
}
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                        \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];             \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];              \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK];  \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)       \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;         \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;         \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7; \
      }

#elif defined(VGP_x86_darwin)

static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext32* mc = uc->uc_mcontext;
   struct __darwin_i386_thread_state* ss = &mc->__ss;
   return ss->__eip;
}
static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext32* mc = uc->uc_mcontext;
   struct __darwin_i386_thread_state* ss = &mc->__ss;
   return ss->__esp;
}
static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                 UWord scclass ) {
   /* this is complicated by the problem that there are 3 different
      kinds of syscalls, each with its own return convention.
      NB: scclass is a host word, hence UWord is good for both
      amd64-darwin and x86-darwin */
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext32* mc = uc->uc_mcontext;
   struct __darwin_i386_thread_state* ss = &mc->__ss;
   /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
   UInt carry = 1 & ss->__eflags;
   UInt err = 0;
   UInt wLO = 0;
   UInt wHI = 0;
   switch (scclass) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         err = carry;
         wLO = ss->__eax;
         wHI = ss->__edx;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         wLO = ss->__eax;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         wLO = ss->__eax;
         break;
      default:
         vg_assert(0);
         break;
   }
   return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                     wHI, wLO );
}
static inline
void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                     void* ucV ) {
   ucontext_t* uc = (ucontext_t*)(ucV);
   struct __darwin_mcontext32* mc = uc->uc_mcontext;
   struct __darwin_i386_thread_state* ss = &mc->__ss;
   srP->r_pc = (ULong)(ss->__eip);
   srP->r_sp = (ULong)(ss->__esp);
   srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
}

#elif defined(VGP_amd64_darwin)

static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext64* mc = uc->uc_mcontext;
   struct __darwin_x86_thread_state64* ss = &mc->__ss;
   return ss->__rip;
}
static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext64* mc = uc->uc_mcontext;
   struct __darwin_x86_thread_state64* ss = &mc->__ss;
   return ss->__rsp;
}
static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                 UWord scclass ) {
   /* This is copied from the x86-darwin case.  I'm not sure if it
      is correct. */
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext64* mc = uc->uc_mcontext;
   struct __darwin_x86_thread_state64* ss = &mc->__ss;
   /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
   ULong carry = 1 & ss->__rflags;
   ULong err = 0;
   ULong wLO = 0;
   ULong wHI = 0;
   switch (scclass) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         err = carry;
         wLO = ss->__rax;
         wHI = ss->__rdx;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         wLO = ss->__rax;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         wLO = ss->__rax;
         break;
      default:
         vg_assert(0);
         break;
   }
   return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                       wHI, wLO );
}
static inline
void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                     void* ucV ) {
   ucontext_t* uc = (ucontext_t*)ucV;
   struct __darwin_mcontext64* mc = uc->uc_mcontext;
   struct __darwin_x86_thread_state64* ss = &mc->__ss;
   srP->r_pc = (ULong)(ss->__rip);
   srP->r_sp = (ULong)(ss->__rsp);
   srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
}

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc)        ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                  \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);    \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11];  \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14];  \
      }

#elif defined(VGP_mips32_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)   ((UWord)(((uc)->uc_mcontext.sc_pc)))
#  define VG_UCONTEXT_STACK_PTR(uc)   ((UWord)((uc)->uc_mcontext.sc_regs[29]))
#  define VG_UCONTEXT_FRAME_PTR(uc)   ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                             \
      /* Convert the values in uc_mcontext.sc_regs into a SysRes. */ \
      VG_(mk_SysRes_mips32_linux)( (uc)->uc_mcontext.sc_regs[2],     \
                                   (uc)->uc_mcontext.sc_regs[3],     \
                                   (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS32.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS32.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }


#else
#  error Unknown platform
#endif

/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and Darwin vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#else
#  error Unknown OS
#endif

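/* So, for example, info->VKI_SIGINFO_si_addr names the fault-address
   field of a vki_siginfo_t on either OS; see its use when reporting
   SIGSEGV and friends in default_action() below. */
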

/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
*/


/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;


/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- not passed to the kernel
                   (see calculate_SKSS_from_SCSS)
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block
                   everything
     SA_ONSTACK -- not passed to the kernel; we handle the client's
                   alternate stack ourselves
     SA_NOCLDWAIT -- passed to kernel
*/


typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* Returns True if the signal is to be ignored.
   To decide that, we may also need to consult gdbserver (hence tid). */
static Bool is_sig_ign(Int sigNo, ThreadId tid)
{
   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);

   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN
      || !VG_(gdbserver_report_signal) (sigNo, tid);
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );
   }
}

/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if the client has a handler, then our handler
                else if the client's handler is DFL, then our handler
                     as well
                else (the client's handler must be IGN)
                     then our handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
         case VKI_SIGSEGV:
         case VKI_SIGBUS:
         case VKI_SIGFPE:
         case VKI_SIGILL:
         case VKI_SIGTRAP:
            /* For these, we always want to catch them and report, even
               if the client code doesn't. */
            skss_handler = sync_signalhandler;
            break;

         case VKI_SIGCONT:
            /* Let the kernel handle SIGCONT unless the client is
               actually catching it. */
         case VKI_SIGCHLD:
         case VKI_SIGWINCH:
         case VKI_SIGURG:
            /* For signals which have a default action of Ignore,
               only set a handler if the client has set a signal
               handler.  Otherwise the kernel will interrupt a syscall
               which wouldn't have otherwise been interrupted. */
            if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
               skss_handler = VKI_SIG_DFL;
            else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
            break;

         default:
            // VKI_SIGVG* are runtime variables, so we can't make them
            // cases in the switch, so we handle them in the 'default' case.
            if (sig == VG_SIGVGKILL)
               skss_handler = sigvgkill_handler;
            else {
               if (scss_handler == VKI_SIG_IGN)
                  skss_handler = VKI_SIG_IGN;
               else
                  skss_handler = async_signalhandler;
            }
            break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}

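/* A worked example of the computation above: if the client installs a
   real handler for SIGINT with sa_flags == SA_RESETHAND|SA_NODEFER,
   the SKSS entry for SIGINT becomes async_signalhandler with flags
   exactly SA_RESTART|SA_SIGINFO|SA_RESTORER: RESETHAND and NODEFER are
   not forwarded to the kernel, RESTART/SIGINFO/RESTORER are forced on,
   and of the client's flags only NOCLDSTOP/NOCLDWAIT would ever be
   copied through. */
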

/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movl $" #name ", %eax\n" \
   "   int  $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movq $" #name ", %rax\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "   mov  r7, #" #name "\n\t" \
   "   svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#elif defined(VGP_mips32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" /* apparently $2 is v0 */ \
   "   syscall\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);

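/* To see why the two levels are needed: with __NR_rt_sigreturn == 173
   (the x86-linux value; illustrative only),
   MY_SIGRETURN(__NR_rt_sigreturn) first expands its argument, yielding
   _MY_SIGRETURN(173), whose #name then stringifies to "173", giving
   "movl $173, %eax".  With only one level, #name would stringify the
   unexpanded token, giving the bogus "movl $__NR_rt_sigreturn, %eax". */
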

static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGP_mips32_linux)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
            !defined(VGP_mips32_linux)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}


/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack.  */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}


SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}

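/* For reference, a minimal client-side sketch (NOT Valgrind code) of
   the sigaltstack/SA_ONSTACK protocol which the syscall above
   implements; all names are illustrative:

      #include <signal.h>
      #include <stdlib.h>

      static void handler ( int sig ) { }   // would run on the altstack

      void install ( void )
      {
         stack_t          ss;
         struct sigaction sa;

         ss.ss_sp    = malloc(SIGSTKSZ);    // carve out an alternate stack
         ss.ss_size  = SIGSTKSZ;
         ss.ss_flags = 0;
         sigaltstack(&ss, NULL);            // handled by the code above

         sa.sa_handler = handler;
         sa.sa_flags   = SA_ONSTACK;        // request delivery on it
         sigemptyset(&sa.sa_mask);
         sigaction(SIGSEGV, &sa, NULL);     // handled by VG_(do_sys_sigaction)
      }

   Under Valgrind, the recorded per-thread altstack is then consulted
   by push_signal_frame (below) when a frame for the handler is built. */
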

SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that,
      when the call is passed to the kernel, it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
        && new_act
        && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}


static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

static
HChar* format_sigset ( const vki_sigset_t* set )
{
   static HChar buf[128];
   int w;

   VG_(strcpy)(buf, "");

   for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   {
#     if _VKI_NSIG_BPW == 32
      VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
                   set ? (ULong)set->sig[w] : 0);
#     elif _VKI_NSIG_BPW == 64
      VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
                   set ? (ULong)set->sig[w] : 0);
#     else
#       error "Unsupported value for _VKI_NSIG_BPW"
#     endif
   }

   return buf;
}

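/* Note that format_sigset returns its single static buffer, so each
   result must be consumed (printed) before the next call; the callers
   below use at most one format_sigset result per VG_(dmsg) call, which
   is why this is safe. */
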
/*
   This updates the thread's signal mask.  There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}


SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}

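/* In set-algebra terms, for a thread mask M and modifier set S, the
   three cases handled above (via do_sigprocmask_bitops) compute:

      SIG_BLOCK:    M := M | S
      SIG_UNBLOCK:  M := M & ~S
      SIG_SETMASK:  M := S

   after which do_setmask unconditionally removes SIGKILL and SIGSTOP
   from M, mirroring what the kernel itself enforces. */
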

/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
   Int ret;
   vki_sigset_t block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}

void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
}


/* ---------------------------------------------------------------------
   The signal simulation proper.  A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */

/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                       const struct vki_ucontext *uc )
{
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using.  Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %d: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );

   } else {
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   }

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
}


const Char *VG_(signame)(Int sigNo)
{
   static Char buf[20];

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}

/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
#  if defined(VGO_linux)
   /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   vg_assert(r == 0);
#  endif

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}

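/* The same save/unblock/kill/restore dance in plain POSIX terms (a
   sketch, NOT Valgrind code), for comparison with VG_(kill_self):

      #include <signal.h>
      #include <unistd.h>

      void kill_self_sketch ( int signo )
      {
         struct sigaction dfl, saved_sa;
         sigset_t         unblock, saved_mask;

         dfl.sa_handler = SIG_DFL;           // force the default action
         dfl.sa_flags   = 0;
         sigemptyset(&dfl.sa_mask);
         sigaction(signo, &dfl, &saved_sa);

         sigemptyset(&unblock);              // make sure it's deliverable
         sigaddset(&unblock, signo);
         sigprocmask(SIG_UNBLOCK, &unblock, &saved_mask);

         kill(getpid(), signo);              // default action happens here

         sigaction(signo, &saved_sa, NULL);  // put everything back
         sigprocmask(SIG_SETMASK, &saved_mask, NULL);
      }
*/
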
// The si_code describes where the signal came from.  Some come from the
// kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here.  We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#  if defined(VGO_linux)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );

#  elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable.  It should be
   // the case that 0 means "user", and >0 means "kernel".  But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal,
   // because the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly
   // reliable.
   } else if (SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated.  Reason being
   // that kernel-generated sync signals are more common, and it's probable
   // that misdiagnosing a user signal as a kernel signal is better than
   // the opposite.
   } else {
      return True;
   }
#  else
#    error Unknown OS
#  endif
}

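/* Concrete Linux examples of the distinction drawn above: a SIGSEGV
   caused by a bad dereference arrives with si_code SEGV_MAPERR (1,
   which is > SI_USER), so is_signal_from_kernel returns True; the same
   signal sent with kill() arrives with SI_USER (0) and is classified
   as user-sent. */
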

// This is an arbitrary si_code that we only use internally.  It corresponds
// to the value SI_KERNEL on Linux, but that's not really of any significance
// as far as I can determine.
#define VKI_SEGV_MADE_UP_GPF    0x80

/*
   Perform the default action of a signal.  If the signal is fatal, it
   marks all threads as needing to exit, but it doesn't actually kill
   the process or thread.

   If we're not being quiet, then print out some more detail about
   fatal signals (esp. core dumping signals).
*/
static void default_action(const vki_siginfo_t *info, ThreadId tid)
{
   Int  sigNo     = info->si_signo;
   Bool terminate = False;      /* kills process         */
   Bool core      = False;      /* kills process w/ core */
   struct vki_rlimit corelim;
   Bool could_core;

   vg_assert(VG_(is_running_thread)(tid));

   switch(sigNo) {
      case VKI_SIGQUIT: /* core */
      case VKI_SIGILL:  /* core */
      case VKI_SIGABRT: /* core */
      case VKI_SIGFPE:  /* core */
      case VKI_SIGSEGV: /* core */
      case VKI_SIGBUS:  /* core */
      case VKI_SIGTRAP: /* core */
      case VKI_SIGXCPU: /* core */
      case VKI_SIGXFSZ: /* core */
         terminate = True;
         core = True;
         break;

      case VKI_SIGHUP:  /* term */
      case VKI_SIGINT:  /* term */
      case VKI_SIGKILL: /* term - we won't see this */
      case VKI_SIGPIPE: /* term */
      case VKI_SIGALRM: /* term */
      case VKI_SIGTERM: /* term */
      case VKI_SIGUSR1: /* term */
      case VKI_SIGUSR2: /* term */
      case VKI_SIGIO:   /* term */
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:  /* term */
#     endif
      case VKI_SIGSYS:  /* term */
      case VKI_SIGPROF: /* term */
      case VKI_SIGVTALRM: /* term */
#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
#     endif
         terminate = True;
         break;
   }

   vg_assert(!core || (core && terminate));

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
                sigNo, info->si_code, terminate ? "terminate" : "ignore",
                core ? "+core" : "");

   if (!terminate)
      return;                   /* nothing to do */

   could_core = core;

   if (core) {
      /* If they set the core-size limit to zero, don't generate a
         core file */

      VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);

      if (corelim.rlim_cur == 0)
         core = False;
   }

   if ( (VG_(clo_verbosity) > 1 ||
         (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
        ) &&
        !VG_(clo_xml) ) {
      VG_(umsg)(
         "\n"
         "Process terminating with default action of signal %d (%s)%s\n",
         sigNo, VG_(signame)(sigNo), core ? ": dumping core" : "");

      /* Be helpful - decode some more details about this fault */
      if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
         const Char *event = NULL;
         Bool haveaddr = True;

         switch(sigNo) {
            case VKI_SIGSEGV:
               switch(info->si_code) {
                  case VKI_SEGV_MAPERR:
                     event = "Access not within mapped region";
                     break;
                  case VKI_SEGV_ACCERR:
                     event = "Bad permissions for mapped region";
                     break;
                  case VKI_SEGV_MADE_UP_GPF:
                     /* General Protection Fault: The CPU/kernel
                        isn't telling us anything useful, but this
                        is commonly the result of exceeding a
                        segment limit. */
                     event = "General Protection Fault";
                     haveaddr = False;
                     break;
               }
#if 0
               {
                  HChar buf[110];
                  VG_(am_show_nsegments)(0,"post segfault");
                  VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
                  VG_(system)(buf);
               }
#endif
               break;

            case VKI_SIGILL:
               switch(info->si_code) {
                  case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
                  case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
                  case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
                  case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
                  case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
                  case VKI_ILL_PRVREG: event = "Privileged register"; break;
                  case VKI_ILL_COPROC: event = "Coprocessor error"; break;
                  case VKI_ILL_BADSTK: event = "Internal stack error"; break;
               }
               break;

            case VKI_SIGFPE:
               switch (info->si_code) {
                  case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
                  case VKI_FPE_INTOVF: event = "Integer overflow"; break;
                  case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
                  case VKI_FPE_FLTOVF: event = "FP overflow"; break;
                  case VKI_FPE_FLTUND: event = "FP underflow"; break;
                  case VKI_FPE_FLTRES: event = "FP inexact"; break;
                  case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
                  case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
               }
               break;

            case VKI_SIGBUS:
               switch (info->si_code) {
                  case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
                  case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
                  case VKI_BUS_OBJERR: event = "Hardware error"; break;
               }
               break;
         } /* switch (sigNo) */

         if (event != NULL) {
            if (haveaddr)
               VG_(umsg)(" %s at address %p\n",
                         event, info->VKI_SIGINFO_si_addr);
            else
               VG_(umsg)(" %s\n", event);
         }
      }
      /* Print a stack trace.  Be cautious if the thread's SP is in an
         obviously stupid place (not mapped readable) that would
         likely cause a segfault. */
      if (VG_(is_valid_tid)(tid)) {
         Word first_ip_delta = 0;
#if defined(VGO_linux)
         /* Make sure that the address stored in the stack pointer is
            located in a mapped page.  That is not necessarily so.  E.g.
            consider the scenario where the stack pointer was decreased
            and now has a value that is just below the end of a page that
            has not been mapped yet.  In that case VG_(am_is_valid_for_client)
            will consider the address of the stack pointer invalid and that
            would cause a back-trace of depth 1 to be printed, instead of a
            full back-trace. */
         if (tid == 1) {           // main thread
            Addr esp  = VG_(get_SP)(tid);
            Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
            if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
1655 if (VG_(clo_trace_signals))
1656 VG_(dmsg)(" -> extended stack base to %#lx\n",
1657 VG_PGROUNDDN(esp));
1658 }
1659 }
1660 #endif
1661 #if defined(VGA_s390x)
1662 if (sigNo == VKI_SIGILL) {
1663 /* The guest instruction address has been adjusted earlier to
1664 point to the insn following the one that could not be decoded.
1665 When printing the back-trace here we need to undo that
1666 adjustment so the first line in the back-trace reports the
1667 correct address. */
1668 Addr addr = (Addr)info->VKI_SIGINFO_si_addr;
1669 UChar byte = ((UChar *)addr)[0];
1670 Int insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
1671
1672 first_ip_delta = -insn_length;
1673 }
1674 #endif
1675 ExeContext* ec = VG_(am_is_valid_for_client)
1676 (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
1677 ? VG_(record_ExeContext)( tid, first_ip_delta )
1678 : VG_(record_depth_1_ExeContext)( tid,
1679 first_ip_delta );
1680 vg_assert(ec);
1681 VG_(pp_ExeContext)( ec );
1682 }
1683 if (sigNo == VKI_SIGSEGV
1684 && info && is_signal_from_kernel(tid, sigNo, info->si_code)
1685 && info->si_code == VKI_SEGV_MAPERR) {
1686 VG_(umsg)(" If you believe this happened as a result of a stack\n" );
1687 VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
1688 VG_(umsg)(" possible), you can try to increase the size of the\n" );
1689 VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
1690 // FIXME: assumes main ThreadId == 1
1691 if (VG_(is_valid_tid)(1)) {
1692 VG_(umsg)(
1693 " The main thread stack size used in this run was %d.\n",
1694 (Int)VG_(threads)[1].client_stack_szB);
1695 }
1696 }
1697 }
1698
1699 if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
1700 VG_(start_debugger)( tid );
1701 }
1702
1703 if (core) {
1704 static const struct vki_rlimit zero = { 0, 0 };
1705
1706 VG_(make_coredump)(tid, info, corelim.rlim_cur);
1707
1708 /* Make sure we don't get a confusing kernel-generated
1709 coredump when we finally exit */
1710 VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
1711 }
1712
1713 /* stash fatal signal in main thread */
1714 // what's this for?
1715 //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
1716
1717 /* everyone dies */
1718 VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
1719 VG_(threads)[tid].exitreason = VgSrc_FatalSig;
1720 VG_(threads)[tid].os_state.fatalsig = sigNo;
1721 }
1722
1723 /*
1724 This does the business of delivering a signal to a thread. It may
1725 be called from either a real signal handler, or from normal code to
1726 cause the thread to enter the signal handler.
1727
1728 This updates the thread state, but it does not set it to be
1729 Runnable.
1730 */
1731 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
1732 const struct vki_ucontext *uc )
1733 {
1734 Int sigNo = info->si_signo;
1735 SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
1736 void *handler_fn;
1737 ThreadState *tst = VG_(get_ThreadState)(tid);
1738
1739 if (VG_(clo_trace_signals))
1740 VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
1741 sigNo, VG_(signame)(sigNo), info->si_code, tid );
1742
1743 if (sigNo == VG_SIGVGKILL) {
1744 /* If this is a SIGVGKILL, we're expecting it to interrupt any
1745 blocked syscall. It doesn't matter whether the VCPU state is
1746 set to restart or not, because we don't expect it will
1747 execute any more client instructions. */
1748 vg_assert(VG_(is_exiting)(tid));
1749 return;
1750 }
1751
1752 /* If the client specifies SIG_IGN, treat it as SIG_DFL.
1753
1754 If deliver_signal() is being called on a thread, we want
1755 the signal to get through no matter what; if they're ignoring
1756 it, then we do this override (this is so we can send it SIGSEGV,
1757 etc). */
1758 handler_fn = handler->scss_handler;
1759 if (handler_fn == VKI_SIG_IGN)
1760 handler_fn = VKI_SIG_DFL;
1761
1762 vg_assert(handler_fn != VKI_SIG_IGN);
1763
1764 if (handler_fn == VKI_SIG_DFL) {
1765 default_action(info, tid);
1766 } else {
1767 /* Create a signal delivery frame, and set the client's %ESP and
1768 %EIP so that when execution continues, we will enter the
1769 signal handler with the frame on top of the client's stack,
1770 as it expects.
1771
1772 Signal delivery can fail if the client stack is too small or
1773 missing, and we can't push the frame. If that happens,
1774 push_signal_frame will cause the whole process to exit when
1775 we next hit the scheduler.
1776 */
1777 vg_assert(VG_(is_valid_tid)(tid));
1778
1779 push_signal_frame ( tid, info, uc );
1780
1781 if (handler->scss_flags & VKI_SA_ONESHOT) {
1782 /* Do the ONESHOT thing. */
1783 handler->scss_handler = VKI_SIG_DFL;
1784
1785 handle_SCSS_change( False /* lazy update */ );
1786 }
1787
1788 /* At this point:
1789 tst->sig_mask is the current signal mask
1790 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
1791 handler->scss_mask is the mask set by the handler
1792
1793 Handler gets a mask of tmp_sig_mask|handler_mask|signo
1794 */
1795 tst->sig_mask = tst->tmp_sig_mask;
1796 if (!(handler->scss_flags & VKI_SA_NOMASK)) {
1797 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
1798 VG_(sigaddset)(&tst->sig_mask, sigNo);
1799 tst->tmp_sig_mask = tst->sig_mask;
1800 }
1801 }
1802
1803 /* Thread state is ready to go - just add Runnable */
1804 }
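
/* For reference -- a hedged client-side sketch, not Valgrind code: the
   mask arithmetic above mirrors what the kernel itself would do for a
   handler installed like this.  While the handler runs, the thread's
   mask becomes old_mask | sa_mask | {SIGUSR1}; SA_NODEFER (the
   VKI_SA_NOMASK flag tested above) suppresses the final | {SIGUSR1}. */
#if 0
#include <signal.h>
static void install_usr1(void (*handler)(int))
{
   struct sigaction sa;
   sa.sa_handler = handler;
   sigemptyset(&sa.sa_mask);
   sigaddset(&sa.sa_mask, SIGUSR2); /* handler additionally blocks SIGUSR2 */
   sa.sa_flags = 0;                 /* no SA_NODEFER: SIGUSR1 blocked too */
   sigaction(SIGUSR1, &sa, NULL);
}
#endif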
1805
1806 static void resume_scheduler(ThreadId tid)
1807 {
1808 ThreadState *tst = VG_(get_ThreadState)(tid);
1809
1810 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1811
1812 if (tst->sched_jmpbuf_valid) {
1813 /* Can't continue; must longjmp back to the scheduler and thus
1814 enter the sighandler immediately. */
1815 VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
1816 }
1817 }
1818
1819 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
1820 {
1821 vki_siginfo_t info;
1822
1823 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1824
1825 VG_(memset)(&info, 0, sizeof(info));
1826 info.si_signo = VKI_SIGSEGV;
1827 info.si_code = si_code;
1828 info.VKI_SIGINFO_si_addr = (void*)addr;
1829
1830 /* even if gdbserver indicates to ignore the signal, we will deliver it */
1831 VG_(gdbserver_report_signal) (VKI_SIGSEGV, tid);
1832
1833 /* If they're trying to block the signal, force it to be delivered */
1834 if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
1835 VG_(set_default_handler)(VKI_SIGSEGV);
1836
1837 deliver_signal(tid, &info, NULL);
1838 }
1839
1840 // Synthesize a fault where the address is OK, but the page
1841 // permissions are bad.
1842 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
1843 {
1844 synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
1845 }
1846
1847 // Synthesize a fault where there's nothing mapped at the address.
1848 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
1849 {
1850 synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
1851 }
1852
1853 // Synthesize a misc memory fault.
1854 void VG_(synth_fault)(ThreadId tid)
1855 {
1856 synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
1857 }
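
/* Typical use from core/tool code -- a hedged sketch; the caller name
   is made up: having decided that a client access to 'addr' touches
   unmapped memory, make the client see the SIGSEGV it would have
   received natively. */
#if 0
static void hypothetical_bad_access(ThreadId tid, Addr addr)
{
   VG_(synth_fault_mapping)(tid, addr);  /* delivers SEGV_MAPERR */
}
#endif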
1858
1859 // Synthesise a SIGILL.
1860 void VG_(synth_sigill)(ThreadId tid, Addr addr)
1861 {
1862 vki_siginfo_t info;
1863
1864 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1865
1866 VG_(memset)(&info, 0, sizeof(info));
1867 info.si_signo = VKI_SIGILL;
1868 info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
1869 info.VKI_SIGINFO_si_addr = (void*)addr;
1870
1871 if (VG_(gdbserver_report_signal) (VKI_SIGILL, tid)) {
1872 resume_scheduler(tid);
1873 deliver_signal(tid, &info, NULL);
1874 }
1875 else
1876 resume_scheduler(tid);
1877 }
1878
1879 // Synthesise a SIGBUS.
1880 void VG_(synth_sigbus)(ThreadId tid)
1881 {
1882 vki_siginfo_t info;
1883
1884 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1885
1886 VG_(memset)(&info, 0, sizeof(info));
1887 info.si_signo = VKI_SIGBUS;
1888 /* There are several meanings to SIGBUS (as per POSIX, presumably),
1889 but the most widely understood is "invalid address alignment",
1890 so let's use that. */
1891 info.si_code = VKI_BUS_ADRALN;
1892 /* If we knew the invalid address in question, we could put it
1893 in .si_addr. Oh well. */
1894 /* info.VKI_SIGINFO_si_addr = (void*)addr; */
1895
1896 if (VG_(gdbserver_report_signal) (VKI_SIGBUS, tid)) {
1897 resume_scheduler(tid);
1898 deliver_signal(tid, &info, NULL);
1899 }
1900 else
1901 resume_scheduler(tid);
1902 }
1903
1904 // Synthesise a SIGTRAP.
1905 void VG_(synth_sigtrap)(ThreadId tid)
1906 {
1907 vki_siginfo_t info;
1908 struct vki_ucontext uc;
1909 # if defined(VGP_x86_darwin)
1910 struct __darwin_mcontext32 mc;
1911 # elif defined(VGP_amd64_darwin)
1912 struct __darwin_mcontext64 mc;
1913 # endif
1914
1915 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1916
1917 VG_(memset)(&info, 0, sizeof(info));
1918 VG_(memset)(&uc, 0, sizeof(uc));
1919 info.si_signo = VKI_SIGTRAP;
1920 info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
1921
1922 # if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
1923 /* This is for teq on mips. A teq instruction of the form 0xXXX1f4
1924 * causes VKI_SIGFPE, not VKI_SIGTRAP.
1925 */
1926 // JRS 2012-Jun-06: commented out until we know we need it
1927 // This isn't a clean solution; need something that avoids looking
1928 // at the guest code.
1929 //UInt *ins = (void*)(vgPlain_threads[tid].arch.vex.guest_PC-4);
1930 //UInt tcode = (((*ins) >> 6) & ((1 << 10) - 1));
1931 //if (tcode == VKI_BRK_OVERFLOW || tcode == VKI_BRK_DIVZERO) {
1932 // if (tcode == VKI_BRK_DIVZERO)
1933 // info.si_code = VKI_FPE_INTDIV;
1934 // else
1935 // info.si_code = VKI_FPE_INTOVF;
1936 // info.si_signo = VKI_SIGFPE;
1937 // info.si_errno = 0;
1938 // info.VKI_SIGINFO_si_addr
1939 // = (void*)(vgPlain_threads[tid].arch.vex.guest_PC-4);
1940 //}
1941 # endif
1942
1943 # if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
1944 uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
1945 for a breakpoint trap... */
1946 uc.uc_mcontext.err = 0; /* tjh: no error code for x86
1947 breakpoint trap... */
1948 # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1949 /* the same thing, but using Darwin field/struct names */
1950 VG_(memset)(&mc, 0, sizeof(mc));
1951 uc.uc_mcontext = &mc;
1952 uc.uc_mcontext->__es.__trapno = 3;
1953 uc.uc_mcontext->__es.__err = 0;
1954 # endif
1955
1956 /* fixs390: do we need to do anything here for s390 ? */
1957 if (VG_(gdbserver_report_signal) (VKI_SIGTRAP, tid)) {
1958 resume_scheduler(tid);
1959 deliver_signal(tid, &info, &uc);
1960 }
1961 else
1962 resume_scheduler(tid);
1963 }
1964
1965 /* Make a signal pending for a thread, for later delivery.
1966 VG_(poll_signals) will arrange for it to be delivered at the right
1967 time.
1968
1969 tid==0 means add it to the process-wide queue, rather than sending it
1970 to a specific thread.
1971 */
1972 static
1973 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
1974 {
1975 ThreadState *tst;
1976 SigQueue *sq;
1977 vki_sigset_t savedmask;
1978
1979 tst = VG_(get_ThreadState)(tid);
1980
1981 /* Protect the signal queue against async deliveries */
1982 block_all_host_signals(&savedmask);
1983
1984 if (tst->sig_queue == NULL) {
1985 tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
1986 sizeof(*tst->sig_queue));
1987 VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
1988 }
1989 sq = tst->sig_queue;
1990
1991 if (VG_(clo_trace_signals))
1992 VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
1993 si->si_signo, sq->next, tid);
1994
1995 /* Add signal to the queue. If the queue gets overrun, then old
1996 queued signals may get lost.
1997
1998 XXX We should also keep a sigset of pending signals, so that at
1999 least a non-siginfo signal gets delivered.
2000 */
2001 if (sq->sigs[sq->next].si_signo != 0)
2002 VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
2003 sq->sigs[sq->next].si_signo, tid);
2004
2005 sq->sigs[sq->next] = *si;
2006 sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
2007
2008 restore_all_host_signals(&savedmask);
2009 }
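
/* Shape of the queue above -- a hedged standalone model with a made-up
   capacity of 4 standing in for N_QUEUED_SIGNALS: sq->next is the
   write cursor of a fixed ring, so once the ring wraps, the oldest
   still-pending entry is overwritten; that is the "dropped" case
   reported above. */
#if 0
#define MODEL_CAP 4
static int model_ring[MODEL_CAP];
static int model_next = 0;
static void model_push(int signo)
{
   /* a non-zero slot still holds an undelivered signal: it is lost */
   model_ring[model_next] = signo;
   model_next = (model_next + 1) % MODEL_CAP;
}
#endif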
2010
2011 /*
2012 Returns the next queued signal for thread tid which is in "set".
2013 tid==0 means process-wide signal. Set si_signo to 0 when the
2014 signal has been delivered.
2015
2016 Must be called with all signals blocked, to protect against async
2017 deliveries.
2018 */
2019 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
2020 {
2021 ThreadState *tst = VG_(get_ThreadState)(tid);
2022 SigQueue *sq;
2023 Int idx;
2024 vki_siginfo_t *ret = NULL;
2025
2026 sq = tst->sig_queue;
2027 if (sq == NULL)
2028 goto out;
2029
2030 idx = sq->next;
2031 do {
2032 if (0)
2033 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
2034 sq->sigs[idx].si_signo,
2035 VG_(sigismember)(set, sq->sigs[idx].si_signo));
2036
2037 if (sq->sigs[idx].si_signo != 0
2038 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
2039 if (VG_(clo_trace_signals))
2040 VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
2041 sq->sigs[idx].si_signo, idx, tid);
2042 ret = &sq->sigs[idx];
2043 goto out;
2044 }
2045
2046 idx = (idx + 1) % N_QUEUED_SIGNALS;
2047 } while(idx != sq->next);
2048 out:
2049 return ret;
2050 }
2051
2052 static int sanitize_si_code(int si_code)
2053 {
2054 #if defined(VGO_linux)
2055 /* The linux kernel uses the top 16 bits of si_code for its own
2056 use and only exports the bottom 16 bits to user space - at least
2057 that is the theory, but it turns out that there are some kernels
2058 around that forget to mask out the top 16 bits so we do it here.
2059
2060 The kernel treats the bottom 16 bits as signed and (when it does
2061 mask them off) sign extends them when exporting to user space so
2062 we do the same thing here. */
2063 return (Short)si_code;
2064 #elif defined(VGO_darwin)
2065 return si_code;
2066 #else
2067 # error Unknown OS
2068 #endif
2069 }
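
/* A worked example of the truncation above (sketch only): a buggy
   kernel might hand us si_code 0x00018001.  Truncating through Short
   keeps the low 16 bits and sign-extends them on the way back to Int,
   yielding -32767 -- exactly what a well-behaved kernel would have
   exported. */
#if 0
Int raw   = 0x00018001;   /* junk in the top 16 bits */
Int clean = (Short)raw;   /* low 16 bits, sign-extended: -32767 */
#endif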
2070
2071 /*
2072 Receive an async signal from the kernel.
2073
2074 This should only happen when the thread is blocked in a syscall,
2075 since that's the only time this set of signals is unblocked.
2076 */
2077 static
2078 void async_signalhandler ( Int sigNo,
2079 vki_siginfo_t *info, struct vki_ucontext *uc )
2080 {
2081 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2082 ThreadState* tst = VG_(get_ThreadState)(tid);
2083 SysRes sres;
2084
2085 /* The thread isn't currently running, make it so before going on */
2086 vg_assert(tst->status == VgTs_WaitSys);
2087 VG_(acquire_BigLock)(tid, "async_signalhandler");
2088
2089 info->si_code = sanitize_si_code(info->si_code);
2090
2091 if (VG_(clo_trace_signals))
2092 VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
2093 sigNo, tid, info->si_code);
2094
2095 /* Update thread state properly. The signal can only have been
2096 delivered whilst we were in
2097 coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
2098 window between the two sigprocmask calls, since at all other
2099 times, we run with async signals on the host blocked. Hence
2100 make enquiries on the basis that we were in or very close to a
2101 syscall, and attempt to fix up the guest state accordingly.
2102
2103 (normal async signals occurring during computation are blocked,
2104 but periodically polled for using VG_(sigtimedwait_zero), and
2105 delivered at a point convenient for us. Hence this routine only
2106 deals with signals that are delivered to a thread during a
2107 syscall.) */
2108
2109 /* First, extract a SysRes from the ucontext_t* given to this
2110 handler. If it is subsequently established by
2111 VG_(fixup_guest_state_after_syscall_interrupted) that the
2112 syscall was complete but the results had not been committed yet
2113 to the guest state, then it'll have to commit the results itself
2114 "by hand", and so we need to extract the SysRes. Of course if
2115 the thread was not in that particular window then the
2116 SysRes will be meaningless, but that's OK too because
2117 VG_(fixup_guest_state_after_syscall_interrupted) will detect
2118 that the thread was not in said window and ignore the SysRes. */
2119
2120 /* To make matters more complex still, on Darwin we need to know
2121 the "class" of the syscall under consideration in order to be
2122 able to extract a correct SysRes. The class will have been
2123 saved just before the syscall, by VG_(client_syscall), into this
2124 thread's tst->arch.vex.guest_SC_CLASS. Hence: */
2125 # if defined(VGO_darwin)
2126 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
2127 # else
2128 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
2129 # endif
2130
2131 /* (1) */
2132 VG_(fixup_guest_state_after_syscall_interrupted)(
2133 tid,
2134 VG_UCONTEXT_INSTR_PTR(uc),
2135 sres,
2136 !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
2137 );
2138
2139 /* (2) */
2140 /* Set up the thread's state to deliver a signal */
2141 if (!is_sig_ign(info->si_signo, tid))
2142 deliver_signal(tid, info, uc);
2143
2144 /* It's crucial that (1) and (2) happen in the order (1) then (2)
2145 and not the other way around. (1) fixes up the guest thread
2146 state to reflect the fact that the syscall was interrupted --
2147 either to restart the syscall or to return EINTR. (2) then sets
2148 up the thread state to deliver the signal. Then we resume
2149 execution. First, the signal handler is run, since that's the
2150 second adjustment we made to the thread state. If that returns,
2151 then we resume at the guest state created by (1), viz, either
2152 the syscall returns EINTR or is restarted.
2153
2154 If (2) was done before (1) the outcome would be completely
2155 different, and wrong. */
2156
2157 /* longjmp back to the thread's main loop to start executing the
2158 handler. */
2159 resume_scheduler(tid);
2160
2161 VG_(core_panic)("async_signalhandler: got unexpected signal "
2162 "while outside of scheduler");
2163 }
2164
2165 /* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
2166
2167 Returns True on success, False on failure.
2168
2169 Succeeds without doing anything if addr is already within a segment.
2170
2171 Failure could be caused by:
2172 - addr not below a growable segment
2173 - new stack size would exceed maxsize
2174 - mmap failed for some other reason
2175 */
2176 Bool VG_(extend_stack)(Addr addr, UInt maxsize)
2177 {
2178 SizeT udelta;
2179
2180 /* Find the next Segment above addr */
2181 NSegment const* seg
2182 = VG_(am_find_nsegment)(addr);
2183 NSegment const* seg_next
2184 = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2185 : NULL;
2186
2187 if (seg && seg->kind == SkAnonC)
2188 /* addr is already mapped. Nothing to do. */
2189 return True;
2190
2191 /* Check that the requested new base is in a shrink-down
2192 reservation section which abuts an anonymous mapping that
2193 belongs to the client. */
2194 if ( ! (seg
2195 && seg->kind == SkResvn
2196 && seg->smode == SmUpper
2197 && seg_next
2198 && seg_next->kind == SkAnonC
2199 && seg->end+1 == seg_next->start))
2200 return False;
2201
2202 udelta = VG_PGROUNDUP(seg_next->start - addr);
2203 VG_(debugLog)(1, "signals",
2204 "extending a stack base 0x%llx down by %lld\n",
2205 (ULong)seg_next->start, (ULong)udelta);
2206 if (! VG_(am_extend_into_adjacent_reservation_client)
2207 ( (NSegment*)seg_next, -(SSizeT)udelta )) {
2208 VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
2209 return False;
2210 }
2211
2212 /* When we change the main stack, we have to let the stack handling
2213 code know about it. */
2214 VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
2215
2216 if (VG_(clo_sanity_level) > 2)
2217 VG_(sanity_check_general)(False);
2218
2219 return True;
2220 }
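
/* Worked example of the rounding above (4KB pages assumed; addresses
   are made up): if the stack segment currently starts at 0x8000000 and
   addr is 0x7ffefff, the raw gap is 0x1001 bytes, so VG_PGROUNDUP
   gives udelta == 0x2000 and the segment base moves down by two whole
   pages. */
#if 0
Addr  start  = 0x8000000;
Addr  addr   = 0x7ffefff;
SizeT udelta = VG_PGROUNDUP(start - addr);   /* 0x1001 -> 0x2000 */
#endif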
2221
2222 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
2223
2224 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
2225 {
2226 if (0)
2227 VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
2228 vg_assert2(NULL == catcher || NULL == fault_catcher,
2229 "Fault catcher is already registered");
2230
2231 fault_catcher = catcher;
2232 }
2233
2234 static
2235 void sync_signalhandler_from_user ( ThreadId tid,
2236 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2237 {
2238 ThreadId qtid;
2239
2240 /* If some user-process sent us a sync signal (ie. it's not the result
2241 of a faulting instruction), then how we treat it depends on when it
2242 arrives... */
2243
2244 if (VG_(threads)[tid].status == VgTs_WaitSys) {
2245 /* Signal arrived while we're blocked in a syscall. This means that
2246 the client's signal mask was applied. In other words, we can't
2247 get here unless the client wants this signal right now. This means
2248 we can simply use the async_signalhandler. */
2249 if (VG_(clo_trace_signals))
2250 VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
2251 sigNo);
2252
2253 async_signalhandler(sigNo, info, uc);
2254 VG_(core_panic)("async_signalhandler returned!?\n");
2255
2256 } else {
2257 /* Signal arrived while in generated client code, or while running
2258 Valgrind core code. That means that every thread has these signals
2259 unblocked, so we can't rely on the kernel to route them properly, so
2260 we need to queue them manually. */
2261 if (VG_(clo_trace_signals))
2262 VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
2263
2264 # if defined(VGO_linux)
2265 /* On Linux, first we have to do a sanity check of the siginfo. */
2266 if (info->VKI_SIGINFO_si_pid == 0) {
2267 /* There's a per-user limit of pending siginfo signals. If
2268 you exceed this, by having more than that number of
2269 pending signals with siginfo, then new signals are
2270 delivered without siginfo. This condition can be caused
2271 by any unrelated program you're running at the same time
2272 as Valgrind, if it has a large number of pending siginfo
2273 signals which it isn't taking delivery of.
2274
2275 Since we depend on siginfo to work out why we were sent a
2276 signal and what we should do about it, we really can't
2277 continue unless we get it. */
2278 VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
2279 "I can't go on.\n", sigNo, VG_(signame)(sigNo));
2280 VG_(printf)(
2281 " This may be because one of your programs has consumed your ration of\n"
2282 " siginfo structures. For more information, see:\n"
2283 " http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
2284 " Basically, some program on your system is building up a large queue of\n"
2285 " pending signals, and this causes the siginfo data for other signals to\n"
2286 " be dropped because it's exceeding a system limit. However, Valgrind\n"
2287 " absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
2288 " offending program and avoid running it while using Valgrind, but there\n"
2289 " is no easy way to do this. Apparently the problem was fixed in kernel\n"
2290 " 2.6.12.\n");
2291
2292 /* It's a fatal signal, so we force the default handler. */
2293 VG_(set_default_handler)(sigNo);
2294 deliver_signal(tid, info, uc);
2295 resume_scheduler(tid);
2296 VG_(exit)(99); /* If we can't resume, then just exit */
2297 }
2298 # endif
2299
2300 qtid = 0; /* shared pending by default */
2301 # if defined(VGO_linux)
2302 if (info->si_code == VKI_SI_TKILL)
2303 qtid = tid; /* directed to us specifically */
2304 # endif
2305 queue_signal(qtid, info);
2306 }
2307 }
2308
2309 /* Returns the reported fault address for an exact address */
2310 static Addr fault_mask(Addr in)
2311 {
2312 /* We have to use VG_PGROUNDDN because faults on s390x only deliver
2313 the page address but not the address within a page.
2314 */
2315 # if defined(VGA_s390x)
2316 return VG_PGROUNDDN(in);
2317 # else
2318 return in;
2319 #endif
2320 }
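
/* Worked example (made-up address, 4KB pages): on s390x a fault at
   0x80123456 is reported as 0x80123000, so callers comparing against
   an exact address must pass that address through fault_mask too. */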
2321
2322 /* Returns True if the sync signal was due to the stack requiring extension
2323 and the extension was successful.
2324 */
2325 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
2326 {
2327 Addr fault;
2328 Addr esp;
2329 NSegment const* seg;
2330 NSegment const* seg_next;
2331
2332 if (info->si_signo != VKI_SIGSEGV)
2333 return False;
2334
2335 fault = (Addr)info->VKI_SIGINFO_si_addr;
2336 esp = VG_(get_SP)(tid);
2337 seg = VG_(am_find_nsegment)(fault);
2338 seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2339 : NULL;
2340
2341 if (VG_(clo_trace_signals)) {
2342 if (seg == NULL)
2343 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2344 "seg=NULL\n",
2345 info->si_code, fault, tid, esp);
2346 else
2347 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2348 "seg=%#lx-%#lx\n",
2349 info->si_code, fault, tid, esp, seg->start, seg->end);
2350 }
2351
2352 if (info->si_code == VKI_SEGV_MAPERR
2353 && seg
2354 && seg->kind == SkResvn
2355 && seg->smode == SmUpper
2356 && seg_next
2357 && seg_next->kind == SkAnonC
2358 && seg->end+1 == seg_next->start
2359 && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
2360 /* If the fault address is above esp but below the current known
2361 stack segment base, and it was a fault because there was
2362 nothing mapped there (as opposed to a permissions fault),
2363 then extend the stack segment.
2364 */
2365 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
2366 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
2367 if (VG_(clo_trace_signals))
2368 VG_(dmsg)(" -> extended stack base to %#lx\n",
2369 VG_PGROUNDDN(fault));
2370 return True;
2371 } else {
2372 VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
2373 tid, fault);
2374 return False;
2375 }
2376 } else {
2377 return False;
2378 }
2379 }
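
/* The sort of client code this caters for -- a hedged sketch: a frame
   with a large local array can legitimately touch memory just below
   the currently mapped stack pages; the resulting SEGV_MAPERR fault
   lands in the reservation segment and is absorbed by the extension
   above instead of being reported to the client. */
#if 0
static void big_frame(int depth)
{
   volatile char buf[64 * 1024];  /* may fault below the mapped stack */
   buf[0] = (char)depth;
   if (depth > 0)
      big_frame(depth - 1);
}
#endif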
2380
2381 static
2382 void sync_signalhandler_from_kernel ( ThreadId tid,
2383 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2384 {
2385 /* Check to see if some part of Valgrind itself is interested in faults.
2386 The fault catcher should never be set whilst we're in generated code, so
2387 check for that. AFAIK the only use of the catcher right now is
2388 memcheck's leak detector. */
2389 if (fault_catcher) {
2390 vg_assert(VG_(in_generated_code) == False);
2391
2392 (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
2393 /* If the catcher returns, then it didn't handle the fault,
2394 so carry on panicking. */
2395 }
2396
2397 if (extend_stack_if_appropriate(tid, info)) {
2398 /* Stack extension occurred, so we don't need to do anything else; upon
2399 returning from this function, we'll restart the host (hence guest)
2400 instruction. */
2401 } else {
2402 /* OK, this is a signal we really have to deal with. If it came
2403 from the client's code, then we can jump back into the scheduler
2404 and have it delivered. Otherwise it's a Valgrind bug. */
2405 ThreadState *tst = VG_(get_ThreadState)(tid);
2406
2407 if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
2408 /* signal is blocked, but they're not allowed to block faults */
2409 VG_(set_default_handler)(sigNo);
2410 }
2411
2412 if (VG_(in_generated_code)) {
2413 if (VG_(gdbserver_report_signal) (sigNo, tid)
2414 || VG_(sigismember)(&tst->sig_mask, sigNo)) {
2415 /* Can't continue; must longjmp back to the scheduler and thus
2416 enter the sighandler immediately. */
2417 deliver_signal(tid, info, uc);
2418 resume_scheduler(tid);
2419 }
2420 else
2421 resume_scheduler(tid);
2422 }
2423
2424 /* If resume_scheduler returns, or it's our fault, it means we
2425 don't have longjmp set up, implying that we weren't running
2426 client code, and therefore it was actually generated by
2427 Valgrind internally.
2428 */
2429 VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
2430 "a signal %d (%s) - exiting\n",
2431 sigNo, VG_(signame)(sigNo));
2432
2433 VG_(dmsg)("si_code=%x; Faulting address: %p; sp: %#lx\n",
2434 info->si_code, info->VKI_SIGINFO_si_addr,
2435 VG_UCONTEXT_STACK_PTR(uc));
2436
2437 if (0)
2438 VG_(kill_self)(sigNo); /* generate a core dump */
2439
2440 //if (tid == 0) /* could happen after everyone has exited */
2441 // tid = VG_(master_tid);
2442 vg_assert(tid != 0);
2443
2444 UnwindStartRegs startRegs;
2445 VG_(memset)(&startRegs, 0, sizeof(startRegs));
2446
2447 VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
2448 VG_(core_panic_at)("Killed by fatal signal", &startRegs);
2449 }
2450 }
2451
2452 /*
2453 Receive a sync signal from the host.
2454 */
2455 static
2456 void sync_signalhandler ( Int sigNo,
2457 vki_siginfo_t *info, struct vki_ucontext *uc )
2458 {
2459 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2460 Bool from_user;
2461
2462 if (0)
2463 VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
2464
2465 vg_assert(info != NULL);
2466 vg_assert(info->si_signo == sigNo);
2467 vg_assert(sigNo == VKI_SIGSEGV ||
2468 sigNo == VKI_SIGBUS ||
2469 sigNo == VKI_SIGFPE ||
2470 sigNo == VKI_SIGILL ||
2471 sigNo == VKI_SIGTRAP);
2472
2473 info->si_code = sanitize_si_code(info->si_code);
2474
2475 from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);
2476
2477 if (VG_(clo_trace_signals)) {
2478 VG_(dmsg)("sync signal handler: "
2479 "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
2480 sigNo, info->si_code, VG_(get_IP)(tid),
2481 VG_UCONTEXT_INSTR_PTR(uc),
2482 ( from_user ? "user" : "kernel" ));
2483 }
2484 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
2485
2486 /* // debug code:
2487 if (0) {
2488 VG_(printf)("info->si_signo %d\n", info->si_signo);
2489 VG_(printf)("info->si_errno %d\n", info->si_errno);
2490 VG_(printf)("info->si_code %d\n", info->si_code);
2491 VG_(printf)("info->si_pid %d\n", info->si_pid);
2492 VG_(printf)("info->si_uid %d\n", info->si_uid);
2493 VG_(printf)("info->si_status %d\n", info->si_status);
2494 VG_(printf)("info->si_addr %p\n", info->si_addr);
2495 }
2496 */
2497
2498 /* Figure out if the signal is being sent from outside the process.
2499 (Why do we care?) If the signal is from the user rather than the
2500 kernel, then treat it more like an async signal than a sync signal --
2501 that is, merely queue it for later delivery. */
2502 if (from_user) {
2503 sync_signalhandler_from_user( tid, sigNo, info, uc);
2504 } else {
2505 sync_signalhandler_from_kernel(tid, sigNo, info, uc);
2506 }
2507 }
2508
2509
2510 /*
2511 Kill this thread. Makes it leave any syscall it might be currently
2512 blocked in, and return to the scheduler. This doesn't mark the thread
2513 as exiting; that's the caller's job.
2514 */
2515 static void sigvgkill_handler(int signo, vki_siginfo_t *si,
2516 struct vki_ucontext *uc)
2517 {
2518 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2519 ThreadStatus at_signal = VG_(threads)[tid].status;
2520
2521 if (VG_(clo_trace_signals))
2522 VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);
2523
2524 VG_(acquire_BigLock)(tid, "sigvgkill_handler");
2525
2526 vg_assert(signo == VG_SIGVGKILL);
2527 vg_assert(si->si_signo == signo);
2528
2529 /* jrs 2006 August 3: the following assertion seems incorrect to
2530 me, and fails on AIX. sigvgkill could be sent to a thread which
2531 is runnable - see VG_(nuke_all_threads_except) in the scheduler.
2532 Hence comment these out ..
2533
2534 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
2535 VG_(post_syscall)(tid);
2536
2537 and instead do:
2538 */
2539 if (at_signal == VgTs_WaitSys)
2540 VG_(post_syscall)(tid);
2541 /* jrs 2006 August 3 ends */
2542
2543 resume_scheduler(tid);
2544
2545 VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
2546 }
2547
2548 static __attribute((unused))
2549 void pp_ksigaction ( vki_sigaction_toK_t* sa )
2550 {
2551 Int i;
2552 VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
2553 sa->ksa_handler,
2554 (UInt)sa->sa_flags,
2555 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2556 sa->sa_restorer
2557 # else
2558 (void*)0
2559 # endif
2560 );
2561 VG_(printf)("pp_ksigaction: { ");
2562 for (i = 1; i <= VG_(max_signal); i++)
2563 if (VG_(sigismember(&(sa->sa_mask),i)))
2564 VG_(printf)("%d ", i);
2565 VG_(printf)("}\n");
2566 }
2567
2568 /*
2569 Force signal handler to default
2570 */
2571 void VG_(set_default_handler)(Int signo)
2572 {
2573 vki_sigaction_toK_t sa;
2574
2575 sa.ksa_handler = VKI_SIG_DFL;
2576 sa.sa_flags = 0;
2577 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2578 sa.sa_restorer = 0;
2579 # endif
2580 VG_(sigemptyset)(&sa.sa_mask);
2581
2582 VG_(do_sys_sigaction)(signo, &sa, NULL);
2583 }
2584
2585 /*
2586 Poll for pending signals, and set the next one up for delivery.
2587 */
2588 void VG_(poll_signals)(ThreadId tid)
2589 {
2590 vki_siginfo_t si, *sip;
2591 vki_sigset_t pollset;
2592 ThreadState *tst = VG_(get_ThreadState)(tid);
2593 vki_sigset_t saved_mask;
2594
2595 /* look for all the signals this thread isn't blocking */
2596 /* pollset = ~tst->sig_mask */
2597 VG_(sigcomplementset)( &pollset, &tst->sig_mask );
2598
2599 block_all_host_signals(&saved_mask); // protect signal queue
2600
2601 /* First look for any queued pending signals */
2602 sip = next_queued(tid, &pollset); /* this thread */
2603
2604 if (sip == NULL)
2605 sip = next_queued(0, &pollset); /* process-wide */
2606
2607 /* If there was nothing queued, ask the kernel for a pending signal */
2608 if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
2609 if (VG_(clo_trace_signals))
2610 VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
2611 si.si_signo, tid);
2612 sip = &si;
2613 }
2614
2615 if (sip != NULL) {
2616 /* OK, something to do; deliver it */
2617 if (VG_(clo_trace_signals))
2618 VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
2619 if (!is_sig_ign(sip->si_signo, tid))
2620 deliver_signal(tid, sip, NULL);
2621 else if (VG_(clo_trace_signals))
2622 VG_(dmsg)(" signal %d ignored\n", sip->si_signo);
2623
2624 sip->si_signo = 0; /* remove from signal queue, if that's
2625 where it came from */
2626 }
2627
2628 restore_all_host_signals(&saved_mask);
2629 }
2630
2631 /* At startup, copy the process' real signal state to the SCSS.
2632 Whilst doing this, block all real signals. Then calculate SKSS and
2633 set the kernel to that. Also initialise DCSS.
2634 */
2635 void VG_(sigstartup_actions) ( void )
2636 {
2637 Int i, ret, vKI_SIGRTMIN;
2638 vki_sigset_t saved_procmask;
2639 vki_sigaction_fromK_t sa;
2640
2641 VG_(memset)(&scss, 0, sizeof(scss));
2642 VG_(memset)(&skss, 0, sizeof(skss));
2643
2644 # if defined(VKI_SIGRTMIN)
2645 vKI_SIGRTMIN = VKI_SIGRTMIN;
2646 # else
2647 vKI_SIGRTMIN = 0; /* eg Darwin */
2648 # endif
2649
2650 /* VG_(printf)("SIGSTARTUP\n"); */
2651 /* Block all signals. saved_procmask remembers the previous mask,
2652 which the first thread inherits.
2653 */
2654 block_all_host_signals( &saved_procmask );
2655
2656 /* Copy per-signal settings to SCSS. */
2657 for (i = 1; i <= _VKI_NSIG; i++) {
2658 /* Get the old host action */
2659 ret = VG_(sigaction)(i, NULL, &sa);
2660
2661 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2662 /* apparently we may not even ask about the disposition of these
2663 signals, let alone change them */
2664 if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
2665 continue;
2666 # endif
2667
2668 if (ret != 0)
2669 break;
2670
2671 /* Try setting it back to see if this signal is really
2672 available */
2673 if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
2674 && i >= vKI_SIGRTMIN) {
2675 vki_sigaction_toK_t tsa, sa2;
2676
2677 tsa.ksa_handler = (void *)sync_signalhandler;
2678 tsa.sa_flags = VKI_SA_SIGINFO;
2679 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2680 tsa.sa_restorer = 0;
2681 # endif
2682 VG_(sigfillset)(&tsa.sa_mask);
2683
2684 /* try setting it to some arbitrary handler */
2685 if (VG_(sigaction)(i, &tsa, NULL) != 0) {
2686 /* failed - not really usable */
2687 break;
2688 }
2689
2690 VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
2691 ret = VG_(sigaction)(i, &sa2, NULL);
2692 vg_assert(ret == 0);
2693 }
2694
2695 VG_(max_signal) = i;
2696
2697 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
2698 VG_(printf)("snaffling handler 0x%lx for signal %d\n",
2699 (Addr)(sa.ksa_handler), i );
2700
2701 scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
2702 scss.scss_per_sig[i].scss_flags = sa.sa_flags;
2703 scss.scss_per_sig[i].scss_mask = sa.sa_mask;
2704
2705 scss.scss_per_sig[i].scss_restorer = NULL;
2706 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2707 scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
2708 # endif
2709
2710 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2711 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2712 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2713 /*sa.sa_tramp;*/
2714 /* We can't know what it was, because Darwin's sys_sigaction
2715 doesn't tell us. */
2716 # endif
2717 }
2718
2719 if (VG_(clo_trace_signals))
2720 VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));
2721
2722 /* Our private internal signals are treated as ignored */
2723 scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
2724 scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
2725 VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
2726
2727 /* Copy the process' signal mask into the root thread. */
2728 vg_assert(VG_(threads)[1].status == VgTs_Init);
2729 for (i = 2; i < VG_N_THREADS; i++)
2730 vg_assert(VG_(threads)[i].status == VgTs_Empty);
2731
2732 VG_(threads)[1].sig_mask = saved_procmask;
2733 VG_(threads)[1].tmp_sig_mask = saved_procmask;
2734
2735 /* Calculate SKSS and apply it. This also sets the initial kernel
2736 mask we need to run with. */
2737 handle_SCSS_change( True /* forced update */ );
2738
2739 /* Leave with all signals still blocked; the thread scheduler loop
2740 will set the appropriate mask at the appropriate time. */
2741 }
2742
2743 /*--------------------------------------------------------------------*/
2744 /*--- end ---*/
2745 /*--------------------------------------------------------------------*/
2746