
/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2011 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals that result from an instruction fault. If
   we get one while running client code, then we just do the
   appropriate thing. If it happens while running Valgrind code, then
   it indicates a Valgrind bug. Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process. Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind. This means that we're also
   open to receiving those signals from other processes, sent with
   kill. We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals. If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: ie, the signal
   will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals. All other signals, sent by
   another process with kill. These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.


   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads. When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.


   We use a kernel thread for each application thread. When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to. This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).
*/

/*
   Some more details...

   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread). This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.

   However, we set the real signal state in the kernel to something
   entirely different. This is recorded in SKSS, the static Kernel
   signal state. What's nice (to the extent that anything is nice w.r.t
   signals) is that there's a pure function to calculate SKSS from SCSS,
   calculate_SKSS_from_SCSS. So when the client changes SCSS then we
   recompute the associated SKSS and apply any changes from the previous
   SKSS through to the kernel.

   Now, that said, the general scheme we have now is, that regardless of
   what the client puts into the SCSS (viz, asks for), what we would
   like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
       control is back with the scheduler), ask the kernel "do you have
       any signals for me?" and if it does, collect up the info, and
       deliver them to the client (by building sigframes).

   And that's almost what we do. The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work (of which more later).

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.

   Now unfortunately .. the above scheme only works for so-called async
   signals. An async signal is one which isn't associated with any
   particular instruction, eg Control-C (SIGINT). For those, it doesn't
   matter if we don't deliver the signal to the client immediately; it
   only matters that we deliver it eventually. Hence polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE. And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for async
       signals.

   (1) and (2) together imply that if the host does deliver a signal to
       async_signalhandler while the VCPU is running, something's
       seriously wrong.

   (3) when running code on vcpu, don't block sync signals. Instead
       register sync_signalhandler and catch any such via that. Of
       course, that means an ugly recovery path if we do -- the
       sync_signalhandler has to longjmp, exiting out of the generated
       code and the assembly dispatcher that runs it; the longjmp is
       caught in m_scheduler, which then tells m_signals to deliver the
       signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really run
   with) is way different from SCSS's signal mask (viz, what the client
   thread thought it asked for). (eg) It may well be that the client
   did not block control-C, so that it just expects to drop dead if it
   receives ^C whilst blocked in a syscall, but by default we are
   running with all async signals blocked, and so that signal could be
   arbitrarily delayed, or perhaps even lost (not sure).

   So what we have to do, when doing any syscall which SfMayBlock, is to
   quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition. This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S

   -----------

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever. These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_signalhandler, since we
             temporarily unblocked them for the duration of the syscall,
             by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc). As in the normal case,
             these signals are blocked, but we can and do poll for them
             using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero). This is trivial on Linux, since it's just a
   syscall. But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, long-winded and probably error-prone way.

   Finally, if gdb is debugging the process under valgrind, gdb can ask
   for a signal to be ignored. So, before resuming the scheduler /
   delivering the signal, we call VG_(gdbserver_report_signal); the
   signal is delivered only if that call returns True.
*/
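
/* As a rough sketch of the scheme just described (illustrative only --
   the real logic lives in VG_(poll_signals) and in m_scheduler, and the
   helper names used here are made up), each scheduling iteration
   conceptually does:

      void scheduler_iteration_sketch ( ThreadId tid )
      {
         // run generated code with all async signals blocked; sync
         // signals stay unblocked and go to sync_signalhandler
         run_thread_on_vcpu_for_a_while(tid);    // hypothetical helper

         // back in the scheduler: ask the kernel for pending async
         // signals, building sigframes for any that are found
         VG_(poll_signals)(tid);
      }

   VG_(poll_signals) itself boils down to a sigtimedwait with a zero
   timeout (VG_(sigtimedwait_zero)) against the signals the client has
   not blocked, so it never waits -- it merely collects what is already
   pending. */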

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"


/* ---------------------------------------------------------------------
   Forward decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

static const Char *signame(Int sigNo);

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS   8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do? A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall. (Or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.) Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc. Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
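
/* A hedged usage sketch (illustrative only; the function name below is
   made up -- the real caller is the async signal handler further down
   this file): provided the signal is known to have landed exactly at
   syscall completion, the syscall's result can be recovered from the
   ucontext like this:

      static void at_syscall_completion_sketch ( struct vki_ucontext* uc )
      {
         // only meaningful if the PC really is just after the syscall
         SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
         if (sr_isError(sres)) {
            // syscall failed: sr_Err(sres) holds the error code
         } else {
            // syscall succeeded: sr_Res(sres) holds the result value
         }
      }
*/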

#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20. The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9. I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct? And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext. The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext. In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an altivec instruction. If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it. That bit
   will always be clear under 2.4.20. So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                      \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);    \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);     \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK];  \
      }

#elif defined(VGP_ppc64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3. memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM. Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                        \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];             \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];              \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK];  \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;          \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;          \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr;  \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip;  \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp;  \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7;  \
      }

#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }

#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      I_die_here;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      I_die_here;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      I_die_here;
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      I_die_here;
   }

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc)        ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                  \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);    \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11];  \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14];  \
      }


#else
#  error Unknown platform
#endif

/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and Darwin vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#else
#  error Unknown OS
#endif


/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Sizing of these arrays [1 + _VKI_NSIG]:

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
*/


/* -----------------------------------------------------
   Static client signal state (SCSS). This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings. These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested. Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;


/* -----------------------------------------------------
   Static kernel signal state (SKSS). This is the state
   that we have the kernel in. It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/


typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* Returns True if the signal is to be ignored. To decide, we also
   give gdbserver (for thread tid) the chance to intercept it. */
static Bool is_sig_ign(Int sigNo, ThreadId tid)
{
   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);

   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN
      || !VG_(gdbserver_report_signal) (sigNo, tid);
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );

   }
}

/* This is the core, clever bit. Computation is as follows:

   For each signal
     handler = if client has a handler, then our handler
               else if client is DFL, then our handler as well
               else (client must be IGN)
                    then handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}


/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
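// (Illustrative note: with only one level, '#name' in _MY_SIGRETURN
// would stringify its argument literally, embedding the text
// "__NR_rt_sigreturn" in the asm. Going via MY_SIGRETURN first forces
// the argument to expand to its numeric value, so '#name' yields the
// actual syscall number -- e.g. "173", the x86-linux rt_sigreturn
// number -- which is what the mov/li instruction needs.)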
extern void my_sigreturn(void);

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   movl $" #name ", %eax\n" \
   "   int  $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   movq $" #name ", %rax\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64_linux)
#  define _MY_SIGRETURN(name) \
   ".align 2\n" \
   ".globl my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align 3\n" \
   "my_sigreturn:\n" \
   ".quad .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type .my_sigreturn,@function\n" \
   ".globl .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n\t" \
   "   mov r7, #" #name "\n\t" \
   "   svc 0x00000000\n" \
   ".previous\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);


static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
        !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc. Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
           !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         vg_assert(ksa_old.sa_restorer
                   == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}


/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack. */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}


SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}
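
/* For reference, the client-side idiom that exercises this path is the
   standard POSIX one (a sketch; 'handler' is a hypothetical client
   function, and SIGSTKSZ etc. are the client's libc definitions, not
   the VKI_ ones used inside Valgrind):

      stack_t ss;
      ss.ss_sp    = malloc(SIGSTKSZ);
      ss.ss_size  = SIGSTKSZ;
      ss.ss_flags = 0;
      sigaltstack(&ss, NULL);         // recorded above in tst->altstack

      struct sigaction sa;
      sa.sa_sigaction = handler;
      sa.sa_flags     = SA_ONSTACK | SA_SIGINFO;
      sigfillset(&sa.sa_mask);
      sigaction(SIGSEGV, &sa, NULL);  // handled by VG_(do_sys_sigaction)

   The SA_ONSTACK flag is what later makes push_signal_frame (below)
   build the handler frame on the recorded alternate stack. */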


SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions. The aim is to ensure that
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
        && new_act
        && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}


static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

static
HChar* format_sigset ( const vki_sigset_t* set )
{
   static HChar buf[128];
   int w;

   VG_(strcpy)(buf, "");

   for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   {
#     if _VKI_NSIG_BPW == 32
      VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
                   set ? (ULong)set->sig[w] : 0);
#     elif _VKI_NSIG_BPW == 64
      VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
                   set ? (ULong)set->sig[w] : 0);
#     else
#       error "Unsupported value for _VKI_NSIG_BPW"
#     endif
   }

   return buf;
}

/*
   This updates the thread's signal mask. There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}


SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}


/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
   Int           ret;
   vki_sigset_t  block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}

void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
}

/* ---------------------------------------------------------------------
   The signal simulation proper. A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */

/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                       const struct vki_ucontext *uc )
{
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using. Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %d: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, signame(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );

   } else {
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   }

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
}


static const Char *signame(Int sigNo)
{
   static Char buf[20];

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}

/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
#  if defined(VGO_linux)
   /* This sometimes fails with EPERM on Darwin. I don't know why. */
   vg_assert(r == 0);
#  endif

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}

// The si_code describes where the signal came from. Some come from the
// kernel, eg.: seg faults, illegal opcodes. Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO). There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here. We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#  if defined(VGO_linux)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel. There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );

#  elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable. It should be the
   // case that 0 means "user", and >0 means "kernel". But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h. Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal, because
   // the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   } else if (VKI_SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated. Reason being that
   // kernel-generated sync signals are more common, and it's probable that
   // misdiagnosing a user signal as a kernel signal is better than the
   // opposite.
   } else {
      return True;
   }
#  else
#    error Unknown OS
#  endif
}

// This is an arbitrary si_code that we only use internally. It corresponds
// to the value SI_KERNEL on Linux, but that's not really of any significance
// as far as I can determine.
#define VKI_SEGV_MADE_UP_GPF    0x80
1405
1406 /*
1407 Perform the default action of a signal. If the signal is fatal, it
1408 marks all threads as needing to exit, but it doesn't actually kill
1409 the process or thread.
1410
1411 If we're not being quiet, then print out some more detail about
1412 fatal signals (esp. core dumping signals).
1413 */
default_action(const vki_siginfo_t * info,ThreadId tid)1414 static void default_action(const vki_siginfo_t *info, ThreadId tid)
1415 {
1416 Int sigNo = info->si_signo;
1417 Bool terminate = False; /* kills process */
1418 Bool core = False; /* kills process w/ core */
1419 struct vki_rlimit corelim;
1420 Bool could_core;
1421
1422 vg_assert(VG_(is_running_thread)(tid));
1423
1424 switch(sigNo) {
1425 case VKI_SIGQUIT: /* core */
1426 case VKI_SIGILL: /* core */
1427 case VKI_SIGABRT: /* core */
1428 case VKI_SIGFPE: /* core */
1429 case VKI_SIGSEGV: /* core */
1430 case VKI_SIGBUS: /* core */
1431 case VKI_SIGTRAP: /* core */
1432 case VKI_SIGXCPU: /* core */
1433 case VKI_SIGXFSZ: /* core */
1434 terminate = True;
1435 core = True;
1436 break;
1437
1438 case VKI_SIGHUP: /* term */
1439 case VKI_SIGINT: /* term */
1440 case VKI_SIGKILL: /* term - we won't see this */
1441 case VKI_SIGPIPE: /* term */
1442 case VKI_SIGALRM: /* term */
1443 case VKI_SIGTERM: /* term */
1444 case VKI_SIGUSR1: /* term */
1445 case VKI_SIGUSR2: /* term */
1446 case VKI_SIGIO: /* term */
1447 # if defined(VKI_SIGPWR)
1448 case VKI_SIGPWR: /* term */
1449 # endif
1450 case VKI_SIGSYS: /* term */
1451 case VKI_SIGPROF: /* term */
1452 case VKI_SIGVTALRM: /* term */
1453 # if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
1454 case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
1455 # endif
1456 terminate = True;
1457 break;
1458 }
1459
1460 vg_assert(!core || (core && terminate));
1461
1462 if (VG_(clo_trace_signals))
1463 VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
1464 sigNo, info->si_code, terminate ? "terminate" : "ignore",
1465 core ? "+core" : "");
1466
1467 if (!terminate)
1468 return; /* nothing to do */
1469
1470 could_core = core;
1471
1472 if (core) {
1473 /* If they set the core-size limit to zero, don't generate a
1474 core file */
1475
1476 VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);
1477
1478 if (corelim.rlim_cur == 0)
1479 core = False;
1480 }
1481
1482 if ( (VG_(clo_verbosity) > 1 ||
1483 (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
1484 ) &&
1485 !VG_(clo_xml) ) {
1486 VG_(umsg)(
1487 "\n"
1488 "Process terminating with default action of signal %d (%s)%s\n",
1489 sigNo, signame(sigNo), core ? ": dumping core" : "");
1490
1491 /* Be helpful - decode some more details about this fault */
1492 if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
1493 const Char *event = NULL;
1494 Bool haveaddr = True;
1495
1496 switch(sigNo) {
1497 case VKI_SIGSEGV:
1498 switch(info->si_code) {
1499 case VKI_SEGV_MAPERR: event = "Access not within mapped region";
1500 break;
1501 case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
1502 break;
1503 case VKI_SEGV_MADE_UP_GPF:
1504 /* General Protection Fault: The CPU/kernel
1505 isn't telling us anything useful, but this
1506 is commonly the result of exceeding a
1507 segment limit. */
1508 event = "General Protection Fault";
1509 haveaddr = False;
1510 break;
1511 }
1512 #if 0
1513 {
1514 HChar buf[110];
1515 VG_(am_show_nsegments)(0,"post segfault");
1516 VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1517 VG_(system)(buf);
1518 }
1519 #endif
1520 break;
1521
1522 case VKI_SIGILL:
1523 switch(info->si_code) {
1524 case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
1525 case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
1526 case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
1527 case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
1528 case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
1529 case VKI_ILL_PRVREG: event = "Privileged register"; break;
1530 case VKI_ILL_COPROC: event = "Coprocessor error"; break;
1531 case VKI_ILL_BADSTK: event = "Internal stack error"; break;
1532 }
1533 break;
1534
1535 case VKI_SIGFPE:
1536 switch (info->si_code) {
1537 case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
1538 case VKI_FPE_INTOVF: event = "Integer overflow"; break;
1539 case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
1540 case VKI_FPE_FLTOVF: event = "FP overflow"; break;
1541 case VKI_FPE_FLTUND: event = "FP underflow"; break;
1542 case VKI_FPE_FLTRES: event = "FP inexact"; break;
1543 case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
1544 case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
1545 }
1546 break;
1547
1548 case VKI_SIGBUS:
1549 switch (info->si_code) {
1550 case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
1551 case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
1552 case VKI_BUS_OBJERR: event = "Hardware error"; break;
1553 }
1554 break;
1555 } /* switch (sigNo) */
1556
1557 if (event != NULL) {
1558 if (haveaddr)
1559 VG_(umsg)(" %s at address %p\n",
1560 event, info->VKI_SIGINFO_si_addr);
1561 else
1562 VG_(umsg)(" %s\n", event);
1563 }
1564 }
1565 /* Print a stack trace. Be cautious if the thread's SP is in an
1566 obviously stupid place (not mapped readable) that would
1567 likely cause a segfault. */
1568 if (VG_(is_valid_tid)(tid)) {
1569 ExeContext* ec = VG_(am_is_valid_for_client)
1570 (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
1571 ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
1572 : VG_(record_depth_1_ExeContext)( tid );
1573 vg_assert(ec);
1574 VG_(pp_ExeContext)( ec );
1575 }
1576 if (sigNo == VKI_SIGSEGV
1577 && info && is_signal_from_kernel(tid, sigNo, info->si_code)
1578 && info->si_code == VKI_SEGV_MAPERR) {
1579 VG_(umsg)(" If you believe this happened as a result of a stack\n" );
1580 VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
1581 VG_(umsg)(" possible), you can try to increase the size of the\n" );
1582 VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
1583 // FIXME: assumes main ThreadId == 1
1584 if (VG_(is_valid_tid)(1)) {
1585 VG_(umsg)(
1586 " The main thread stack size used in this run was %d.\n",
1587 (Int)VG_(threads)[1].client_stack_szB);
1588 }
1589 }
1590 }
1591
1592 if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
1593 VG_(start_debugger)( tid );
1594 }
1595
1596 if (core) {
1597       static const struct vki_rlimit zero = { 0, 0 };
1598
1599 VG_(make_coredump)(tid, info, corelim.rlim_cur);
1600
1601 /* Make sure we don't get a confusing kernel-generated
1602 coredump when we finally exit */
1603 VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
1604 }
1605
1606 /* stash fatal signal in main thread */
1607 // what's this for?
1608 //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
1609
1610 /* everyone dies */
1611 VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
1612 VG_(threads)[tid].exitreason = VgSrc_FatalSig;
1613 VG_(threads)[tid].os_state.fatalsig = sigNo;
1614 }
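
/* Illustrative sketch, not part of the build: the si_code/si_addr
   decoding above is reading exactly what the kernel hands to any
   SA_SIGINFO handler.  A minimal user-space equivalent (standard
   POSIX; fprintf is not async-signal-safe, but this is only a demo)
   might look like: */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
   /* For a kernel-generated SIGSEGV, si_code is SEGV_MAPERR or
      SEGV_ACCERR and si_addr holds the faulting address. */
   fprintf(stderr, "sig %d, si_code %d, fault addr %p\n",
           sig, info->si_code, info->si_addr);
   _exit(1);
}

int main(void)
{
   struct sigaction sa;
   sa.sa_sigaction = segv_handler;
   sa.sa_flags     = SA_SIGINFO;
   sigemptyset(&sa.sa_mask);
   sigaction(SIGSEGV, &sa, NULL);
   *(volatile int *)0 = 0;   /* unmapped write: si_code == SEGV_MAPERR */
   return 0;
}
#endif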
1615
1616 /*
1617 This does the business of delivering a signal to a thread. It may
1618 be called from either a real signal handler, or from normal code to
1619 cause the thread to enter the signal handler.
1620
1621 This updates the thread state, but it does not set it to be
1622 Runnable.
1623 */
1624 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
1625 const struct vki_ucontext *uc )
1626 {
1627 Int sigNo = info->si_signo;
1628 SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
1629 void *handler_fn;
1630 ThreadState *tst = VG_(get_ThreadState)(tid);
1631
1632 if (VG_(clo_trace_signals))
1633 VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
1634 sigNo, signame(sigNo), info->si_code, tid );
1635
1636 if (sigNo == VG_SIGVGKILL) {
1637 /* If this is a SIGVGKILL, we're expecting it to interrupt any
1638 blocked syscall. It doesn't matter whether the VCPU state is
1639 set to restart or not, because we don't expect it will
1640 execute any more client instructions. */
1641 vg_assert(VG_(is_exiting)(tid));
1642 return;
1643 }
1644
1645 /* If the client specifies SIG_IGN, treat it as SIG_DFL.
1646
1647 If deliver_signal() is being called on a thread, we want
1648 the signal to get through no matter what; if they're ignoring
1649 it, then we do this override (this is so we can send it SIGSEGV,
1650 etc). */
1651 handler_fn = handler->scss_handler;
1652 if (handler_fn == VKI_SIG_IGN)
1653 handler_fn = VKI_SIG_DFL;
1654
1655 vg_assert(handler_fn != VKI_SIG_IGN);
1656
1657 if (handler_fn == VKI_SIG_DFL) {
1658 default_action(info, tid);
1659 } else {
1660 /* Create a signal delivery frame, and set the client's %ESP and
1661 %EIP so that when execution continues, we will enter the
1662 signal handler with the frame on top of the client's stack,
1663 as it expects.
1664
1665 Signal delivery can fail if the client stack is too small or
1666 missing, and we can't push the frame. If that happens,
1667 push_signal_frame will cause the whole process to exit when
1668 we next hit the scheduler.
1669 */
1670 vg_assert(VG_(is_valid_tid)(tid));
1671
1672 push_signal_frame ( tid, info, uc );
1673
1674 if (handler->scss_flags & VKI_SA_ONESHOT) {
1675 /* Do the ONESHOT thing. */
1676 handler->scss_handler = VKI_SIG_DFL;
1677
1678 handle_SCSS_change( False /* lazy update */ );
1679 }
1680
1681 /* At this point:
1682 tst->sig_mask is the current signal mask
1683 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
1684 handler->scss_mask is the mask set by the handler
1685
1686 Handler gets a mask of tmp_sig_mask|handler_mask|signo
1687 */
1688 tst->sig_mask = tst->tmp_sig_mask;
1689 if (!(handler->scss_flags & VKI_SA_NOMASK)) {
1690 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
1691 VG_(sigaddset)(&tst->sig_mask, sigNo);
1692 tst->tmp_sig_mask = tst->sig_mask;
1693 }
1694 }
1695
1696 /* Thread state is ready to go - just add Runnable */
1697 }
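
/* Illustrative sketch, not part of the build: the mask computation at
   the end of deliver_signal(), restated in isolation.  On handler
   entry the thread's mask becomes tmp_sig_mask | handler's mask |
   {signo}, unless the handler was installed with SA_NODEFER (aka
   VKI_SA_NOMASK), in which case it stays at tmp_sig_mask.  The helper
   name is hypothetical. */
#if 0
static vki_sigset_t mask_on_handler_entry ( vki_sigset_t tmp_sig_mask,
                                            vki_sigset_t handler_mask,
                                            Int signo, UInt sa_flags )
{
   vki_sigset_t m = tmp_sig_mask;
   if (!(sa_flags & VKI_SA_NOMASK)) {
      VG_(sigaddset_from_set)(&m, &handler_mask);  /* block handler's mask */
      VG_(sigaddset)(&m, signo);                   /* and the signal itself */
   }
   return m;
}
#endif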
1698
1699 static void resume_scheduler(ThreadId tid)
1700 {
1701 ThreadState *tst = VG_(get_ThreadState)(tid);
1702
1703 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1704
1705 if (tst->sched_jmpbuf_valid) {
1706 /* Can't continue; must longjmp back to the scheduler and thus
1707 enter the sighandler immediately. */
1708 VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
1709 }
1710 }
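
/* Illustrative sketch, not part of the build: the longjmp above only
   works against a matching setjmp in the scheduler's run loop.  The
   pattern, reduced to plain setjmp/longjmp with hypothetical names
   (the real code uses VG_MINIMAL_SETJMP / VG_MINIMAL_LONGJMP; a real
   handler-to-mainline jump would also want sigsetjmp/siglongjmp so the
   signal mask is restored): */
#if 0
#include <setjmp.h>

static jmp_buf sched_jmpbuf;
static int     sched_jmpbuf_valid = 0;

static void run_thread_for_a_while(void)
{
   if (setjmp(sched_jmpbuf) == 0) {
      sched_jmpbuf_valid = 1;
      /* ... run generated code; a signal handler may longjmp back ... */
   }
   /* Either the code finished normally, or a handler longjmp'd here. */
   sched_jmpbuf_valid = 0;
}

static void handler_side(void)
{
   if (sched_jmpbuf_valid)
      longjmp(sched_jmpbuf, 1);   /* does not return */
}
#endif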
1711
1712 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
1713 {
1714 vki_siginfo_t info;
1715
1716 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1717
1718 VG_(memset)(&info, 0, sizeof(info));
1719 info.si_signo = VKI_SIGSEGV;
1720 info.si_code = si_code;
1721 info.VKI_SIGINFO_si_addr = (void*)addr;
1722
1723 /* even if gdbserver indicates to ignore the signal, we will deliver it */
1724 VG_(gdbserver_report_signal) (VKI_SIGSEGV, tid);
1725
1726 /* If they're trying to block the signal, force it to be delivered */
1727 if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
1728 VG_(set_default_handler)(VKI_SIGSEGV);
1729
1730 deliver_signal(tid, &info, NULL);
1731 }
1732
1733 // Synthesize a fault where the address is OK, but the page
1734 // permissions are bad.
1735 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
1736 {
1737 synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
1738 }
1739
1740 // Synthesize a fault where there's nothing mapped at the address.
1741 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
1742 {
1743 synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
1744 }
1745
1746 // Synthesize a misc memory fault.
1747 void VG_(synth_fault)(ThreadId tid)
1748 {
1749 synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
1750 }
1751
1752 // Synthesise a SIGILL.
1753 void VG_(synth_sigill)(ThreadId tid, Addr addr)
1754 {
1755 vki_siginfo_t info;
1756
1757 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1758
1759 VG_(memset)(&info, 0, sizeof(info));
1760 info.si_signo = VKI_SIGILL;
1761 info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
1762 info.VKI_SIGINFO_si_addr = (void*)addr;
1763
1764 if (VG_(gdbserver_report_signal) (VKI_SIGILL, tid)) {
1765 resume_scheduler(tid);
1766 deliver_signal(tid, &info, NULL);
1767 }
1768 else
1769 resume_scheduler(tid);
1770 }
1771
1772 // Synthesise a SIGBUS.
1773 void VG_(synth_sigbus)(ThreadId tid)
1774 {
1775 vki_siginfo_t info;
1776
1777 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1778
1779 VG_(memset)(&info, 0, sizeof(info));
1780 info.si_signo = VKI_SIGBUS;
1781 /* There are several meanings to SIGBUS (as per POSIX, presumably),
1782 but the most widely understood is "invalid address alignment",
1783 so let's use that. */
1784 info.si_code = VKI_BUS_ADRALN;
1785 /* If we knew the invalid address in question, we could put it
1786 in .si_addr. Oh well. */
1787 /* info.VKI_SIGINFO_si_addr = (void*)addr; */
1788
1789 if (VG_(gdbserver_report_signal) (VKI_SIGBUS, tid)) {
1790 resume_scheduler(tid);
1791 deliver_signal(tid, &info, NULL);
1792 }
1793 else
1794 resume_scheduler(tid);
1795 }
1796
1797 // Synthesise a SIGTRAP.
1798 void VG_(synth_sigtrap)(ThreadId tid)
1799 {
1800 vki_siginfo_t info;
1801 struct vki_ucontext uc;
1802 # if defined(VGP_x86_darwin)
1803 struct __darwin_mcontext32 mc;
1804 # elif defined(VGP_amd64_darwin)
1805 struct __darwin_mcontext64 mc;
1806 # endif
1807
1808 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1809
1810 VG_(memset)(&info, 0, sizeof(info));
1811 VG_(memset)(&uc, 0, sizeof(uc));
1812 info.si_signo = VKI_SIGTRAP;
1813 info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
1814
1815 # if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
1816 uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
1817 for a breakpoint trap... */
1818 uc.uc_mcontext.err = 0; /* tjh: no error code for x86
1819 breakpoint trap... */
1820 # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1821 /* the same thing, but using Darwin field/struct names */
1822 VG_(memset)(&mc, 0, sizeof(mc));
1823 uc.uc_mcontext = &mc;
1824 uc.uc_mcontext->__es.__trapno = 3;
1825 uc.uc_mcontext->__es.__err = 0;
1826 # endif
1827
1828 /* fixs390: do we need to do anything here for s390 ? */
1829 if (VG_(gdbserver_report_signal) (VKI_SIGTRAP, tid)) {
1830 resume_scheduler(tid);
1831 deliver_signal(tid, &info, &uc);
1832 }
1833 else
1834 resume_scheduler(tid);
1835 }
1836
1837 /* Make a signal pending for a thread, for later delivery.
1838 VG_(poll_signals) will arrange for it to be delivered at the right
1839 time.
1840
1841    tid==0 means add it to the process-wide queue, and not send it to a
1842 specific thread.
1843 */
1844 static
1845 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
1846 {
1847 ThreadState *tst;
1848 SigQueue *sq;
1849 vki_sigset_t savedmask;
1850
1851 tst = VG_(get_ThreadState)(tid);
1852
1853 /* Protect the signal queue against async deliveries */
1854 block_all_host_signals(&savedmask);
1855
1856 if (tst->sig_queue == NULL) {
1857 tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
1858 sizeof(*tst->sig_queue));
1859 VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
1860 }
1861 sq = tst->sig_queue;
1862
1863 if (VG_(clo_trace_signals))
1864 VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
1865 si->si_signo, sq->next, tid);
1866
1867 /* Add signal to the queue. If the queue gets overrun, then old
1868 queued signals may get lost.
1869
1870 XXX We should also keep a sigset of pending signals, so that at
1871       least a non-siginfo signal gets delivered.
1872 */
1873 if (sq->sigs[sq->next].si_signo != 0)
1874 VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
1875 sq->sigs[sq->next].si_signo, tid);
1876
1877 sq->sigs[sq->next] = *si;
1878 sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
1879
1880 restore_all_host_signals(&savedmask);
1881 }
1882
1883 /*
1884 Returns the next queued signal for thread tid which is in "set".
1885 tid==0 means process-wide signal. Set si_signo to 0 when the
1886 signal has been delivered.
1887
1888 Must be called with all signals blocked, to protect against async
1889 deliveries.
1890 */
1891 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
1892 {
1893 ThreadState *tst = VG_(get_ThreadState)(tid);
1894 SigQueue *sq;
1895 Int idx;
1896 vki_siginfo_t *ret = NULL;
1897
1898 sq = tst->sig_queue;
1899 if (sq == NULL)
1900 goto out;
1901
1902 idx = sq->next;
1903 do {
1904 if (0)
1905 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
1906 sq->sigs[idx].si_signo,
1907 VG_(sigismember)(set, sq->sigs[idx].si_signo));
1908
1909 if (sq->sigs[idx].si_signo != 0
1910 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
1911 if (VG_(clo_trace_signals))
1912 VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
1913 sq->sigs[idx].si_signo, idx, tid);
1914 ret = &sq->sigs[idx];
1915 goto out;
1916 }
1917
1918 idx = (idx + 1) % N_QUEUED_SIGNALS;
1919 } while(idx != sq->next);
1920 out:
1921 return ret;
1922 }
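
/* Illustrative sketch, not part of the build: queue_signal() and
   next_queued() together implement a fixed ring of N_QUEUED_SIGNALS
   slots in which sq->next is both the write position and the oldest
   entry, so an overrun overwrites the oldest signal, and the scan
   above returns the oldest match first.  Reduced to a standalone demo
   with hypothetical names: */
#if 0
#define N_SLOTS 8

static struct { int si_signo; } sigs[N_SLOTS];
static int next_slot = 0;

static void ring_write(int signo)
{
   sigs[next_slot].si_signo = signo;              /* clobbers oldest slot */
   next_slot = (next_slot + 1) % N_SLOTS;
}

static int ring_scan_oldest_first(void)           /* 0 => ring is empty   */
{
   int idx = next_slot;
   do {
      if (sigs[idx].si_signo != 0)                /* si_signo 0 == free   */
         return sigs[idx].si_signo;
      idx = (idx + 1) % N_SLOTS;
   } while (idx != next_slot);
   return 0;
}
#endif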
1923
1924 static int sanitize_si_code(int si_code)
1925 {
1926 #if defined(VGO_linux)
1927    /* The Linux kernel uses the top 16 bits of si_code for its own
1928 use and only exports the bottom 16 bits to user space - at least
1929 that is the theory, but it turns out that there are some kernels
1930 around that forget to mask out the top 16 bits so we do it here.
1931
1932 The kernel treats the bottom 16 bits as signed and (when it does
1933 mask them off) sign extends them when exporting to user space so
1934 we do the same thing here. */
1935 return (Short)si_code;
1936 #elif defined(VGO_darwin)
1937 return si_code;
1938 #else
1939 # error Unknown OS
1940 #endif
1941 }
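
/* Worked examples, not part of the build: the (Short) cast above masks
   to the bottom 16 bits and sign-extends in a single step, matching
   the kernel's own export behaviour.  On Linux SI_TKILL is -6, which
   arrives as 0xFFFA in the bottom 16 bits: */
#if 0
   sanitize_si_code(0xFFFA)     == -6;  /* SI_TKILL                      */
   sanitize_si_code(0x0001FFFA) == -6;  /* same, but a buggy kernel left
                                           the top 16 bits set           */
   sanitize_si_code(0x00010001) == 1;   /* SEGV_MAPERR with junk above   */
#endif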
1942
1943 /*
1944 Receive an async signal from the kernel.
1945
1946 This should only happen when the thread is blocked in a syscall,
1947 since that's the only time this set of signals is unblocked.
1948 */
1949 static
1950 void async_signalhandler ( Int sigNo,
1951 vki_siginfo_t *info, struct vki_ucontext *uc )
1952 {
1953 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
1954 ThreadState* tst = VG_(get_ThreadState)(tid);
1955 SysRes sres;
1956
1957 /* The thread isn't currently running, make it so before going on */
1958 vg_assert(tst->status == VgTs_WaitSys);
1959 VG_(acquire_BigLock)(tid, "async_signalhandler");
1960
1961 info->si_code = sanitize_si_code(info->si_code);
1962
1963 if (VG_(clo_trace_signals))
1964 VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
1965 sigNo, tid, info->si_code);
1966
1967 /* Update thread state properly. The signal can only have been
1968 delivered whilst we were in
1969 coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
1970 window between the two sigprocmask calls, since at all other
1971 times, we run with async signals on the host blocked. Hence
1972 make enquiries on the basis that we were in or very close to a
1973 syscall, and attempt to fix up the guest state accordingly.
1974
1975 (normal async signals occurring during computation are blocked,
1976 but periodically polled for using VG_(sigtimedwait_zero), and
1977 delivered at a point convenient for us. Hence this routine only
1978 deals with signals that are delivered to a thread during a
1979 syscall.) */
1980
1981 /* First, extract a SysRes from the ucontext_t* given to this
1982 handler. If it is subsequently established by
1983 VG_(fixup_guest_state_after_syscall_interrupted) that the
1984 syscall was complete but the results had not been committed yet
1985 to the guest state, then it'll have to commit the results itself
1986 "by hand", and so we need to extract the SysRes. Of course if
1987 the thread was not in that particular window then the
1988 SysRes will be meaningless, but that's OK too because
1989 VG_(fixup_guest_state_after_syscall_interrupted) will detect
1990 that the thread was not in said window and ignore the SysRes. */
1991
1992 /* To make matters more complex still, on Darwin we need to know
1993 the "class" of the syscall under consideration in order to be
1994       able to extract a correct SysRes. The class will have been
1995 saved just before the syscall, by VG_(client_syscall), into this
1996 thread's tst->arch.vex.guest_SC_CLASS. Hence: */
1997 # if defined(VGO_darwin)
1998 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
1999 # else
2000 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
2001 # endif
2002
2003 /* (1) */
2004 VG_(fixup_guest_state_after_syscall_interrupted)(
2005 tid,
2006 VG_UCONTEXT_INSTR_PTR(uc),
2007 sres,
2008 !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
2009 );
2010
2011 /* (2) */
2012 /* Set up the thread's state to deliver a signal */
2013 if (!is_sig_ign(info->si_signo, tid))
2014 deliver_signal(tid, info, uc);
2015
2016 /* It's crucial that (1) and (2) happen in the order (1) then (2)
2017 and not the other way around. (1) fixes up the guest thread
2018 state to reflect the fact that the syscall was interrupted --
2019 either to restart the syscall or to return EINTR. (2) then sets
2020 up the thread state to deliver the signal. Then we resume
2021 execution. First, the signal handler is run, since that's the
2022 second adjustment we made to the thread state. If that returns,
2023 then we resume at the guest state created by (1), viz, either
2024 the syscall returns EINTR or is restarted.
2025
2026 If (2) was done before (1) the outcome would be completely
2027 different, and wrong. */
2028
2029 /* longjmp back to the thread's main loop to start executing the
2030 handler. */
2031 resume_scheduler(tid);
2032
2033 VG_(core_panic)("async_signalhandler: got unexpected signal "
2034 "while outside of scheduler");
2035 }
2036
2037 /* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
2038
2039 Returns True on success, False on failure.
2040
2041 Succeeds without doing anything if addr is already within a segment.
2042
2043 Failure could be caused by:
2044 - addr not below a growable segment
2045 - new stack size would exceed maxsize
2046 - mmap failed for some other reason
2047 */
2048 Bool VG_(extend_stack)(Addr addr, UInt maxsize)
2049 {
2050 SizeT udelta;
2051
2052 /* Find the next Segment above addr */
2053 NSegment const* seg
2054 = VG_(am_find_nsegment)(addr);
2055 NSegment const* seg_next
2056 = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2057 : NULL;
2058
2059 if (seg && seg->kind == SkAnonC)
2060 /* addr is already mapped. Nothing to do. */
2061 return True;
2062
2063 /* Check that the requested new base is in a shrink-down
2064 reservation section which abuts an anonymous mapping that
2065 belongs to the client. */
2066 if ( ! (seg
2067 && seg->kind == SkResvn
2068 && seg->smode == SmUpper
2069 && seg_next
2070 && seg_next->kind == SkAnonC
2071 && seg->end+1 == seg_next->start))
2072 return False;
2073
2074 udelta = VG_PGROUNDUP(seg_next->start - addr);
2075 VG_(debugLog)(1, "signals",
2076 "extending a stack base 0x%llx down by %lld\n",
2077 (ULong)seg_next->start, (ULong)udelta);
2078 if (! VG_(am_extend_into_adjacent_reservation_client)
2079 ( (NSegment*)seg_next, -(SSizeT)udelta )) {
2080 VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
2081 return False;
2082 }
2083
2084 /* When we change the main stack, we have to let the stack handling
2085 code know about it. */
2086 VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
2087
2088 if (VG_(clo_sanity_level) > 2)
2089 VG_(sanity_check_general)(False);
2090
2091 return True;
2092 }
2093
2094 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
2095
2096 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
2097 {
2098 if (0)
2099 VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
2100 vg_assert2(NULL == catcher || NULL == fault_catcher,
2101 "Fault catcher is already registered");
2102
2103 fault_catcher = catcher;
2104 }
2105
2106 static
2107 void sync_signalhandler_from_user ( ThreadId tid,
2108 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2109 {
2110 ThreadId qtid;
2111
2112 /* If some user-process sent us a sync signal (ie. it's not the result
2113 of a faulting instruction), then how we treat it depends on when it
2114 arrives... */
2115
2116 if (VG_(threads)[tid].status == VgTs_WaitSys) {
2117 /* Signal arrived while we're blocked in a syscall. This means that
2118         the client's signal mask was applied. In other words, we can't
2119 get here unless the client wants this signal right now. This means
2120 we can simply use the async_signalhandler. */
2121 if (VG_(clo_trace_signals))
2122 VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
2123 sigNo);
2124
2125 async_signalhandler(sigNo, info, uc);
2126 VG_(core_panic)("async_signalhandler returned!?\n");
2127
2128 } else {
2129 /* Signal arrived while in generated client code, or while running
2130 Valgrind core code. That means that every thread has these signals
2131         unblocked, so we can't rely on the kernel to route them properly;
2132         we have to queue them manually.
2133 if (VG_(clo_trace_signals))
2134 VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
2135
2136 # if defined(VGO_linux)
2137 /* On Linux, first we have to do a sanity check of the siginfo. */
2138 if (info->VKI_SIGINFO_si_pid == 0) {
2139 /* There's a per-user limit of pending siginfo signals. If
2140 you exceed this, by having more than that number of
2141 pending signals with siginfo, then new signals are
2142 delivered without siginfo. This condition can be caused
2143 by any unrelated program you're running at the same time
2144 as Valgrind, if it has a large number of pending siginfo
2145 signals which it isn't taking delivery of.
2146
2147 Since we depend on siginfo to work out why we were sent a
2148 signal and what we should do about it, we really can't
2149 continue unless we get it. */
2150 VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
2151 "I can't go on.\n", sigNo, signame(sigNo));
2152 VG_(printf)(
2153 " This may be because one of your programs has consumed your ration of\n"
2154 " siginfo structures. For more information, see:\n"
2155 " http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
2156 " Basically, some program on your system is building up a large queue of\n"
2157 " pending signals, and this causes the siginfo data for other signals to\n"
2158 " be dropped because it's exceeding a system limit. However, Valgrind\n"
2159 " absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
2160 " offending program and avoid running it while using Valgrind, but there\n"
2161 " is no easy way to do this. Apparently the problem was fixed in kernel\n"
2162 " 2.6.12.\n");
2163
2164 /* It's a fatal signal, so we force the default handler. */
2165 VG_(set_default_handler)(sigNo);
2166 deliver_signal(tid, info, uc);
2167 resume_scheduler(tid);
2168 VG_(exit)(99); /* If we can't resume, then just exit */
2169 }
2170 # endif
2171
2172 qtid = 0; /* shared pending by default */
2173 # if defined(VGO_linux)
2174 if (info->si_code == VKI_SI_TKILL)
2175 qtid = tid; /* directed to us specifically */
2176 # endif
2177 queue_signal(qtid, info);
2178 }
2179 }
2180
2181 /* Returns the reported fault address for an exact address */
2182 static Addr fault_mask(Addr in)
2183 {
2184 /* We have to use VG_PGROUNDDN because faults on s390x only deliver
2185 the page address but not the address within a page.
2186 */
2187 # if defined(VGA_s390x)
2188 return VG_PGROUNDDN(in);
2189 # else
2190 return in;
2191 # endif
2192 }
2193
2194 /* Returns True if the sync signal was due to the stack requiring extension
2195 and the extension was successful.
2196 */
2197 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
2198 {
2199 Addr fault;
2200 Addr esp;
2201 NSegment const* seg;
2202 NSegment const* seg_next;
2203
2204 if (info->si_signo != VKI_SIGSEGV)
2205 return False;
2206
2207 fault = (Addr)info->VKI_SIGINFO_si_addr;
2208 esp = VG_(get_SP)(tid);
2209 seg = VG_(am_find_nsegment)(fault);
2210 seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2211 : NULL;
2212
2213 if (VG_(clo_trace_signals)) {
2214 if (seg == NULL)
2215 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2216 "seg=NULL\n",
2217 info->si_code, fault, tid, esp);
2218 else
2219 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2220 "seg=%#lx-%#lx\n",
2221 info->si_code, fault, tid, esp, seg->start, seg->end);
2222 }
2223
2224 if (info->si_code == VKI_SEGV_MAPERR
2225 && seg
2226 && seg->kind == SkResvn
2227 && seg->smode == SmUpper
2228 && seg_next
2229 && seg_next->kind == SkAnonC
2230 && seg->end+1 == seg_next->start
2231 && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
2232 /* If the fault address is above esp but below the current known
2233 stack segment base, and it was a fault because there was
2234 nothing mapped there (as opposed to a permissions fault),
2235 then extend the stack segment.
2236 */
2237 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
2238 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
2239 if (VG_(clo_trace_signals))
2240 VG_(dmsg)(" -> extended stack base to %#lx\n",
2241 VG_PGROUNDDN(fault));
2242 return True;
2243 } else {
2244 VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
2245 tid, fault);
2246 return False;
2247 }
2248 } else {
2249 return False;
2250 }
2251 }
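
/* Illustrative sketch, not part of the build: the growth decision
   above, boiled down.  A SIGSEGV is treated as stack growth only when
   it is a MAPERR fault landing in the shrink-down guard reservation
   that sits directly below the client stack, at or above SP minus the
   architecture's redzone.  The helper name and parameters are
   hypothetical: */
#if 0
static Bool looks_like_stack_growth ( Addr fault, Addr sp,
                                      Addr resvn_start, Addr resvn_end,
                                      Addr stack_start )
{
   return fault >= resvn_start && fault <= resvn_end  /* in guard resvn  */
       && resvn_end + 1 == stack_start                /* abuts the stack */
       && fault >= sp - VG_STACK_REDZONE_SZB;         /* near enough SP  */
}
#endif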
2252
2253 static
2254 void sync_signalhandler_from_kernel ( ThreadId tid,
2255 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2256 {
2257 /* Check to see if some part of Valgrind itself is interested in faults.
2258 The fault catcher should never be set whilst we're in generated code, so
2259 check for that. AFAIK the only use of the catcher right now is
2260 memcheck's leak detector. */
2261 if (fault_catcher) {
2262 vg_assert(VG_(in_generated_code) == False);
2263
2264 (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
2265 /* If the catcher returns, then it didn't handle the fault,
2266 so carry on panicking. */
2267 }
2268
2269 if (extend_stack_if_appropriate(tid, info)) {
2270 /* Stack extension occurred, so we don't need to do anything else; upon
2271 returning from this function, we'll restart the host (hence guest)
2272 instruction. */
2273 } else {
2274 /* OK, this is a signal we really have to deal with. If it came
2275 from the client's code, then we can jump back into the scheduler
2276 and have it delivered. Otherwise it's a Valgrind bug. */
2277 ThreadState *tst = VG_(get_ThreadState)(tid);
2278
2279 if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
2280 /* signal is blocked, but they're not allowed to block faults */
2281 VG_(set_default_handler)(sigNo);
2282 }
2283
2284 if (VG_(in_generated_code)) {
2285 if (VG_(gdbserver_report_signal) (sigNo, tid)
2286 || VG_(sigismember)(&tst->sig_mask, sigNo)) {
2287 /* Can't continue; must longjmp back to the scheduler and thus
2288 enter the sighandler immediately. */
2289 deliver_signal(tid, info, uc);
2290 resume_scheduler(tid);
2291 }
2292 else
2293 resume_scheduler(tid);
2294 }
2295
2296       /* If resume_scheduler returns, or it's our fault, it means we
2297 don't have longjmp set up, implying that we weren't running
2298 client code, and therefore it was actually generated by
2299 Valgrind internally.
2300 */
2301 VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
2302 "a signal %d (%s) - exiting\n",
2303 sigNo, signame(sigNo));
2304
2305 VG_(dmsg)("si_code=%x; Faulting address: %p; sp: %#lx\n",
2306 info->si_code, info->VKI_SIGINFO_si_addr,
2307 VG_UCONTEXT_STACK_PTR(uc));
2308
2309 if (0)
2310 VG_(kill_self)(sigNo); /* generate a core dump */
2311
2312 //if (tid == 0) /* could happen after everyone has exited */
2313 // tid = VG_(master_tid);
2314 vg_assert(tid != 0);
2315
2316 UnwindStartRegs startRegs;
2317 VG_(memset)(&startRegs, 0, sizeof(startRegs));
2318
2319 VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
2320 VG_(core_panic_at)("Killed by fatal signal", &startRegs);
2321 }
2322 }
2323
2324 /*
2325 Receive a sync signal from the host.
2326 */
2327 static
sync_signalhandler(Int sigNo,vki_siginfo_t * info,struct vki_ucontext * uc)2328 void sync_signalhandler ( Int sigNo,
2329 vki_siginfo_t *info, struct vki_ucontext *uc )
2330 {
2331 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2332 Bool from_user;
2333
2334 if (0)
2335 VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
2336
2337 vg_assert(info != NULL);
2338 vg_assert(info->si_signo == sigNo);
2339 vg_assert(sigNo == VKI_SIGSEGV ||
2340 sigNo == VKI_SIGBUS ||
2341 sigNo == VKI_SIGFPE ||
2342 sigNo == VKI_SIGILL ||
2343 sigNo == VKI_SIGTRAP);
2344
2345 info->si_code = sanitize_si_code(info->si_code);
2346
2347 from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);
2348
2349 if (VG_(clo_trace_signals)) {
2350 VG_(dmsg)("sync signal handler: "
2351 "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
2352 sigNo, info->si_code, VG_(get_IP)(tid),
2353 VG_UCONTEXT_INSTR_PTR(uc),
2354 ( from_user ? "user" : "kernel" ));
2355 }
2356 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
2357
2358 /* // debug code:
2359 if (0) {
2360 VG_(printf)("info->si_signo %d\n", info->si_signo);
2361 VG_(printf)("info->si_errno %d\n", info->si_errno);
2362 VG_(printf)("info->si_code %d\n", info->si_code);
2363 VG_(printf)("info->si_pid %d\n", info->si_pid);
2364 VG_(printf)("info->si_uid %d\n", info->si_uid);
2365 VG_(printf)("info->si_status %d\n", info->si_status);
2366 VG_(printf)("info->si_addr %p\n", info->si_addr);
2367 }
2368 */
2369
2370 /* Figure out if the signal is being sent from outside the process.
2371 (Why do we care?) If the signal is from the user rather than the
2372 kernel, then treat it more like an async signal than a sync signal --
2373 that is, merely queue it for later delivery. */
2374 if (from_user) {
2375 sync_signalhandler_from_user( tid, sigNo, info, uc);
2376 } else {
2377 sync_signalhandler_from_kernel(tid, sigNo, info, uc);
2378 }
2379 }
2380
2381
2382 /*
2383 Kill this thread. Makes it leave any syscall it might be currently
2384 blocked in, and return to the scheduler. This doesn't mark the thread
2385 as exiting; that's the caller's job.
2386 */
2387 static void sigvgkill_handler(int signo, vki_siginfo_t *si,
2388 struct vki_ucontext *uc)
2389 {
2390 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2391 ThreadStatus at_signal = VG_(threads)[tid].status;
2392
2393 if (VG_(clo_trace_signals))
2394 VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);
2395
2396 VG_(acquire_BigLock)(tid, "sigvgkill_handler");
2397
2398 vg_assert(signo == VG_SIGVGKILL);
2399 vg_assert(si->si_signo == signo);
2400
2401 /* jrs 2006 August 3: the following assertion seems incorrect to
2402 me, and fails on AIX. sigvgkill could be sent to a thread which
2403 is runnable - see VG_(nuke_all_threads_except) in the scheduler.
2404 Hence comment these out ..
2405
2406 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
2407 VG_(post_syscall)(tid);
2408
2409 and instead do:
2410 */
2411 if (at_signal == VgTs_WaitSys)
2412 VG_(post_syscall)(tid);
2413 /* jrs 2006 August 3 ends */
2414
2415 resume_scheduler(tid);
2416
2417 VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
2418 }
2419
2420 static __attribute__((unused))
2421 void pp_ksigaction ( vki_sigaction_toK_t* sa )
2422 {
2423 Int i;
2424 VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
2425 sa->ksa_handler,
2426 (UInt)sa->sa_flags,
2427 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2428 sa->sa_restorer
2429 # else
2430 (void*)0
2431 # endif
2432 );
2433 VG_(printf)("pp_ksigaction: { ");
2434 for (i = 1; i <= VG_(max_signal); i++)
2435 if (VG_(sigismember(&(sa->sa_mask),i)))
2436 VG_(printf)("%d ", i);
2437 VG_(printf)("}\n");
2438 }
2439
2440 /*
2441 Force signal handler to default
2442 */
2443 void VG_(set_default_handler)(Int signo)
2444 {
2445 vki_sigaction_toK_t sa;
2446
2447 sa.ksa_handler = VKI_SIG_DFL;
2448 sa.sa_flags = 0;
2449 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2450 sa.sa_restorer = 0;
2451 # endif
2452 VG_(sigemptyset)(&sa.sa_mask);
2453
2454 VG_(do_sys_sigaction)(signo, &sa, NULL);
2455 }
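
/* Illustrative sketch, not part of the build: the user-space analogue
   of the above, using the standard POSIX sigaction API: */
#if 0
#include <signal.h>

static void set_default_handler(int signo)
{
   struct sigaction sa;
   sa.sa_handler = SIG_DFL;   /* back to the default disposition */
   sa.sa_flags   = 0;
   sigemptyset(&sa.sa_mask);
   sigaction(signo, &sa, NULL);
}
#endif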
2456
2457 /*
2458 Poll for pending signals, and set the next one up for delivery.
2459 */
2460 void VG_(poll_signals)(ThreadId tid)
2461 {
2462 vki_siginfo_t si, *sip;
2463 vki_sigset_t pollset;
2464 ThreadState *tst = VG_(get_ThreadState)(tid);
2465 vki_sigset_t saved_mask;
2466
2467 /* look for all the signals this thread isn't blocking */
2468 /* pollset = ~tst->sig_mask */
2469 VG_(sigcomplementset)( &pollset, &tst->sig_mask );
2470
2471 block_all_host_signals(&saved_mask); // protect signal queue
2472
2473 /* First look for any queued pending signals */
2474 sip = next_queued(tid, &pollset); /* this thread */
2475
2476 if (sip == NULL)
2477 sip = next_queued(0, &pollset); /* process-wide */
2478
2479 /* If there was nothing queued, ask the kernel for a pending signal */
2480 if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
2481 if (VG_(clo_trace_signals))
2482 VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
2483 si.si_signo, tid);
2484 sip = &si;
2485 }
2486
2487 if (sip != NULL) {
2488 /* OK, something to do; deliver it */
2489 if (VG_(clo_trace_signals))
2490 VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
2491 if (!is_sig_ign(sip->si_signo, tid))
2492 deliver_signal(tid, sip, NULL);
2493 else if (VG_(clo_trace_signals))
2494 VG_(dmsg)(" signal %d ignored\n", sip->si_signo);
2495
2496 sip->si_signo = 0; /* remove from signal queue, if that's
2497 where it came from */
2498 }
2499
2500 restore_all_host_signals(&saved_mask);
2501 }
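
/* Illustrative sketch, not part of the build: VG_(sigtimedwait_zero)
   is in essence a zero-timeout sigtimedwait(2), i.e. a non-blocking
   "anything pending?" probe.  A user-space equivalent, assuming
   Linux/POSIX: */
#if 0
#include <signal.h>
#include <time.h>

static int poll_one_pending(const sigset_t *interesting, siginfo_t *info)
{
   struct timespec zero = { 0, 0 };
   /* Returns the signal number (and fills *info) if one was pending,
      or -1 with errno == EAGAIN if nothing was. */
   return sigtimedwait(interesting, info, &zero);
}
#endif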
2502
2503 /* At startup, copy the process' real signal state to the SCSS.
2504 Whilst doing this, block all real signals. Then calculate SKSS and
2505 set the kernel to that. Also initialise DCSS.
2506 */
2507 void VG_(sigstartup_actions) ( void )
2508 {
2509 Int i, ret, vKI_SIGRTMIN;
2510 vki_sigset_t saved_procmask;
2511 vki_sigaction_fromK_t sa;
2512
2513 VG_(memset)(&scss, 0, sizeof(scss));
2514 VG_(memset)(&skss, 0, sizeof(skss));
2515
2516 # if defined(VKI_SIGRTMIN)
2517 vKI_SIGRTMIN = VKI_SIGRTMIN;
2518 # else
2519 vKI_SIGRTMIN = 0; /* eg Darwin */
2520 # endif
2521
2522 /* VG_(printf)("SIGSTARTUP\n"); */
2523 /* Block all signals. saved_procmask remembers the previous mask,
2524 which the first thread inherits.
2525 */
2526 block_all_host_signals( &saved_procmask );
2527
2528 /* Copy per-signal settings to SCSS. */
2529 for (i = 1; i <= _VKI_NSIG; i++) {
2530 /* Get the old host action */
2531 ret = VG_(sigaction)(i, NULL, &sa);
2532
2533 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2534 /* apparently we may not even ask about the disposition of these
2535 signals, let alone change them */
2536 if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
2537 continue;
2538 # endif
2539
2540 if (ret != 0)
2541 break;
2542
2543 /* Try setting it back to see if this signal is really
2544 available */
2545 if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
2546 && i >= vKI_SIGRTMIN) {
2547 vki_sigaction_toK_t tsa, sa2;
2548
2549 tsa.ksa_handler = (void *)sync_signalhandler;
2550 tsa.sa_flags = VKI_SA_SIGINFO;
2551 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2552 tsa.sa_restorer = 0;
2553 # endif
2554 VG_(sigfillset)(&tsa.sa_mask);
2555
2556 /* try setting it to some arbitrary handler */
2557 if (VG_(sigaction)(i, &tsa, NULL) != 0) {
2558 /* failed - not really usable */
2559 break;
2560 }
2561
2562 VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
2563 ret = VG_(sigaction)(i, &sa2, NULL);
2564 vg_assert(ret == 0);
2565 }
2566
2567 VG_(max_signal) = i;
2568
2569 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
2570 VG_(printf)("snaffling handler 0x%lx for signal %d\n",
2571 (Addr)(sa.ksa_handler), i );
2572
2573 scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
2574 scss.scss_per_sig[i].scss_flags = sa.sa_flags;
2575 scss.scss_per_sig[i].scss_mask = sa.sa_mask;
2576
2577 scss.scss_per_sig[i].scss_restorer = NULL;
2578 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2579 scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
2580 # endif
2581
2582 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2583 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2584 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2585 /*sa.sa_tramp;*/
2586 /* We can't know what it was, because Darwin's sys_sigaction
2587 doesn't tell us. */
2588 # endif
2589 }
2590
2591 if (VG_(clo_trace_signals))
2592 VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));
2593
2594 /* Our private internal signals are treated as ignored */
2595 scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
2596 scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
2597 VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
2598
2599 /* Copy the process' signal mask into the root thread. */
2600 vg_assert(VG_(threads)[1].status == VgTs_Init);
2601 for (i = 2; i < VG_N_THREADS; i++)
2602 vg_assert(VG_(threads)[i].status == VgTs_Empty);
2603
2604 VG_(threads)[1].sig_mask = saved_procmask;
2605 VG_(threads)[1].tmp_sig_mask = saved_procmask;
2606
2607 /* Calculate SKSS and apply it. This also sets the initial kernel
2608 mask we need to run with. */
2609 handle_SCSS_change( True /* forced update */ );
2610
2611 /* Leave with all signals still blocked; the thread scheduler loop
2612 will set the appropriate mask at the appropriate time. */
2613 }
2614
2615 /*--------------------------------------------------------------------*/
2616 /*--- end ---*/
2617 /*--------------------------------------------------------------------*/
2618