1
2 /*--------------------------------------------------------------------*/
3 /*--- Implementation of POSIX signals. m_signals.c ---*/
4 /*--------------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2000-2013 Julian Seward
11 jseward@acm.org
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29 */
30
31 /*
32 Signal handling.
33
34 There are 4 distinct classes of signal:
35
36 1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
37 TRAP): these are signals as a result of an instruction fault. If
38 we get one while running client code, then we just do the
39 appropriate thing. If it happens while running Valgrind code, then
40 it indicates a Valgrind bug. Note that we "manually" implement
41 automatic stack growth, such that if a fault happens near the
42 client process stack, it is extended in the same way the kernel
43 would, and the fault is never reported to the client program.
44
45 2. Asynchronous variants of the above signals: If the kernel tries
46 to deliver a sync signal while it is blocked, it just kills the
47 process. Therefore, we can't block those signals if we want to be
48 able to report on bugs in Valgrind. This means that we're also
49 open to receiving those signals from other processes, sent with
50 kill. We could get away with just dropping them, since they aren't
51 really signals that processes send to each other.
52
53 3. Synchronous, general signals. If a thread/process sends itself
54 a signal with kill, it's expected to be synchronous: ie, the signal
55 will have been delivered by the time the syscall finishes.
56
57 4. Asynchronous, general signals. All other signals, sent by
58 another process with kill. These are generally blocked, except for
59 two special cases: we poll for them each time we're about to run a
60 thread for a time quantum, and while running blocking syscalls.
61
62
63 In addition, we reserve one signal for internal use: SIGVGKILL.
64 SIGVGKILL is used to terminate threads. When one thread wants
65 another to exit, it will set its exitreason and send it SIGVGKILL
66 if it appears to be blocked in a syscall.
67
68
69 We use a kernel thread for each application thread. When the
70 thread allows itself to be open to signals, it sets the thread
71 signal mask to what the client application set it to. This means
72 that we get the kernel to do all signal routing: under Valgrind,
73 signals get delivered in the same way as in the non-Valgrind case
74 (the exception being for the sync signal set, since they're almost
75 always unblocked).
76 */
77
78 /*
79 Some more details...
80
81 First off, we take note of the client's requests (via sys_sigaction
82 and sys_sigprocmask) to set the signal state (handlers for each
83 signal, which are process-wide, + a mask for each signal, which is
84 per-thread). This info is duly recorded in the SCSS (static Client
85 signal state) in m_signals.c, and if the client later queries what
86 the state is, we merely fish the relevant info out of SCSS and give
87 it back.
88
89 However, we set the real signal state in the kernel to something
90 entirely different. This is recorded in SKSS, the static Kernel
91 signal state. What's nice (to the extent that anything is nice w.r.t
92 signals) is that there's a pure function to calculate SKSS from SCSS,
93 calculate_SKSS_from_SCSS. So when the client changes SCSS then we
94 recompute the associated SKSS and apply any changes from the previous
95 SKSS through to the kernel.
96
97 Now, that said, the general scheme we have now is that, regardless of
98 what the client puts into the SCSS (viz, asks for), what we would
99 like to do is as follows:
100
101 (1) run code on the virtual CPU with all signals blocked
102
103 (2) at convenient moments for us (that is, when the VCPU stops, and
104 control is back with the scheduler), ask the kernel "do you have
105 any signals for me?" and if it does, collect up the info, and
106 deliver them to the client (by building sigframes).
107
108 And that's almost what we do. The signal polling is done by
109 VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
110 do the dirty work. (of which more later).
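
   For orientation, the semantics of VG_(sigtimedwait_zero) are those of
   a zero-timeout sigtimedwait, so it never blocks -- roughly (this is
   only a sketch of the behaviour, not the implementation):

      struct timespec zero = { 0, 0 };
      siginfo_t info;
      int sig = sigtimedwait(&set, &info, &zero);

   where sig > 0 means a pending signal in 'set' was dequeued into
   'info', and sig == -1 with errno EAGAIN means nothing was pending.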
111
112 By polling signals, rather than catching them, we get to deal with
113 them only at convenient moments, rather than having to recover from
114 taking a signal while generated code is running.
115
116 Now unfortunately .. the above scheme only works for so-called async
117 signals. An async signal is one which isn't associated with any
118 particular instruction, eg Control-C (SIGINT). For those, it doesn't
119 matter if we don't deliver the signal to the client immediately; it
120 only matters that we deliver it eventually. Hence polling is OK.
121
122 But the other group -- sync signals -- are all related by the fact
123 that they are various ways for the host CPU to fail to execute an
124 instruction: SIGILL, SIGSEGV, SIGFPE. And they can't be deferred,
125 because obviously if a host instruction can't execute, well then we
126 have to immediately do Plan B, whatever that is.
127
128 So the next approximation of what happens is:
129
130 (1) run code on vcpu with all async signals blocked
131
132 (2) at convenient moments (when NOT running the vcpu), poll for async
133 signals.
134
135 (1) and (2) together imply that if the host does deliver a signal to
136 async_signalhandler while the VCPU is running, something's
137 seriously wrong.
138
139 (3) when running code on vcpu, don't block sync signals. Instead
140 register sync_signalhandler and catch any such via that. Of
141 course, that means an ugly recovery path if we do catch one -- the
142 sync_signalhandler has to longjmp, exiting out of the generated
143 code and the assembly dispatcher that runs it; the longjmp is
144 caught in m_scheduler, which then tells m_signals to deliver the
145 signal.
146
147 Now naturally (ha ha) even that might be tolerable, but there's
148 something worse: dealing with signals delivered to threads in
149 syscalls.
150
151 Obviously from the above, SKSS's signal mask (viz, what we really run
152 with) is way different from SCSS's signal mask (viz, what the client
153 thread thought it asked for). (eg) It may well be that the client
154 did not block control-C, so that it just expects to drop dead if it
155 receives ^C whilst blocked in a syscall, but by default we are
156 running with all async signals blocked, and so that signal could be
157 arbitrarily delayed, or perhaps even lost (not sure).
158
159 So what we have to do, when doing any syscall which SfMayBlock, is to
160 quickly switch in the SCSS-specified signal mask just before the
161 syscall, and switch it back just afterwards, and hope that we don't
162 get caught up in some weird race condition. This is the primary
163 purpose of the ultra-magical pieces of assembly code in
164 coregrind/m_syswrap/syscall-<plat>.S
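
   In outline, ignoring the atomicity issues that make the real sequence
   hard (names here are purely illustrative):

      VG_(sigprocmask)(VKI_SIG_SETMASK, &thread_SCSS_mask, &saved_mask);
      ... do the syscall ...
      VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_mask, NULL);

   The assembly exists because a signal landing between any two of those
   steps still has to be handled correctly, which can't be arranged from C.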
165
166 -----------
167
168 The ways in which V can come to hear of signals that need to be
169 forwarded to the client are as follows:
170
171 sync signals: can arrive at any time whatsoever. These are caught
172 by sync_signalhandler
173
174 async signals:
175
176 if running generated code
177 then these are blocked, so we don't expect to catch them in
178 async_signalhandler
179
180 else
181 if thread is blocked in a syscall marked SfMayBlock
182 then signals may be delivered to async_sighandler, since we
183 temporarily unblocked them for the duration of the syscall,
184 by using the real (SCSS) mask for this thread
185
186 else we're doing misc housekeeping activities (eg, making a translation,
187 washing our hair, etc). As in the normal case, these signals are
188 blocked, but we can and do poll for them using VG_(poll_signals).
189
190 Now, re VG_(poll_signals), it polls the kernel by doing
191 VG_(sigtimedwait_zero). This is trivial on Linux, since it's just a
192 syscall. But on Darwin and AIX, we have to cobble together the
193 functionality in a tedious, longwinded and probably error-prone way.
194
195 Finally, if gdb is debugging the process under valgrind, gdb can
196 request that the signal be ignored. So, before resuming the
197 scheduler/delivering the signal, a call to VG_(gdbserver_report_signal)
198 is made; the signal is delivered only if that call returns True.
199 */
200
201 #include "pub_core_basics.h"
202 #include "pub_core_vki.h"
203 #include "pub_core_vkiscnums.h"
204 #include "pub_core_debuglog.h"
205 #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
206 #include "pub_core_threadstate.h"
207 #include "pub_core_xarray.h"
208 #include "pub_core_clientstate.h"
209 #include "pub_core_aspacemgr.h"
210 #include "pub_core_debugger.h" // For VG_(start_debugger)
211 #include "pub_core_errormgr.h"
212 #include "pub_core_gdbserver.h"
213 #include "pub_core_libcbase.h"
214 #include "pub_core_libcassert.h"
215 #include "pub_core_libcprint.h"
216 #include "pub_core_libcproc.h"
217 #include "pub_core_libcsignal.h"
218 #include "pub_core_machine.h"
219 #include "pub_core_mallocfree.h"
220 #include "pub_core_options.h"
221 #include "pub_core_scheduler.h"
222 #include "pub_core_signals.h"
223 #include "pub_core_sigframe.h" // For VG_(sigframe_create)()
224 #include "pub_core_stacks.h" // For VG_(change_stack)()
225 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
226 #include "pub_core_syscall.h"
227 #include "pub_core_syswrap.h"
228 #include "pub_core_tooliface.h"
229 #include "pub_core_coredump.h"
230
231
232 /* ---------------------------------------------------------------------
233 Forwards decls.
234 ------------------------------------------------------------------ */
235
236 static void sync_signalhandler ( Int sigNo, vki_siginfo_t *info,
237 struct vki_ucontext * );
238 static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
239 struct vki_ucontext * );
240 static void sigvgkill_handler ( Int sigNo, vki_siginfo_t *info,
241 struct vki_ucontext * );
242
243 /* Maximum usable signal. */
244 Int VG_(max_signal) = _VKI_NSIG;
245
246 #define N_QUEUED_SIGNALS 8
247
248 typedef struct SigQueue {
249 Int next;
250 vki_siginfo_t sigs[N_QUEUED_SIGNALS];
251 } SigQueue;
252
253 /* ------ Macros for pulling stuff out of ucontexts ------ */
254
255 /* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do? A: let's suppose the
256 machine context (uc) reflects the situation that a syscall had just
257 completed, quite literally -- that is, that the program counter was
258 now at the instruction following the syscall. (or we're slightly
259 downstream, but we're sure no relevant register has yet changed
260 value.) Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
261 the result of the syscall; it does this by fishing relevant bits of
262 the machine state out of the uc. Of course if the program counter
263 was somewhere else entirely then the result is likely to be
264 meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
265 very careful to pay attention to the results only when it is sure
266 that the said constraint on the program counter is indeed valid. */
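
/* For example (a sketch; details vary by caller): when the async signal
   handler finds that the interrupted thread was blocked in a syscall, it
   can recover the syscall's outcome with

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);

   and pass it to VG_(fixup_guest_state_after_syscall_interrupted),
   because in that situation the program counter is known to satisfy the
   constraint described above. */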
267
268 #if defined(VGP_x86_linux)
269 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.eip)
270 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.esp)
271 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
272 /* Convert the value in uc_mcontext.eax into a SysRes. */ \
273 VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
274 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
275 { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip); \
276 (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp); \
277 (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp; \
278 }
279
280 #elif defined(VGP_amd64_linux)
281 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.rip)
282 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.rsp)
283 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
284 /* Convert the value in uc_mcontext.rax into a SysRes. */ \
285 VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
286 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
287 { (srP)->r_pc = (uc)->uc_mcontext.rip; \
288 (srP)->r_sp = (uc)->uc_mcontext.rsp; \
289 (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
290 }
291
292 #elif defined(VGP_ppc32_linux)
293 /* Comments from Paul Mackerras 25 Nov 05:
294
295 > I'm tracking down a problem where V's signal handling doesn't
296 > work properly on a ppc440gx running 2.4.20. The problem is that
297 > the ucontext being presented to V's sighandler seems completely
298 > bogus.
299
300 > V's kernel headers and hence ucontext layout are derived from
301 > 2.6.9. I compared include/asm-ppc/ucontext.h from 2.4.20 and
302 > 2.6.13.
303
304 > Can I just check my interpretation: the 2.4.20 one contains the
305 > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
306 > to said struct? And so if V is using the 2.6.13 struct then a
307 > 2.4.20 one will make no sense to it.
308
309 Not quite... what is inline in the 2.4.20 version is a
310 sigcontext_struct, not an mcontext. The sigcontext looks like
311 this:
312
313 struct sigcontext_struct {
314 unsigned long _unused[4];
315 int signal;
316 unsigned long handler;
317 unsigned long oldmask;
318 struct pt_regs *regs;
319 };
320
321 The regs pointer of that struct ends up at the same offset as the
322 uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
323 same as the mc_gregs field of the mcontext. In fact the integer
324 regs are followed in memory by the floating point regs on 2.4.20.
325
326 Thus if you are using the 2.6 definitions, it should work on 2.4.20
327 provided that you go via uc->uc_regs rather than looking in
328 uc->uc_mcontext directly.
329
330 There is another subtlety: 2.4.20 doesn't save the vector regs when
331 delivering a signal, and 2.6.x only saves the vector regs if the
332 process has ever used an altivec instruction. If 2.6.x does save
333 the vector regs, it sets the MSR_VEC bit in
334 uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it. That bit
335 will always be clear under 2.4.20. So you can use that bit to tell
336 whether uc->uc_regs->mc_vregs is valid. */
337 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
338 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
339 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
340 /* Convert the values in uc_mcontext r3,cr into a SysRes. */ \
341 VG_(mk_SysRes_ppc32_linux)( \
342 (uc)->uc_regs->mc_gregs[VKI_PT_R3], \
343 (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1) \
344 )
345 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
346 { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]); \
347 (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]); \
348 (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
349 }
350
351 #elif defined(VGP_ppc64_linux)
352 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
353 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
354 /* Dubious hack: if there is an error, only consider the lowest 8
355 bits of r3. memcheck/tests/post-syscall shows a case where an
356 interrupted syscall should have produced a ucontext with 0x4
357 (VKI_EINTR) in r3 but is in fact producing 0x204. */
358 /* Awaiting clarification from PaulM. Evidently 0x204 is
359 ERESTART_RESTARTBLOCK, which shouldn't have made it into user
360 space. */
361 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
362 {
363 ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
364 ULong r3 = uc->uc_mcontext.gp_regs[VKI_PT_R3];
365 if (err) r3 &= 0xFF;
366 return VG_(mk_SysRes_ppc64_linux)( r3, err );
367 }
368 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
369 { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP]; \
370 (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1]; \
371 (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
372 }
373
374 #elif defined(VGP_arm_linux)
375 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.arm_pc)
376 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.arm_sp)
377 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
378 /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
379 VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
380 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
381 { (srP)->r_pc = (uc)->uc_mcontext.arm_pc; \
382 (srP)->r_sp = (uc)->uc_mcontext.arm_sp; \
383 (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
384 (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
385 (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
386 (srP)->misc.ARM.r7 = (uc)->uc_mcontext.arm_r7; \
387 }
388
389 #elif defined(VGP_arm64_linux)
390 # define VG_UCONTEXT_INSTR_PTR(uc) ((UWord)((uc)->uc_mcontext.pc))
391 # define VG_UCONTEXT_STACK_PTR(uc) ((UWord)((uc)->uc_mcontext.sp))
392 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
393 /* Convert the value in uc_mcontext.regs[0] into a SysRes. */ \
394 VG_(mk_SysRes_arm64_linux)( (uc)->uc_mcontext.regs[0] )
395 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
396 { (srP)->r_pc = (uc)->uc_mcontext.pc; \
397 (srP)->r_sp = (uc)->uc_mcontext.sp; \
398 (srP)->misc.ARM64.x29 = (uc)->uc_mcontext.regs[29]; \
399 (srP)->misc.ARM64.x30 = (uc)->uc_mcontext.regs[30]; \
400 }
401
402 #elif defined(VGP_x86_darwin)
403
404 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
405 ucontext_t* uc = (ucontext_t*)ucV;
406 struct __darwin_mcontext32* mc = uc->uc_mcontext;
407 struct __darwin_i386_thread_state* ss = &mc->__ss;
408 return ss->__eip;
409 }
410 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
411 ucontext_t* uc = (ucontext_t*)ucV;
412 struct __darwin_mcontext32* mc = uc->uc_mcontext;
413 struct __darwin_i386_thread_state* ss = &mc->__ss;
414 return ss->__esp;
415 }
416 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
417 UWord scclass ) {
418 /* this is complicated by the problem that there are 3 different
419 kinds of syscalls, each with its own return convention.
420 NB: scclass is a host word, hence UWord is good for both
421 amd64-darwin and x86-darwin */
422 ucontext_t* uc = (ucontext_t*)ucV;
423 struct __darwin_mcontext32* mc = uc->uc_mcontext;
424 struct __darwin_i386_thread_state* ss = &mc->__ss;
425 /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
426 UInt carry = 1 & ss->__eflags;
427 UInt err = 0;
428 UInt wLO = 0;
429 UInt wHI = 0;
430 switch (scclass) {
431 case VG_DARWIN_SYSCALL_CLASS_UNIX:
432 err = carry;
433 wLO = ss->__eax;
434 wHI = ss->__edx;
435 break;
436 case VG_DARWIN_SYSCALL_CLASS_MACH:
437 wLO = ss->__eax;
438 break;
439 case VG_DARWIN_SYSCALL_CLASS_MDEP:
440 wLO = ss->__eax;
441 break;
442 default:
443 vg_assert(0);
444 break;
445 }
446 return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
447 wHI, wLO );
448 }
449 static inline
450 void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
451 void* ucV ) {
452 ucontext_t* uc = (ucontext_t*)(ucV);
453 struct __darwin_mcontext32* mc = uc->uc_mcontext;
454 struct __darwin_i386_thread_state* ss = &mc->__ss;
455 srP->r_pc = (ULong)(ss->__eip);
456 srP->r_sp = (ULong)(ss->__esp);
457 srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
458 }
459
460 #elif defined(VGP_amd64_darwin)
461
462 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
463 ucontext_t* uc = (ucontext_t*)ucV;
464 struct __darwin_mcontext64* mc = uc->uc_mcontext;
465 struct __darwin_x86_thread_state64* ss = &mc->__ss;
466 return ss->__rip;
467 }
468 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
469 ucontext_t* uc = (ucontext_t*)ucV;
470 struct __darwin_mcontext64* mc = uc->uc_mcontext;
471 struct __darwin_x86_thread_state64* ss = &mc->__ss;
472 return ss->__rsp;
473 }
474 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
475 UWord scclass ) {
476 /* This is copied from the x86-darwin case. I'm not sure if it
477 is correct. */
478 ucontext_t* uc = (ucontext_t*)ucV;
479 struct __darwin_mcontext64* mc = uc->uc_mcontext;
480 struct __darwin_x86_thread_state64* ss = &mc->__ss;
481 /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
482 ULong carry = 1 & ss->__rflags;
483 ULong err = 0;
484 ULong wLO = 0;
485 ULong wHI = 0;
486 switch (scclass) {
487 case VG_DARWIN_SYSCALL_CLASS_UNIX:
488 err = carry;
489 wLO = ss->__rax;
490 wHI = ss->__rdx;
491 break;
492 case VG_DARWIN_SYSCALL_CLASS_MACH:
493 wLO = ss->__rax;
494 break;
495 case VG_DARWIN_SYSCALL_CLASS_MDEP:
496 wLO = ss->__rax;
497 break;
498 default:
499 vg_assert(0);
500 break;
501 }
502 return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
503 wHI, wLO );
504 }
505 static inline
506 void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
507 void* ucV ) {
508 ucontext_t* uc = (ucontext_t*)ucV;
509 struct __darwin_mcontext64* mc = uc->uc_mcontext;
510 struct __darwin_x86_thread_state64* ss = &mc->__ss;
511 srP->r_pc = (ULong)(ss->__rip);
512 srP->r_sp = (ULong)(ss->__rsp);
513 srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
514 }
515
516 #elif defined(VGP_s390x_linux)
517
518 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.regs.psw.addr)
519 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.regs.gprs[15])
520 # define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.regs.gprs[11])
521 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
522 VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
523 # define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.regs.gprs[14])
524
525 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
526 { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr); \
527 (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]); \
528 (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11]; \
529 (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14]; \
530 }
531
532 #elif defined(VGP_mips32_linux)
533 # define VG_UCONTEXT_INSTR_PTR(uc) ((UWord)(((uc)->uc_mcontext.sc_pc)))
534 # define VG_UCONTEXT_STACK_PTR(uc) ((UWord)((uc)->uc_mcontext.sc_regs[29]))
535 # define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.sc_regs[30])
536 # define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.sc_regs[2])
537 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
538 /* Convert the values in uc_mcontext.sc_regs into a SysRes. */ \
539 VG_(mk_SysRes_mips32_linux)( (uc)->uc_mcontext.sc_regs[2], \
540 (uc)->uc_mcontext.sc_regs[3], \
541 (uc)->uc_mcontext.sc_regs[7])
542
543 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
544 { (srP)->r_pc = (uc)->uc_mcontext.sc_pc; \
545 (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29]; \
546 (srP)->misc.MIPS32.r30 = (uc)->uc_mcontext.sc_regs[30]; \
547 (srP)->misc.MIPS32.r31 = (uc)->uc_mcontext.sc_regs[31]; \
548 (srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
549 }
550
551 #elif defined(VGP_mips64_linux)
552 # define VG_UCONTEXT_INSTR_PTR(uc) (((uc)->uc_mcontext.sc_pc))
553 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.sc_regs[29])
554 # define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.sc_regs[30])
555 # define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.sc_regs[2])
556 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
557 /* Convert the values in uc_mcontext.sc_regs into a SysRes. */ \
558 VG_(mk_SysRes_mips64_linux)((uc)->uc_mcontext.sc_regs[2], \
559 (uc)->uc_mcontext.sc_regs[3], \
560 (uc)->uc_mcontext.sc_regs[7])
561
562 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
563 { (srP)->r_pc = (uc)->uc_mcontext.sc_pc; \
564 (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29]; \
565 (srP)->misc.MIPS64.r30 = (uc)->uc_mcontext.sc_regs[30]; \
566 (srP)->misc.MIPS64.r31 = (uc)->uc_mcontext.sc_regs[31]; \
567 (srP)->misc.MIPS64.r28 = (uc)->uc_mcontext.sc_regs[28]; \
568 }
569
570 #else
571 # error Unknown platform
572 #endif
573
574
575 /* ------ Macros for pulling stuff out of siginfos ------ */
576
577 /* These macros allow use of uniform names when working with
578 both the Linux and Darwin vki definitions. */
579 #if defined(VGO_linux)
580 # define VKI_SIGINFO_si_addr _sifields._sigfault._addr
581 # define VKI_SIGINFO_si_pid _sifields._kill._pid
582 #elif defined(VGO_darwin)
583 # define VKI_SIGINFO_si_addr si_addr
584 # define VKI_SIGINFO_si_pid si_pid
585 #else
586 # error Unknown OS
587 #endif
588
589
590 /* ---------------------------------------------------------------------
591 HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
592 ------------------------------------------------------------------ */
593
594 /* ---------------------------------------------------------------------
595 Signal state for this process.
596 ------------------------------------------------------------------ */
597
598
599 /* Base-ment of these arrays[_VKI_NSIG].
600
601 Valid signal numbers are 1 .. _VKI_NSIG inclusive.
602 Rather than subtracting 1 for indexing these arrays, which
603 is tedious and error-prone, they are simply dimensioned 1 larger,
604 and entry [0] is not used.
605 */
606
607
608 /* -----------------------------------------------------
609 Static client signal state (SCSS). This is the state
610 that the client thinks it has the kernel in.
611 SCSS records verbatim the client's settings. These
612 are mashed around only when SKSS is calculated from it.
613 -------------------------------------------------- */
614
615 typedef
616 struct {
617 void* scss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
618 client's handler */
619 UInt scss_flags;
620 vki_sigset_t scss_mask;
621 void* scss_restorer; /* where sigreturn goes */
622 void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
623 /* re _restorer and _sa_tramp, we merely record the values
624 supplied when the client does 'sigaction' and give them back
625 when requested. Otherwise they are simply ignored. */
626 }
627 SCSS_Per_Signal;
628
629 typedef
630 struct {
631 /* per-signal info */
632 SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];
633
634 /* Additional elements to SCSS not stored here:
635 - for each thread, the thread's blocking mask
636 - for each thread in WaitSIG, the set of waited-on sigs
637 */
638 }
639 SCSS;
640
641 static SCSS scss;
642
643
644 /* -----------------------------------------------------
645 Static kernel signal state (SKSS). This is the state
646 that we have the kernel in. It is computed from SCSS.
647 -------------------------------------------------- */
648
649 /* Let's do:
650 sigprocmask assigns to all thread masks
651 so that at least everything is always consistent
652 Flags:
653 SA_SIGINFO -- we always set it, and honour it for the client
654 SA_NOCLDSTOP -- passed to kernel
655 SA_ONESHOT or SA_RESETHAND -- pass through
656 SA_RESTART -- we observe this but set our handlers to always restart
657 SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
658 SA_ONSTACK -- pass through
659 SA_NOCLDWAIT -- pass through
660 */
661
662
663 typedef
664 struct {
665 void* skss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN
666 or ptr to our handler */
667 UInt skss_flags;
668 /* There is no skss_mask, since we know that we will always ask
669 for all signals to be blocked in our sighandlers. */
670 /* Also there is no skss_restorer. */
671 }
672 SKSS_Per_Signal;
673
674 typedef
675 struct {
676 SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
677 }
678 SKSS;
679
680 static SKSS skss;
681
682 /* Returns True if the signal is to be ignored.
683 Deciding this may involve asking gdbserver about tid. */
684 static Bool is_sig_ign(Int sigNo, ThreadId tid)
685 {
686 vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);
687
688 return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN
689 || !VG_(gdbserver_report_signal) (sigNo, tid);
690 }
691
692 /* ---------------------------------------------------------------------
693 Compute the SKSS required by the current SCSS.
694 ------------------------------------------------------------------ */
695
696 static
697 void pp_SKSS ( void )
698 {
699 Int sig;
700 VG_(printf)("\n\nSKSS:\n");
701 for (sig = 1; sig <= _VKI_NSIG; sig++) {
702 VG_(printf)("sig %d: handler %p, flags 0x%x\n", sig,
703 skss.skss_per_sig[sig].skss_handler,
704 skss.skss_per_sig[sig].skss_flags );
705
706 }
707 }
708
709 /* This is the core, clever bit. Computation is as follows:
710
711 For each signal
712 handler = if client has a handler, then our handler
713 else if client is DFL, then our handler as well
714 else (client must be IGN)
715 then handler is IGN
716 */
717 static
718 void calculate_SKSS_from_SCSS ( SKSS* dst )
719 {
720 Int sig;
721 UInt scss_flags;
722 UInt skss_flags;
723
724 for (sig = 1; sig <= _VKI_NSIG; sig++) {
725 void *skss_handler;
726 void *scss_handler;
727
728 scss_handler = scss.scss_per_sig[sig].scss_handler;
729 scss_flags = scss.scss_per_sig[sig].scss_flags;
730
731 switch(sig) {
732 case VKI_SIGSEGV:
733 case VKI_SIGBUS:
734 case VKI_SIGFPE:
735 case VKI_SIGILL:
736 case VKI_SIGTRAP:
737 /* For these, we always want to catch them and report, even
738 if the client code doesn't. */
739 skss_handler = sync_signalhandler;
740 break;
741
742 case VKI_SIGCONT:
743 /* Let the kernel handle SIGCONT unless the client is actually
744 catching it. */
745 case VKI_SIGCHLD:
746 case VKI_SIGWINCH:
747 case VKI_SIGURG:
748 /* For signals which have a default action of Ignore,
749 only set a handler if the client has set a signal handler.
750 Otherwise the kernel will interrupt a syscall which
751 wouldn't have otherwise been interrupted. */
752 if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
753 skss_handler = VKI_SIG_DFL;
754 else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
755 skss_handler = VKI_SIG_IGN;
756 else
757 skss_handler = async_signalhandler;
758 break;
759
760 default:
761 // VKI_SIGVG* are runtime variables, so we can't make them
762 // cases in the switch, so we handle them in the 'default' case.
763 if (sig == VG_SIGVGKILL)
764 skss_handler = sigvgkill_handler;
765 else {
766 if (scss_handler == VKI_SIG_IGN)
767 skss_handler = VKI_SIG_IGN;
768 else
769 skss_handler = async_signalhandler;
770 }
771 break;
772 }
773
774 /* Flags */
775
776 skss_flags = 0;
777
778 /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
779 skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);
780
781 /* SA_ONESHOT: ignore client setting */
782
783 /* SA_RESTART: ignore client setting and always set it for us.
784 Though we never rely on the kernel to restart a
785 syscall, we observe whether it wanted to restart the syscall
786 or not, which is needed by
787 VG_(fixup_guest_state_after_syscall_interrupted) */
788 skss_flags |= VKI_SA_RESTART;
789
790 /* SA_NOMASK: ignore it */
791
792 /* SA_ONSTACK: client setting is irrelevant here */
793 /* We don't set a signal stack, so ignore */
794
795 /* always ask for SA_SIGINFO */
796 skss_flags |= VKI_SA_SIGINFO;
797
798 /* use our own restorer */
799 skss_flags |= VKI_SA_RESTORER;
800
801 /* Create SKSS entry for this signal. */
802 if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
803 dst->skss_per_sig[sig].skss_handler = skss_handler;
804 else
805 dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;
806
807 dst->skss_per_sig[sig].skss_flags = skss_flags;
808 }
809
810 /* Sanity checks. */
811 vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
812 vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);
813
814 if (0)
815 pp_SKSS();
816 }
817
818
819 /* ---------------------------------------------------------------------
820 After a possible SCSS change, update SKSS and the kernel itself.
821 ------------------------------------------------------------------ */
822
823 // We need two levels of macro-expansion here to convert __NR_rt_sigreturn
824 // to a number before converting it to a string... sigh.
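// For example (illustrative expansion): with only one macro level, #name
// would stringify the unexpanded token, giving "__NR_rt_sigreturn" in the
// assembly; with the extra level, MY_SIGRETURN(__NR_rt_sigreturn) first
// expands the argument to its numeric value and only then stringifies it,
// giving e.g. "15" on a platform where that is the syscall number.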
825 extern void my_sigreturn(void);
826
827 #if defined(VGP_x86_linux)
828 # define _MY_SIGRETURN(name) \
829 ".text\n" \
830 ".globl my_sigreturn\n" \
831 "my_sigreturn:\n" \
832 " movl $" #name ", %eax\n" \
833 " int $0x80\n" \
834 ".previous\n"
835
836 #elif defined(VGP_amd64_linux)
837 # define _MY_SIGRETURN(name) \
838 ".text\n" \
839 ".globl my_sigreturn\n" \
840 "my_sigreturn:\n" \
841 " movq $" #name ", %rax\n" \
842 " syscall\n" \
843 ".previous\n"
844
845 #elif defined(VGP_ppc32_linux)
846 # define _MY_SIGRETURN(name) \
847 ".text\n" \
848 ".globl my_sigreturn\n" \
849 "my_sigreturn:\n" \
850 " li 0, " #name "\n" \
851 " sc\n" \
852 ".previous\n"
853
854 #elif defined(VGP_ppc64_linux)
855 # define _MY_SIGRETURN(name) \
856 ".align 2\n" \
857 ".globl my_sigreturn\n" \
858 ".section \".opd\",\"aw\"\n" \
859 ".align 3\n" \
860 "my_sigreturn:\n" \
861 ".quad .my_sigreturn,.TOC.@tocbase,0\n" \
862 ".previous\n" \
863 ".type .my_sigreturn,@function\n" \
864 ".globl .my_sigreturn\n" \
865 ".my_sigreturn:\n" \
866 " li 0, " #name "\n" \
867 " sc\n"
868
869 #elif defined(VGP_arm_linux)
870 # define _MY_SIGRETURN(name) \
871 ".text\n" \
872 ".globl my_sigreturn\n" \
873 "my_sigreturn:\n\t" \
874 " mov r7, #" #name "\n\t" \
875 " svc 0x00000000\n" \
876 ".previous\n"
877
878 #elif defined(VGP_arm64_linux)
879 # define _MY_SIGRETURN(name) \
880 ".text\n" \
881 ".globl my_sigreturn\n" \
882 "my_sigreturn:\n\t" \
883 " mov x8, #" #name "\n\t" \
884 " svc 0x0\n" \
885 ".previous\n"
886
887 #elif defined(VGP_x86_darwin)
888 # define _MY_SIGRETURN(name) \
889 ".text\n" \
890 ".globl my_sigreturn\n" \
891 "my_sigreturn:\n" \
892 "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
893 "int $0x80"
894
895 #elif defined(VGP_amd64_darwin)
896 // DDD: todo
897 # define _MY_SIGRETURN(name) \
898 ".text\n" \
899 ".globl my_sigreturn\n" \
900 "my_sigreturn:\n" \
901 "ud2\n"
902
903 #elif defined(VGP_s390x_linux)
904 # define _MY_SIGRETURN(name) \
905 ".text\n" \
906 ".globl my_sigreturn\n" \
907 "my_sigreturn:\n" \
908 " svc " #name "\n" \
909 ".previous\n"
910
911 #elif defined(VGP_mips32_linux)
912 # define _MY_SIGRETURN(name) \
913 ".text\n" \
914 "my_sigreturn:\n" \
915 " li $2, " #name "\n" /* apparently $2 is v0 */ \
916 " syscall\n" \
917 ".previous\n"
918
919 #elif defined(VGP_mips64_linux)
920 # define _MY_SIGRETURN(name) \
921 ".text\n" \
922 "my_sigreturn:\n" \
923 " li $2, " #name "\n" \
924 " syscall\n" \
925 ".previous\n"
926
927 #else
928 # error Unknown platform
929 #endif
930
931 #define MY_SIGRETURN(name) _MY_SIGRETURN(name)
932 asm(
933 MY_SIGRETURN(__NR_rt_sigreturn)
934 );
935
936
937 static void handle_SCSS_change ( Bool force_update )
938 {
939 Int res, sig;
940 SKSS skss_old;
941 vki_sigaction_toK_t ksa;
942 vki_sigaction_fromK_t ksa_old;
943
944 /* Remember old SKSS and calculate new one. */
945 skss_old = skss;
946 calculate_SKSS_from_SCSS ( &skss );
947
948 /* Compare the new SKSS entries vs the old ones, and update kernel
949 where they differ. */
950 for (sig = 1; sig <= VG_(max_signal); sig++) {
951
952 /* Trying to do anything with SIGKILL is pointless; just ignore
953 it. */
954 if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
955 continue;
956
957 if (!force_update) {
958 if ((skss_old.skss_per_sig[sig].skss_handler
959 == skss.skss_per_sig[sig].skss_handler)
960 && (skss_old.skss_per_sig[sig].skss_flags
961 == skss.skss_per_sig[sig].skss_flags))
962 /* no difference */
963 continue;
964 }
965
966 ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
967 ksa.sa_flags = skss.skss_per_sig[sig].skss_flags;
968 # if !defined(VGP_ppc32_linux) && \
969 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
970 !defined(VGP_mips32_linux)
971 ksa.sa_restorer = my_sigreturn;
972 # endif
973 /* Re above ifdef (also the assertion below), PaulM says:
974 The sa_restorer field is not used at all on ppc. Glibc
975 converts the sigaction you give it into a kernel sigaction,
976 but it doesn't put anything in the sa_restorer field.
977 */
978
979 /* block all signals in handler */
980 VG_(sigfillset)( &ksa.sa_mask );
981 VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
982 VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );
983
984 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
985 VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
986 "mask(msb..lsb) 0x%llx 0x%llx\n",
987 sig, ksa.ksa_handler,
988 (UWord)ksa.sa_flags,
989 _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
990 (ULong)ksa.sa_mask.sig[0]);
991
992 res = VG_(sigaction)( sig, &ksa, &ksa_old );
993 vg_assert(res == 0);
994
995 /* Since we got the old sigaction more or less for free, might
996 as well extract the maximum sanity-check value from it. */
997 if (!force_update) {
998 vg_assert(ksa_old.ksa_handler
999 == skss_old.skss_per_sig[sig].skss_handler);
1000 vg_assert(ksa_old.sa_flags
1001 == skss_old.skss_per_sig[sig].skss_flags);
1002 # if !defined(VGP_ppc32_linux) && \
1003 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
1004 !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux)
1005 vg_assert(ksa_old.sa_restorer == my_sigreturn);
1006 # endif
1007 VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
1008 VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
1009 vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
1010 }
1011 }
1012 }
1013
1014
1015 /* ---------------------------------------------------------------------
1016 Update/query SCSS in accordance with client requests.
1017 ------------------------------------------------------------------ */
1018
1019 /* Logic for this alt-stack stuff copied directly from do_sigaltstack
1020 in kernel/signal.[ch] */
1021
1022 /* True if we are on the alternate signal stack. */
1023 static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
1024 {
1025 ThreadState *tst = VG_(get_ThreadState)(tid);
1026
1027 return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
1028 }
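
/* Worked example of the single unsigned comparison above (made-up
   numbers): with ss_sp = 0x1000 and ss_size = 0x2000, an SP of 0x1800
   gives 0x800 < 0x2000, i.e. on-stack; an SP of 0x3000 gives 0x2000,
   which fails the test; and an SP of 0x800 wraps around to a huge
   unsigned value, so it too correctly tests as off-stack. */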
1029
1030 static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
1031 {
1032 ThreadState *tst = VG_(get_ThreadState)(tid);
1033
1034 return (tst->altstack.ss_size == 0
1035 ? VKI_SS_DISABLE
1036 : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
1037 }
1038
1039
1040 SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
1041 {
1042 Addr m_SP;
1043
1044 vg_assert(VG_(is_valid_tid)(tid));
1045 m_SP = VG_(get_SP)(tid);
1046
1047 if (VG_(clo_trace_signals))
1048 VG_(dmsg)("sys_sigaltstack: tid %d, "
1049 "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
1050 tid, (void*)ss,
1051 ss ? ss->ss_sp : 0,
1052 (ULong)(ss ? ss->ss_size : 0),
1053 (ULong)(ss ? ss->ss_flags : 0),
1054 (void*)oss, (void*)m_SP);
1055
1056 if (oss != NULL) {
1057 oss->ss_sp = VG_(threads)[tid].altstack.ss_sp;
1058 oss->ss_size = VG_(threads)[tid].altstack.ss_size;
1059 oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
1060 | sas_ss_flags(tid, m_SP);
1061 }
1062
1063 if (ss != NULL) {
1064 if (on_sig_stack(tid, VG_(get_SP)(tid))) {
1065 return VG_(mk_SysRes_Error)( VKI_EPERM );
1066 }
1067 if (ss->ss_flags != VKI_SS_DISABLE
1068 && ss->ss_flags != VKI_SS_ONSTACK
1069 && ss->ss_flags != 0) {
1070 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1071 }
1072 if (ss->ss_flags == VKI_SS_DISABLE) {
1073 VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
1074 } else {
1075 if (ss->ss_size < VKI_MINSIGSTKSZ) {
1076 return VG_(mk_SysRes_Error)( VKI_ENOMEM );
1077 }
1078
1079 VG_(threads)[tid].altstack.ss_sp = ss->ss_sp;
1080 VG_(threads)[tid].altstack.ss_size = ss->ss_size;
1081 VG_(threads)[tid].altstack.ss_flags = 0;
1082 }
1083 }
1084 return VG_(mk_SysRes_Success)( 0 );
1085 }
1086
1087
1088 SysRes VG_(do_sys_sigaction) ( Int signo,
1089 const vki_sigaction_toK_t* new_act,
1090 vki_sigaction_fromK_t* old_act )
1091 {
1092 if (VG_(clo_trace_signals))
1093 VG_(dmsg)("sys_sigaction: sigNo %d, "
1094 "new %#lx, old %#lx, new flags 0x%llx\n",
1095 signo, (UWord)new_act, (UWord)old_act,
1096 (ULong)(new_act ? new_act->sa_flags : 0));
1097
1098 /* Rule out various error conditions. The aim is to ensure that
1099 when the call is passed to the kernel it will definitely
1100 succeed. */
1101
1102 /* Reject out-of-range signal numbers. */
1103 if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;
1104
1105 /* don't let them use our signals */
1106 if ( (signo > VG_SIGVGRTUSERMAX)
1107 && new_act
1108 && !(new_act->ksa_handler == VKI_SIG_DFL
1109 || new_act->ksa_handler == VKI_SIG_IGN) )
1110 goto bad_signo_reserved;
1111
1112 /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
1113 if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
1114 && new_act
1115 && new_act->ksa_handler != VKI_SIG_DFL)
1116 goto bad_sigkill_or_sigstop;
1117
1118 /* If the client supplied non-NULL old_act, copy the relevant SCSS
1119 entry into it. */
1120 if (old_act) {
1121 old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
1122 old_act->sa_flags = scss.scss_per_sig[signo].scss_flags;
1123 old_act->sa_mask = scss.scss_per_sig[signo].scss_mask;
1124 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1125 old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
1126 # endif
1127 }
1128
1129 /* And now copy new SCSS entry from new_act. */
1130 if (new_act) {
1131 scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
1132 scss.scss_per_sig[signo].scss_flags = new_act->sa_flags;
1133 scss.scss_per_sig[signo].scss_mask = new_act->sa_mask;
1134
1135 scss.scss_per_sig[signo].scss_restorer = NULL;
1136 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1137 scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
1138 # endif
1139
1140 scss.scss_per_sig[signo].scss_sa_tramp = NULL;
1141 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1142 scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
1143 # endif
1144
1145 VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
1146 VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
1147 }
1148
1149 /* All happy bunnies ... */
1150 if (new_act) {
1151 handle_SCSS_change( False /* lazy update */ );
1152 }
1153 return VG_(mk_SysRes_Success)( 0 );
1154
1155 bad_signo:
1156 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1157 VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
1158 }
1159 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1160
1161 bad_signo_reserved:
1162 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1163 VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
1164 VG_(signame)(signo));
1165 VG_(umsg)(" the %s signal is used internally by Valgrind\n",
1166 VG_(signame)(signo));
1167 }
1168 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1169
1170 bad_sigkill_or_sigstop:
1171 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1172 VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
1173 VG_(signame)(signo));
1174 VG_(umsg)(" the %s signal is uncatchable\n",
1175 VG_(signame)(signo));
1176 }
1177 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1178 }
1179
1180
1181 static
1182 void do_sigprocmask_bitops ( Int vki_how,
1183 vki_sigset_t* orig_set,
1184 vki_sigset_t* modifier )
1185 {
1186 switch (vki_how) {
1187 case VKI_SIG_BLOCK:
1188 VG_(sigaddset_from_set)( orig_set, modifier );
1189 break;
1190 case VKI_SIG_UNBLOCK:
1191 VG_(sigdelset_from_set)( orig_set, modifier );
1192 break;
1193 case VKI_SIG_SETMASK:
1194 *orig_set = *modifier;
1195 break;
1196 default:
1197 VG_(core_panic)("do_sigprocmask_bitops");
1198 break;
1199 }
1200 }
1201
1202 static
1203 HChar* format_sigset ( const vki_sigset_t* set )
1204 {
1205 static HChar buf[128];
1206 int w;
1207
1208 VG_(strcpy)(buf, "");
1209
1210 for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
1211 {
1212 # if _VKI_NSIG_BPW == 32
1213 VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
1214 set ? (ULong)set->sig[w] : 0);
1215 # elif _VKI_NSIG_BPW == 64
1216 VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
1217 set ? (ULong)set->sig[w] : 0);
1218 # else
1219 # error "Unsupported value for _VKI_NSIG_BPW"
1220 # endif
1221 }
1222
1223 return buf;
1224 }
1225
1226 /*
1227 This updates the thread's signal mask. There's no such thing as a
1228 process-wide signal mask.
1229
1230 Note that the thread signal masks are an implicit part of SCSS,
1231 which is why this routine is allowed to mess with them.
1232 */
1233 static
1234 void do_setmask ( ThreadId tid,
1235 Int how,
1236 vki_sigset_t* newset,
1237 vki_sigset_t* oldset )
1238 {
1239 if (VG_(clo_trace_signals))
1240 VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
1241 tid, how,
1242 how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
1243 how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
1244 how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
1245 newset, newset ? format_sigset(newset) : "NULL" );
1246
1247 /* Just do this thread. */
1248 vg_assert(VG_(is_valid_tid)(tid));
1249 if (oldset) {
1250 *oldset = VG_(threads)[tid].sig_mask;
1251 if (VG_(clo_trace_signals))
1252 VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
1253 }
1254 if (newset) {
1255 do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
1256 VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
1257 VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
1258 VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
1259 }
1260 }
1261
1262
1263 SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
1264 Int how,
1265 vki_sigset_t* set,
1266 vki_sigset_t* oldset )
1267 {
1268 switch(how) {
1269 case VKI_SIG_BLOCK:
1270 case VKI_SIG_UNBLOCK:
1271 case VKI_SIG_SETMASK:
1272 vg_assert(VG_(is_valid_tid)(tid));
1273 do_setmask ( tid, how, set, oldset );
1274 return VG_(mk_SysRes_Success)( 0 );
1275
1276 default:
1277 VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
1278 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1279 }
1280 }
1281
1282
1283 /* ---------------------------------------------------------------------
1284 LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
1285 ------------------------------------------------------------------ */
1286
1287 /* ---------------------------------------------------------------------
1288 Handy utilities to block/restore all host signals.
1289 ------------------------------------------------------------------ */
1290
1291 /* Block all host signals, dumping the old mask in *saved_mask. */
1292 static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
1293 {
1294 Int ret;
1295 vki_sigset_t block_procmask;
1296 VG_(sigfillset)(&block_procmask);
1297 ret = VG_(sigprocmask)
1298 (VKI_SIG_SETMASK, &block_procmask, saved_mask);
1299 vg_assert(ret == 0);
1300 }
1301
1302 /* Restore the blocking mask using the supplied saved one. */
1303 static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
1304 {
1305 Int ret;
1306 ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
1307 vg_assert(ret == 0);
1308 }
1309
1310 void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
1311 {
1312 block_all_host_signals(saved_mask);
1313 if (VG_(threads)[tid].sig_queue != NULL) {
1314 VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
1315 VG_(threads)[tid].sig_queue = NULL;
1316 }
1317 restore_all_host_signals(saved_mask);
1318 }
1319
1320 /* ---------------------------------------------------------------------
1321 The signal simulation proper. A simplified version of what the
1322 Linux kernel does.
1323 ------------------------------------------------------------------ */
1324
1325 /* Set up a stack frame (VgSigContext) for the client's signal
1326 handler. */
1327 static
1328 void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
1329 const struct vki_ucontext *uc )
1330 {
1331 Addr esp_top_of_frame;
1332 ThreadState* tst;
1333 Int sigNo = siginfo->si_signo;
1334
1335 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
1336 vg_assert(VG_(is_valid_tid)(tid));
1337 tst = & VG_(threads)[tid];
1338
1339 if (VG_(clo_trace_signals)) {
1340 VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
1341 VG_(get_and_pp_StackTrace)(tid, 10);
1342 }
1343
1344 if (/* this signal asked to run on an alt stack */
1345 (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
1346 && /* there is a defined and enabled alt stack, which we're not
1347 already using. Logic from get_sigframe in
1348 arch/i386/kernel/signal.c. */
1349 sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
1350 ) {
1351 esp_top_of_frame
1352 = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
1353 if (VG_(clo_trace_signals))
1354 VG_(dmsg)("delivering signal %d (%s) to thread %d: "
1355 "on ALT STACK (%p-%p; %ld bytes)\n",
1356 sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
1357 (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
1358 (Word)tst->altstack.ss_size );
1359
1360 /* Signal delivery to tools */
1361 VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );
1362
1363 } else {
1364 esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
1365
1366 /* Signal delivery to tools */
1367 VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
1368 }
1369
1370 vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
1371 vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
1372
1373 /* This may fail if the client stack is busted; if that happens,
1374 the whole process will exit rather than simply calling the
1375 signal handler. */
1376 VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
1377 scss.scss_per_sig[sigNo].scss_handler,
1378 scss.scss_per_sig[sigNo].scss_flags,
1379 &tst->sig_mask,
1380 scss.scss_per_sig[sigNo].scss_restorer);
1381 }
1382
1383
1384 const HChar *VG_(signame)(Int sigNo)
1385 {
1386 static HChar buf[20];
1387
1388 switch(sigNo) {
1389 case VKI_SIGHUP: return "SIGHUP";
1390 case VKI_SIGINT: return "SIGINT";
1391 case VKI_SIGQUIT: return "SIGQUIT";
1392 case VKI_SIGILL: return "SIGILL";
1393 case VKI_SIGTRAP: return "SIGTRAP";
1394 case VKI_SIGABRT: return "SIGABRT";
1395 case VKI_SIGBUS: return "SIGBUS";
1396 case VKI_SIGFPE: return "SIGFPE";
1397 case VKI_SIGKILL: return "SIGKILL";
1398 case VKI_SIGUSR1: return "SIGUSR1";
1399 case VKI_SIGUSR2: return "SIGUSR2";
1400 case VKI_SIGSEGV: return "SIGSEGV";
1401 case VKI_SIGPIPE: return "SIGPIPE";
1402 case VKI_SIGALRM: return "SIGALRM";
1403 case VKI_SIGTERM: return "SIGTERM";
1404 # if defined(VKI_SIGSTKFLT)
1405 case VKI_SIGSTKFLT: return "SIGSTKFLT";
1406 # endif
1407 case VKI_SIGCHLD: return "SIGCHLD";
1408 case VKI_SIGCONT: return "SIGCONT";
1409 case VKI_SIGSTOP: return "SIGSTOP";
1410 case VKI_SIGTSTP: return "SIGTSTP";
1411 case VKI_SIGTTIN: return "SIGTTIN";
1412 case VKI_SIGTTOU: return "SIGTTOU";
1413 case VKI_SIGURG: return "SIGURG";
1414 case VKI_SIGXCPU: return "SIGXCPU";
1415 case VKI_SIGXFSZ: return "SIGXFSZ";
1416 case VKI_SIGVTALRM: return "SIGVTALRM";
1417 case VKI_SIGPROF: return "SIGPROF";
1418 case VKI_SIGWINCH: return "SIGWINCH";
1419 case VKI_SIGIO: return "SIGIO";
1420 # if defined(VKI_SIGPWR)
1421 case VKI_SIGPWR: return "SIGPWR";
1422 # endif
1423 # if defined(VKI_SIGUNUSED)
1424 case VKI_SIGUNUSED: return "SIGUNUSED";
1425 # endif
1426
1427 # if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
1428 case VKI_SIGRTMIN ... VKI_SIGRTMAX:
1429 VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
1430 return buf;
1431 # endif
1432
1433 default:
1434 VG_(sprintf)(buf, "SIG%d", sigNo);
1435 return buf;
1436 }
1437 }
1438
1439 /* Hit ourselves with a signal using the default handler */
1440 void VG_(kill_self)(Int sigNo)
1441 {
1442 Int r;
1443 vki_sigset_t mask, origmask;
1444 vki_sigaction_toK_t sa, origsa2;
1445 vki_sigaction_fromK_t origsa;
1446
1447 sa.ksa_handler = VKI_SIG_DFL;
1448 sa.sa_flags = 0;
1449 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1450 sa.sa_restorer = 0;
1451 # endif
1452 VG_(sigemptyset)(&sa.sa_mask);
1453
1454 VG_(sigaction)(sigNo, &sa, &origsa);
1455
1456 VG_(sigemptyset)(&mask);
1457 VG_(sigaddset)(&mask, sigNo);
1458 VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);
1459
1460 r = VG_(kill)(VG_(getpid)(), sigNo);
1461 # if defined(VGO_linux)
1462 /* This sometimes fails with EPERM on Darwin. I don't know why. */
1463 vg_assert(r == 0);
1464 # endif
1465
1466 VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
1467 VG_(sigaction)(sigNo, &origsa2, NULL);
1468 VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
1469 }
1470
1471 // The si_code describes where the signal came from. Some come from the
1472 // kernel, eg.: seg faults, illegal opcodes. Some come from the user, eg.:
1473 // from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
1474 // request (SI_ASYNCIO). There's lots of implementation-defined leeway in
1475 // POSIX, but the user vs. kernel distinction is what we want here. We also
1476 // pass in some other details that can help when si_code is unreliable.
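// For example, on Linux a SIGSEGV caused by a bad access arrives with
// si_code SEGV_MAPERR or SEGV_ACCERR (both > 0, i.e. "from the kernel"),
// whereas one sent with kill() arrives with si_code SI_USER (0) and one
// sent with sigqueue() arrives with SI_QUEUE (< 0).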
1477 static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
1478 {
1479 # if defined(VGO_linux)
1480 // On Linux, SI_USER is zero, negative values are from the user, positive
1481 // values are from the kernel. There are SI_FROMUSER and SI_FROMKERNEL
1482 // macros but we don't use them here because other platforms don't have
1483 // them.
1484 return ( si_code > VKI_SI_USER ? True : False );
1485
1486 # elif defined(VGO_darwin)
1487 // On Darwin 9.6.0, the si_code is completely unreliable. It should be the
1488 // case that 0 means "user", and >0 means "kernel". But:
1489 // - For SIGSEGV, it seems quite reliable.
1490 // - For SIGBUS, it's always 2.
1491 // - For SIGFPE, it's often 0, even for kernel ones (eg.
1492 // div-by-integer-zero always gives zero).
1493 // - For SIGILL, it's unclear.
1494 // - For SIGTRAP, it's always 1.
1495 // You can see the "NOTIMP" (not implemented) status of a number of the
1496 // sub-cases in sys/signal.h. Hopefully future versions of Darwin will
1497 // get this right.
1498
1499 // If we're blocked waiting on a syscall, it must be a user signal, because
1500 // the kernel won't generate sync signals within syscalls.
1501 if (VG_(threads)[tid].status == VgTs_WaitSys) {
1502 return False;
1503
1504 // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
1505 } else if (SIGSEGV == signum) {
1506 return ( si_code > 0 ? True : False );
1507
1508 // If it's anything else, assume it's kernel-generated. Reason being that
1509 // kernel-generated sync signals are more common, and it's probable that
1510 // misdiagnosing a user signal as a kernel signal is better than the
1511 // opposite.
1512 } else {
1513 return True;
1514 }
1515 # else
1516 # error Unknown OS
1517 # endif
1518 }
1519
1520 // This is an arbitrary si_code that we only use internally. It corresponds
1521 // to the value SI_KERNEL on Linux, but that's not really of any significance
1522 // as far as I can determine.
1523 #define VKI_SEGV_MADE_UP_GPF 0x80
1524
1525 /*
1526 Perform the default action of a signal. If the signal is fatal, it
1527 marks all threads as needing to exit, but it doesn't actually kill
1528 the process or thread.
1529
1530 If we're not being quiet, then print out some more detail about
1531 fatal signals (esp. core dumping signals).
1532 */
1533 static void default_action(const vki_siginfo_t *info, ThreadId tid)
1534 {
1535 Int sigNo = info->si_signo;
1536 Bool terminate = False; /* kills process */
1537 Bool core = False; /* kills process w/ core */
1538 struct vki_rlimit corelim;
1539 Bool could_core;
1540
1541 vg_assert(VG_(is_running_thread)(tid));
1542
1543 switch(sigNo) {
1544 case VKI_SIGQUIT: /* core */
1545 case VKI_SIGILL: /* core */
1546 case VKI_SIGABRT: /* core */
1547 case VKI_SIGFPE: /* core */
1548 case VKI_SIGSEGV: /* core */
1549 case VKI_SIGBUS: /* core */
1550 case VKI_SIGTRAP: /* core */
1551 case VKI_SIGXCPU: /* core */
1552 case VKI_SIGXFSZ: /* core */
1553 terminate = True;
1554 core = True;
1555 break;
1556
1557 case VKI_SIGHUP: /* term */
1558 case VKI_SIGINT: /* term */
1559 case VKI_SIGKILL: /* term - we won't see this */
1560 case VKI_SIGPIPE: /* term */
1561 case VKI_SIGALRM: /* term */
1562 case VKI_SIGTERM: /* term */
1563 case VKI_SIGUSR1: /* term */
1564 case VKI_SIGUSR2: /* term */
1565 case VKI_SIGIO: /* term */
1566 # if defined(VKI_SIGPWR)
1567 case VKI_SIGPWR: /* term */
1568 # endif
1569 case VKI_SIGSYS: /* term */
1570 case VKI_SIGPROF: /* term */
1571 case VKI_SIGVTALRM: /* term */
1572 # if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
1573 case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
1574 # endif
1575 terminate = True;
1576 break;
1577 }
1578
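   /* A core-dumping signal always also terminates the process. */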
1579 vg_assert(!core || (core && terminate));
1580
1581 if (VG_(clo_trace_signals))
1582 VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
1583 sigNo, info->si_code, terminate ? "terminate" : "ignore",
1584 core ? "+core" : "");
1585
1586 if (!terminate)
1587 return; /* nothing to do */
1588
1589 could_core = core;
1590
1591 if (core) {
1592 /* If they set the core-size limit to zero, don't generate a
1593 core file */
1594
1595 VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);
1596
1597 if (corelim.rlim_cur == 0)
1598 core = False;
1599 }
1600
1601 if ( (VG_(clo_verbosity) > 1 ||
1602 (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
1603 ) &&
1604 !VG_(clo_xml) ) {
1605 VG_(umsg)(
1606 "\n"
1607 "Process terminating with default action of signal %d (%s)%s\n",
1608 sigNo, VG_(signame)(sigNo), core ? ": dumping core" : "");
1609
1610 /* Be helpful - decode some more details about this fault */
1611 if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
1612 const HChar *event = NULL;
1613 Bool haveaddr = True;
1614
1615 switch(sigNo) {
1616 case VKI_SIGSEGV:
1617 switch(info->si_code) {
1618 case VKI_SEGV_MAPERR: event = "Access not within mapped region";
1619 break;
1620 case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
1621 break;
1622 case VKI_SEGV_MADE_UP_GPF:
1623 /* General Protection Fault: The CPU/kernel
1624 isn't telling us anything useful, but this
1625 is commonly the result of exceeding a
1626 segment limit. */
1627 event = "General Protection Fault";
1628 haveaddr = False;
1629 break;
1630 }
1631 #if 0
1632 {
1633 HChar buf[110];
1634 VG_(am_show_nsegments)(0,"post segfault");
1635 VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1636 VG_(system)(buf);
1637 }
1638 #endif
1639 break;
1640
1641 case VKI_SIGILL:
1642 switch(info->si_code) {
1643 case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
1644 case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
1645 case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
1646 case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
1647 case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
1648 case VKI_ILL_PRVREG: event = "Privileged register"; break;
1649 case VKI_ILL_COPROC: event = "Coprocessor error"; break;
1650 case VKI_ILL_BADSTK: event = "Internal stack error"; break;
1651 }
1652 break;
1653
1654 case VKI_SIGFPE:
1655 switch (info->si_code) {
1656 case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
1657 case VKI_FPE_INTOVF: event = "Integer overflow"; break;
1658 case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
1659 case VKI_FPE_FLTOVF: event = "FP overflow"; break;
1660 case VKI_FPE_FLTUND: event = "FP underflow"; break;
1661 case VKI_FPE_FLTRES: event = "FP inexact"; break;
1662 case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
1663 case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
1664 }
1665 break;
1666
1667 case VKI_SIGBUS:
1668 switch (info->si_code) {
1669 case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
1670 case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
1671 case VKI_BUS_OBJERR: event = "Hardware error"; break;
1672 }
1673 break;
1674 } /* switch (sigNo) */
1675
1676 if (event != NULL) {
1677 if (haveaddr)
1678 VG_(umsg)(" %s at address %p\n",
1679 event, info->VKI_SIGINFO_si_addr);
1680 else
1681 VG_(umsg)(" %s\n", event);
1682 }
1683 }
1684 /* Print a stack trace. Be cautious if the thread's SP is in an
1685 obviously stupid place (not mapped readable) that would
1686 likely cause a segfault. */
1687 if (VG_(is_valid_tid)(tid)) {
1688 Word first_ip_delta = 0;
1689 #if defined(VGO_linux)
1690 /* Make sure that the address stored in the stack pointer is
1691 located in a mapped page. That is not necessarily so. E.g.
1692 consider the scenario where the stack pointer was decreased
1693 and now has a value that is just below the end of a page that has
1694 not been mapped yet. In that case VG_(am_is_valid_for_client)
1695 will consider the address of the stack pointer invalid and that
1696 would cause a back-trace of depth 1 to be printed, instead of a
1697 full back-trace. */
1698 if (tid == 1) { // main thread
1699 Addr esp = VG_(get_SP)(tid);
1700 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
1701 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
1702 if (VG_(clo_trace_signals))
1703 VG_(dmsg)(" -> extended stack base to %#lx\n",
1704 VG_PGROUNDDN(esp));
1705 }
1706 }
1707 #endif
1708 #if defined(VGA_s390x)
1709 if (sigNo == VKI_SIGILL) {
1710 /* The guest instruction address has been adjusted earlier to
1711 point to the insn following the one that could not be decoded.
1712 When printing the back-trace here we need to undo that
1713 adjustment so the first line in the back-trace reports the
1714 correct address. */
1715 Addr addr = (Addr)info->VKI_SIGINFO_si_addr;
1716 UChar byte = ((UChar *)addr)[0];
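         /* On s390 the top two bits of the first opcode byte encode the
            instruction length (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6
            bytes); the expression below recovers that length. */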
1717 Int insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
1718
1719 first_ip_delta = -insn_length;
1720 }
1721 #endif
1722 ExeContext* ec = VG_(am_is_valid_for_client)
1723 (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
1724 ? VG_(record_ExeContext)( tid, first_ip_delta )
1725 : VG_(record_depth_1_ExeContext)( tid,
1726 first_ip_delta );
1727 vg_assert(ec);
1728 VG_(pp_ExeContext)( ec );
1729 }
1730 if (sigNo == VKI_SIGSEGV
1731 && is_signal_from_kernel(tid, sigNo, info->si_code)
1732 && info->si_code == VKI_SEGV_MAPERR) {
1733 VG_(umsg)(" If you believe this happened as a result of a stack\n" );
1734 VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
1735 VG_(umsg)(" possible), you can try to increase the size of the\n" );
1736 VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
1737 // FIXME: assumes main ThreadId == 1
1738 if (VG_(is_valid_tid)(1)) {
1739 VG_(umsg)(
1740 " The main thread stack size used in this run was %lu.\n",
1741 VG_(threads)[1].client_stack_szB);
1742 }
1743 }
1744 }
1745
1746 if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
1747 VG_(start_debugger)( tid );
1748 }
1749
1750 if (core) {
1751 const static struct vki_rlimit zero = { 0, 0 };
1752
1753 VG_(make_coredump)(tid, info, corelim.rlim_cur);
1754
1755 /* Make sure we don't get a confusing kernel-generated
1756 coredump when we finally exit */
1757 VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
1758 }
1759
1760 /* stash fatal signal in main thread */
1761 // what's this for?
1762 //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
1763
1764 /* everyone dies */
1765 VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
1766 VG_(threads)[tid].exitreason = VgSrc_FatalSig;
1767 VG_(threads)[tid].os_state.fatalsig = sigNo;
1768 }
1769
1770 /*
1771 This does the business of delivering a signal to a thread. It may
1772 be called from either a real signal handler, or from normal code to
1773 cause the thread to enter the signal handler.
1774
1775 This updates the thread state, but it does not set it to be
1776 Runnable.
1777 */
1778 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
1779 const struct vki_ucontext *uc )
1780 {
1781 Int sigNo = info->si_signo;
1782 SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
1783 void *handler_fn;
1784 ThreadState *tst = VG_(get_ThreadState)(tid);
1785
1786 if (VG_(clo_trace_signals))
1787 VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
1788 sigNo, VG_(signame)(sigNo), info->si_code, tid );
1789
1790 if (sigNo == VG_SIGVGKILL) {
1791 /* If this is a SIGVGKILL, we're expecting it to interrupt any
1792 blocked syscall. It doesn't matter whether the VCPU state is
1793 set to restart or not, because we don't expect it will
1794 execute any more client instructions. */
1795 vg_assert(VG_(is_exiting)(tid));
1796 return;
1797 }
1798
1799 /* If the client specifies SIG_IGN, treat it as SIG_DFL.
1800
1801 If deliver_signal() is being called on a thread, we want
1802 the signal to get through no matter what; if they're ignoring
1803 it, then we do this override (this is so we can send it SIGSEGV,
1804 etc). */
1805 handler_fn = handler->scss_handler;
1806 if (handler_fn == VKI_SIG_IGN)
1807 handler_fn = VKI_SIG_DFL;
1808
1809 vg_assert(handler_fn != VKI_SIG_IGN);
1810
1811 if (handler_fn == VKI_SIG_DFL) {
1812 default_action(info, tid);
1813 } else {
1814 /* Create a signal delivery frame, and set the client's %ESP and
1815 %EIP so that when execution continues, we will enter the
1816 signal handler with the frame on top of the client's stack,
1817 as it expects.
1818
1819 Signal delivery can fail if the client stack is too small or
1820 missing, and we can't push the frame. If that happens,
1821 push_signal_frame will cause the whole process to exit when
1822 we next hit the scheduler.
1823 */
1824 vg_assert(VG_(is_valid_tid)(tid));
1825
1826 push_signal_frame ( tid, info, uc );
1827
1828 if (handler->scss_flags & VKI_SA_ONESHOT) {
1829 /* Do the ONESHOT thing. */
1830 handler->scss_handler = VKI_SIG_DFL;
1831
1832 handle_SCSS_change( False /* lazy update */ );
1833 }
1834
1835 /* At this point:
1836 tst->sig_mask is the current signal mask
1837 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
1838 handler->scss_mask is the mask set by the handler
1839
1840 Handler gets a mask of tmp_sig_mask|handler_mask|signo
1841 */
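      /* (SA_NOMASK, aka SA_NODEFER, means the signal is not to be blocked
         while its own handler runs, hence the test below.) */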
1842 tst->sig_mask = tst->tmp_sig_mask;
1843 if (!(handler->scss_flags & VKI_SA_NOMASK)) {
1844 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
1845 VG_(sigaddset)(&tst->sig_mask, sigNo);
1846 tst->tmp_sig_mask = tst->sig_mask;
1847 }
1848 }
1849
1850 /* Thread state is ready to go - just add Runnable */
1851 }
1852
1853 static void resume_scheduler(ThreadId tid)
1854 {
1855 ThreadState *tst = VG_(get_ThreadState)(tid);
1856
1857 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1858
1859 if (tst->sched_jmpbuf_valid) {
1860 /* Can't continue; must longjmp back to the scheduler and thus
1861 enter the sighandler immediately. */
1862 VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
1863 }
1864 }
1865
1866 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
1867 {
1868 vki_siginfo_t info;
1869
1870 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1871
1872 VG_(memset)(&info, 0, sizeof(info));
1873 info.si_signo = VKI_SIGSEGV;
1874 info.si_code = si_code;
1875 info.VKI_SIGINFO_si_addr = (void*)addr;
1876
1877 /* Even if gdbserver indicates to ignore the signal, we must deliver it.
1878 So ignore the return value of VG_(gdbserver_report_signal). */
1879 (void) VG_(gdbserver_report_signal) (VKI_SIGSEGV, tid);
1880
1881 /* If they're trying to block the signal, force it to be delivered */
1882 if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
1883 VG_(set_default_handler)(VKI_SIGSEGV);
1884
1885 deliver_signal(tid, &info, NULL);
1886 }
1887
1888 // Synthesize a fault where the address is OK, but the page
1889 // permissions are bad.
1890 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
1891 {
1892 synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
1893 }
1894
1895 // Synthesize a fault where there's nothing mapped at the address.
1896 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
1897 {
1898 synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
1899 }
1900
1901 // Synthesize a misc memory fault.
1902 void VG_(synth_fault)(ThreadId tid)
1903 {
1904 synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
1905 }
1906
1907 // Synthesise a SIGILL.
1908 void VG_(synth_sigill)(ThreadId tid, Addr addr)
1909 {
1910 vki_siginfo_t info;
1911
1912 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1913
1914 VG_(memset)(&info, 0, sizeof(info));
1915 info.si_signo = VKI_SIGILL;
1916 info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
1917 info.VKI_SIGINFO_si_addr = (void*)addr;
1918
1919 if (VG_(gdbserver_report_signal) (VKI_SIGILL, tid)) {
1920 resume_scheduler(tid);
1921 deliver_signal(tid, &info, NULL);
1922 }
1923 else
1924 resume_scheduler(tid);
1925 }
1926
1927 // Synthesise a SIGBUS.
1928 void VG_(synth_sigbus)(ThreadId tid)
1929 {
1930 vki_siginfo_t info;
1931
1932 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1933
1934 VG_(memset)(&info, 0, sizeof(info));
1935 info.si_signo = VKI_SIGBUS;
1936 /* There are several meanings to SIGBUS (as per POSIX, presumably),
1937 but the most widely understood is "invalid address alignment",
1938 so let's use that. */
1939 info.si_code = VKI_BUS_ADRALN;
1940 /* If we knew the invalid address in question, we could put it
1941 in .si_addr. Oh well. */
1942 /* info.VKI_SIGINFO_si_addr = (void*)addr; */
1943
1944 if (VG_(gdbserver_report_signal) (VKI_SIGBUS, tid)) {
1945 resume_scheduler(tid);
1946 deliver_signal(tid, &info, NULL);
1947 }
1948 else
1949 resume_scheduler(tid);
1950 }
1951
1952 // Synthesise a SIGTRAP.
1953 void VG_(synth_sigtrap)(ThreadId tid)
1954 {
1955 vki_siginfo_t info;
1956 struct vki_ucontext uc;
1957 # if defined(VGP_x86_darwin)
1958 struct __darwin_mcontext32 mc;
1959 # elif defined(VGP_amd64_darwin)
1960 struct __darwin_mcontext64 mc;
1961 # endif
1962
1963 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1964
1965 VG_(memset)(&info, 0, sizeof(info));
1966 VG_(memset)(&uc, 0, sizeof(uc));
1967 info.si_signo = VKI_SIGTRAP;
1968 info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
1969
1970 # if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
1971 uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
1972 for a breakpoint trap... */
1973 uc.uc_mcontext.err = 0; /* tjh: no error code for x86
1974 breakpoint trap... */
1975 # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1976 /* the same thing, but using Darwin field/struct names */
1977 VG_(memset)(&mc, 0, sizeof(mc));
1978 uc.uc_mcontext = &mc;
1979 uc.uc_mcontext->__es.__trapno = 3;
1980 uc.uc_mcontext->__es.__err = 0;
1981 # endif
1982
1983 /* fixs390: do we need to do anything here for s390 ? */
1984 if (VG_(gdbserver_report_signal) (VKI_SIGTRAP, tid)) {
1985 resume_scheduler(tid);
1986 deliver_signal(tid, &info, &uc);
1987 }
1988 else
1989 resume_scheduler(tid);
1990 }
1991
1992 // Synthesise a SIGFPE.
1993 void VG_(synth_sigfpe)(ThreadId tid, UInt code)
1994 {
1995 // Only tested on mips32 and mips64
1996 #if !defined(VGA_mips32) && !defined(VGA_mips64)
1997 vg_assert(0);
1998 #else
1999 vki_siginfo_t info;
2000 struct vki_ucontext uc;
2001
2002 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
2003
2004 VG_(memset)(&info, 0, sizeof(info));
2005 VG_(memset)(&uc, 0, sizeof(uc));
2006 info.si_signo = VKI_SIGFPE;
2007 info.si_code = code;
2008
2009 if (VG_(gdbserver_report_signal) (VKI_SIGFPE, tid)) {
2010 resume_scheduler(tid);
2011 deliver_signal(tid, &info, &uc);
2012 }
2013 else
2014 resume_scheduler(tid);
2015 #endif
2016 }
2017
2018 /* Make a signal pending for a thread, for later delivery.
2019 VG_(poll_signals) will arrange for it to be delivered at the right
2020 time.
2021
2022 tid==0 means add it to the process-wide queue, and not send it to a
2023 specific thread.
2024 */
2025 static
2026 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
2027 {
2028 ThreadState *tst;
2029 SigQueue *sq;
2030 vki_sigset_t savedmask;
2031
2032 tst = VG_(get_ThreadState)(tid);
2033
2034 /* Protect the signal queue against async deliveries */
2035 block_all_host_signals(&savedmask);
2036
2037 if (tst->sig_queue == NULL) {
2038 tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
2039 sizeof(*tst->sig_queue));
2040 VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
2041 }
2042 sq = tst->sig_queue;
2043
2044 if (VG_(clo_trace_signals))
2045 VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
2046 si->si_signo, sq->next, tid);
2047
2048 /* Add signal to the queue. If the queue gets overrun, then old
2049 queued signals may get lost.
2050
2051 XXX We should also keep a sigset of pending signals, so that at
2052 least a non-siginfo signal gets delivered.
2053 */
2054 if (sq->sigs[sq->next].si_signo != 0)
2055 VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
2056 sq->sigs[sq->next].si_signo, tid);
2057
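   /* Store into the ring and advance the cursor. The queue is a fixed
      ring of N_QUEUED_SIGNALS slots, so once it wraps the oldest
      undelivered entry (warned about just above) is overwritten. */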
2058 sq->sigs[sq->next] = *si;
2059 sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
2060
2061 restore_all_host_signals(&savedmask);
2062 }
2063
2064 /*
2065 Returns the next queued signal for thread tid which is in "set".
2066 tid==0 means process-wide signal. Set si_signo to 0 when the
2067 signal has been delivered.
2068
2069 Must be called with all signals blocked, to protect against async
2070 deliveries.
2071 */
2072 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
2073 {
2074 ThreadState *tst = VG_(get_ThreadState)(tid);
2075 SigQueue *sq;
2076 Int idx;
2077 vki_siginfo_t *ret = NULL;
2078
2079 sq = tst->sig_queue;
2080 if (sq == NULL)
2081 goto out;
2082
2083 idx = sq->next;
2084 do {
2085 if (0)
2086 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
2087 sq->sigs[idx].si_signo,
2088 VG_(sigismember)(set, sq->sigs[idx].si_signo));
2089
2090 if (sq->sigs[idx].si_signo != 0
2091 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
2092 if (VG_(clo_trace_signals))
2093 VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
2094 sq->sigs[idx].si_signo, idx, tid);
2095 ret = &sq->sigs[idx];
2096 goto out;
2097 }
2098
2099 idx = (idx + 1) % N_QUEUED_SIGNALS;
2100 } while(idx != sq->next);
2101 out:
2102 return ret;
2103 }
2104
2105 static int sanitize_si_code(int si_code)
2106 {
2107 #if defined(VGO_linux)
2108 /* The linux kernel uses the top 16 bits of si_code for its own
2109 use and only exports the bottom 16 bits to user space - at least
2110 that is the theory, but it turns out that there are some kernels
2111 around that forget to mask out the top 16 bits so we do it here.
2112
2113 The kernel treats the bottom 16 bits as signed and (when it does
2114 mask them off) sign extends them when exporting to user space so
2115 we do the same thing here. */
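   /* Eg. a raw value of 0x00010080 from such a kernel truncates to 0x80
      (128), while 0x0001fffd sign-extends to -3. */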
2116 return (Short)si_code;
2117 #elif defined(VGO_darwin)
2118 return si_code;
2119 #else
2120 # error Unknown OS
2121 #endif
2122 }
2123
2124 /*
2125 Receive an async signal from the kernel.
2126
2127 This should only happen when the thread is blocked in a syscall,
2128 since that's the only time this set of signals is unblocked.
2129 */
2130 static
2131 void async_signalhandler ( Int sigNo,
2132 vki_siginfo_t *info, struct vki_ucontext *uc )
2133 {
2134 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2135 ThreadState* tst = VG_(get_ThreadState)(tid);
2136 SysRes sres;
2137
2138 /* The thread isn't currently running, make it so before going on */
2139 vg_assert(tst->status == VgTs_WaitSys);
2140 VG_(acquire_BigLock)(tid, "async_signalhandler");
2141
2142 info->si_code = sanitize_si_code(info->si_code);
2143
2144 if (VG_(clo_trace_signals))
2145 VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
2146 sigNo, tid, info->si_code);
2147
2148 /* Update thread state properly. The signal can only have been
2149 delivered whilst we were in
2150 coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
2151 window between the two sigprocmask calls, since at all other
2152 times, we run with async signals on the host blocked. Hence
2153 make enquiries on the basis that we were in or very close to a
2154 syscall, and attempt to fix up the guest state accordingly.
2155
2156 (normal async signals occurring during computation are blocked,
2157 but periodically polled for using VG_(sigtimedwait_zero), and
2158 delivered at a point convenient for us. Hence this routine only
2159 deals with signals that are delivered to a thread during a
2160 syscall.) */
2161
2162 /* First, extract a SysRes from the ucontext_t* given to this
2163 handler. If it is subsequently established by
2164 VG_(fixup_guest_state_after_syscall_interrupted) that the
2165 syscall was complete but the results had not been committed yet
2166 to the guest state, then it'll have to commit the results itself
2167 "by hand", and so we need to extract the SysRes. Of course if
2168 the thread was not in that particular window then the
2169 SysRes will be meaningless, but that's OK too because
2170 VG_(fixup_guest_state_after_syscall_interrupted) will detect
2171 that the thread was not in said window and ignore the SysRes. */
2172
2173 /* To make matters more complex still, on Darwin we need to know
2174 the "class" of the syscall under consideration in order to be
2175 able to extract a correct SysRes. The class will have been
2176 saved just before the syscall, by VG_(client_syscall), into this
2177 thread's tst->arch.vex.guest_SC_CLASS. Hence: */
2178 # if defined(VGO_darwin)
2179 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
2180 # else
2181 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
2182 # endif
2183
2184 /* (1) */
2185 VG_(fixup_guest_state_after_syscall_interrupted)(
2186 tid,
2187 VG_UCONTEXT_INSTR_PTR(uc),
2188 sres,
2189 !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
2190 );
2191
2192 /* (2) */
2193 /* Set up the thread's state to deliver a signal */
2194 if (!is_sig_ign(info->si_signo, tid))
2195 deliver_signal(tid, info, uc);
2196
2197 /* It's crucial that (1) and (2) happen in the order (1) then (2)
2198 and not the other way around. (1) fixes up the guest thread
2199 state to reflect the fact that the syscall was interrupted --
2200 either to restart the syscall or to return EINTR. (2) then sets
2201 up the thread state to deliver the signal. Then we resume
2202 execution. First, the signal handler is run, since that's the
2203 second adjustment we made to the thread state. If that returns,
2204 then we resume at the guest state created by (1), viz, either
2205 the syscall returns EINTR or is restarted.
2206
2207 If (2) was done before (1) the outcome would be completely
2208 different, and wrong. */
2209
2210 /* longjmp back to the thread's main loop to start executing the
2211 handler. */
2212 resume_scheduler(tid);
2213
2214 VG_(core_panic)("async_signalhandler: got unexpected signal "
2215 "while outside of scheduler");
2216 }
2217
2218 /* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
2219
2220 Returns True on success, False on failure.
2221
2222 Succeeds without doing anything if addr is already within a segment.
2223
2224 Failure could be caused by:
2225 - addr not below a growable segment
2226 - new stack size would exceed maxsize
2227 - mmap failed for some other reason
2228 */
2229 Bool VG_(extend_stack)(Addr addr, UInt maxsize)
2230 {
2231 SizeT udelta;
2232
2233 /* Find the next Segment above addr */
2234 NSegment const* seg
2235 = VG_(am_find_nsegment)(addr);
2236 NSegment const* seg_next
2237 = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
2238 : NULL;
2239
2240 if (seg && seg->kind == SkAnonC)
2241 /* addr is already mapped. Nothing to do. */
2242 return True;
2243
2244 /* Check that the requested new base is in a shrink-down
2245 reservation section which abuts an anonymous mapping that
2246 belongs to the client. */
2247 if ( ! (seg
2248 && seg->kind == SkResvn
2249 && seg->smode == SmUpper
2250 && seg_next
2251 && seg_next->kind == SkAnonC
2252 && seg->end+1 == seg_next->start))
2253 return False;
2254
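   /* How far the client's anon stack segment must be pulled down so that
      it covers addr, rounded up to whole pages. */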
2255 udelta = VG_PGROUNDUP(seg_next->start - addr);
2256 VG_(debugLog)(1, "signals",
2257 "extending a stack base 0x%llx down by %lld\n",
2258 (ULong)seg_next->start, (ULong)udelta);
2259 if (! VG_(am_extend_into_adjacent_reservation_client)
2260 ( seg_next, -(SSizeT)udelta )) {
2261 VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
2262 return False;
2263 }
2264
2265 /* When we change the main stack, we have to let the stack handling
2266 code know about it. */
2267 VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
2268
2269 if (VG_(clo_sanity_level) > 2)
2270 VG_(sanity_check_general)(False);
2271
2272 return True;
2273 }
2274
2275 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
2276
2277 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
2278 {
2279 if (0)
2280 VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
2281 vg_assert2(NULL == catcher || NULL == fault_catcher,
2282 "Fault catcher is already registered");
2283
2284 fault_catcher = catcher;
2285 }
2286
2287 static
2288 void sync_signalhandler_from_user ( ThreadId tid,
2289 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2290 {
2291 ThreadId qtid;
2292
2293 /* If some user-process sent us a sync signal (ie. it's not the result
2294 of a faulting instruction), then how we treat it depends on when it
2295 arrives... */
2296
2297 if (VG_(threads)[tid].status == VgTs_WaitSys) {
2298 /* Signal arrived while we're blocked in a syscall. This means that
2299 the client's signal mask was applied. In other words, we can't
2300 get here unless the client wants this signal right now. This means
2301 we can simply use the async_signalhandler. */
2302 if (VG_(clo_trace_signals))
2303 VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
2304 sigNo);
2305
2306 async_signalhandler(sigNo, info, uc);
2307 VG_(core_panic)("async_signalhandler returned!?\n");
2308
2309 } else {
2310 /* Signal arrived while in generated client code, or while running
2311 Valgrind core code. That means that every thread has these signals
2312 unblocked, so we can't rely on the kernel to route them properly;
2313 we need to queue them manually. */
2314 if (VG_(clo_trace_signals))
2315 VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
2316
2317 # if defined(VGO_linux)
2318 /* On Linux, first we have to do a sanity check of the siginfo. */
2319 if (info->VKI_SIGINFO_si_pid == 0) {
2320 /* There's a per-user limit of pending siginfo signals. If
2321 you exceed this, by having more than that number of
2322 pending signals with siginfo, then new signals are
2323 delivered without siginfo. This condition can be caused
2324 by any unrelated program you're running at the same time
2325 as Valgrind, if it has a large number of pending siginfo
2326 signals which it isn't taking delivery of.
2327
2328 Since we depend on siginfo to work out why we were sent a
2329 signal and what we should do about it, we really can't
2330 continue unless we get it. */
2331 VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
2332 "I can't go on.\n", sigNo, VG_(signame)(sigNo));
2333 VG_(printf)(
2334 " This may be because one of your programs has consumed your ration of\n"
2335 " siginfo structures. For more information, see:\n"
2336 " http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
2337 " Basically, some program on your system is building up a large queue of\n"
2338 " pending signals, and this causes the siginfo data for other signals to\n"
2339 " be dropped because it's exceeding a system limit. However, Valgrind\n"
2340 " absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
2341 " offending program and avoid running it while using Valgrind, but there\n"
2342 " is no easy way to do this. Apparently the problem was fixed in kernel\n"
2343 " 2.6.12.\n");
2344
2345 /* It's a fatal signal, so we force the default handler. */
2346 VG_(set_default_handler)(sigNo);
2347 deliver_signal(tid, info, uc);
2348 resume_scheduler(tid);
2349 VG_(exit)(99); /* If we can't resume, then just exit */
2350 }
2351 # endif
2352
2353 qtid = 0; /* shared pending by default */
2354 # if defined(VGO_linux)
2355 if (info->si_code == VKI_SI_TKILL)
2356 qtid = tid; /* directed to us specifically */
2357 # endif
2358 queue_signal(qtid, info);
2359 }
2360 }
2361
2362 /* Returns the reported fault address for an exact address */
2363 static Addr fault_mask(Addr in)
2364 {
2365 /* We have to use VG_PGROUNDDN because faults on s390x only deliver
2366 the page address but not the address within a page.
2367 */
2368 # if defined(VGA_s390x)
2369 return VG_PGROUNDDN(in);
2370 # else
2371 return in;
2372 #endif
2373 }
2374
2375 /* Returns True if the sync signal was due to the stack requiring extension
2376 and the extension was successful.
2377 */
2378 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
2379 {
2380 Addr fault;
2381 Addr esp;
2382 NSegment const* seg;
2383 NSegment const* seg_next;
2384
2385 if (info->si_signo != VKI_SIGSEGV)
2386 return False;
2387
2388 fault = (Addr)info->VKI_SIGINFO_si_addr;
2389 esp = VG_(get_SP)(tid);
2390 seg = VG_(am_find_nsegment)(fault);
2391 seg_next = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
2392 : NULL;
2393
2394 if (VG_(clo_trace_signals)) {
2395 if (seg == NULL)
2396 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2397 "seg=NULL\n",
2398 info->si_code, fault, tid, esp);
2399 else
2400 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2401 "seg=%#lx-%#lx\n",
2402 info->si_code, fault, tid, esp, seg->start, seg->end);
2403 }
2404
2405 if (info->si_code == VKI_SEGV_MAPERR
2406 && seg
2407 && seg->kind == SkResvn
2408 && seg->smode == SmUpper
2409 && seg_next
2410 && seg_next->kind == SkAnonC
2411 && seg->end+1 == seg_next->start
2412 && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
2413 /* If the fault address is above esp but below the current known
2414 stack segment base, and it was a fault because there was
2415 nothing mapped there (as opposed to a permissions fault),
2416 then extend the stack segment.
2417 */
2418 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
2419 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
2420 if (VG_(clo_trace_signals))
2421 VG_(dmsg)(" -> extended stack base to %#lx\n",
2422 VG_PGROUNDDN(fault));
2423 return True;
2424 } else {
2425 VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
2426 tid, fault);
2427 return False;
2428 }
2429 } else {
2430 return False;
2431 }
2432 }
2433
2434 static
2435 void sync_signalhandler_from_kernel ( ThreadId tid,
2436 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2437 {
2438 /* Check to see if some part of Valgrind itself is interested in faults.
2439 The fault catcher should never be set whilst we're in generated code, so
2440 check for that. AFAIK the only use of the catcher right now is
2441 memcheck's leak detector. */
2442 if (fault_catcher) {
2443 vg_assert(VG_(in_generated_code) == False);
2444
2445 (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
2446 /* If the catcher returns, then it didn't handle the fault,
2447 so carry on panicking. */
2448 }
2449
2450 if (extend_stack_if_appropriate(tid, info)) {
2451 /* Stack extension occurred, so we don't need to do anything else; upon
2452 returning from this function, we'll restart the host (hence guest)
2453 instruction. */
2454 } else {
2455 /* OK, this is a signal we really have to deal with. If it came
2456 from the client's code, then we can jump back into the scheduler
2457 and have it delivered. Otherwise it's a Valgrind bug. */
2458 ThreadState *tst = VG_(get_ThreadState)(tid);
2459
2460 if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
2461 /* signal is blocked, but they're not allowed to block faults */
2462 VG_(set_default_handler)(sigNo);
2463 }
2464
2465 if (VG_(in_generated_code)) {
2466 if (VG_(gdbserver_report_signal) (sigNo, tid)
2467 || VG_(sigismember)(&tst->sig_mask, sigNo)) {
2468 /* Can't continue; must longjmp back to the scheduler and thus
2469 enter the sighandler immediately. */
2470 deliver_signal(tid, info, uc);
2471 resume_scheduler(tid);
2472 }
2473 else
2474 resume_scheduler(tid);
2475 }
2476
2477 /* If resume_scheduler returns or it's our fault, it means we
2478 don't have longjmp set up, implying that we weren't running
2479 client code, and therefore it was actually generated by
2480 Valgrind internally.
2481 */
2482 VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
2483 "a signal %d (%s) - exiting\n",
2484 sigNo, VG_(signame)(sigNo));
2485
2486 VG_(dmsg)("si_code=%x; Faulting address: %p; sp: %#lx\n",
2487 info->si_code, info->VKI_SIGINFO_si_addr,
2488 VG_UCONTEXT_STACK_PTR(uc));
2489
2490 if (0)
2491 VG_(kill_self)(sigNo); /* generate a core dump */
2492
2493 //if (tid == 0) /* could happen after everyone has exited */
2494 // tid = VG_(master_tid);
2495 vg_assert(tid != 0);
2496
2497 UnwindStartRegs startRegs;
2498 VG_(memset)(&startRegs, 0, sizeof(startRegs));
2499
2500 VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
2501 VG_(core_panic_at)("Killed by fatal signal", &startRegs);
2502 }
2503 }
2504
2505 /*
2506 Receive a sync signal from the host.
2507 */
2508 static
2509 void sync_signalhandler ( Int sigNo,
2510 vki_siginfo_t *info, struct vki_ucontext *uc )
2511 {
2512 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2513 Bool from_user;
2514
2515 if (0)
2516 VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
2517
2518 vg_assert(info != NULL);
2519 vg_assert(info->si_signo == sigNo);
2520 vg_assert(sigNo == VKI_SIGSEGV ||
2521 sigNo == VKI_SIGBUS ||
2522 sigNo == VKI_SIGFPE ||
2523 sigNo == VKI_SIGILL ||
2524 sigNo == VKI_SIGTRAP);
2525
2526 info->si_code = sanitize_si_code(info->si_code);
2527
2528 from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);
2529
2530 if (VG_(clo_trace_signals)) {
2531 VG_(dmsg)("sync signal handler: "
2532 "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
2533 sigNo, info->si_code, VG_(get_IP)(tid),
2534 VG_UCONTEXT_INSTR_PTR(uc),
2535 ( from_user ? "user" : "kernel" ));
2536 }
2537 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
2538
2539 /* // debug code:
2540 if (0) {
2541 VG_(printf)("info->si_signo %d\n", info->si_signo);
2542 VG_(printf)("info->si_errno %d\n", info->si_errno);
2543 VG_(printf)("info->si_code %d\n", info->si_code);
2544 VG_(printf)("info->si_pid %d\n", info->si_pid);
2545 VG_(printf)("info->si_uid %d\n", info->si_uid);
2546 VG_(printf)("info->si_status %d\n", info->si_status);
2547 VG_(printf)("info->si_addr %p\n", info->si_addr);
2548 }
2549 */
2550
2551 /* Figure out if the signal is being sent from outside the process.
2552 (Why do we care?) If the signal is from the user rather than the
2553 kernel, then treat it more like an async signal than a sync signal --
2554 that is, merely queue it for later delivery. */
2555 if (from_user) {
2556 sync_signalhandler_from_user( tid, sigNo, info, uc);
2557 } else {
2558 sync_signalhandler_from_kernel(tid, sigNo, info, uc);
2559 }
2560 }
2561
2562
2563 /*
2564 Kill this thread. Makes it leave any syscall it might be currently
2565 blocked in, and return to the scheduler. This doesn't mark the thread
2566 as exiting; that's the caller's job.
2567 */
2568 static void sigvgkill_handler(int signo, vki_siginfo_t *si,
2569 struct vki_ucontext *uc)
2570 {
2571 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2572 ThreadStatus at_signal = VG_(threads)[tid].status;
2573
2574 if (VG_(clo_trace_signals))
2575 VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);
2576
2577 VG_(acquire_BigLock)(tid, "sigvgkill_handler");
2578
2579 vg_assert(signo == VG_SIGVGKILL);
2580 vg_assert(si->si_signo == signo);
2581
2582 /* jrs 2006 August 3: the following assertion seems incorrect to
2583 me, and fails on AIX. sigvgkill could be sent to a thread which
2584 is runnable - see VG_(nuke_all_threads_except) in the scheduler.
2585 Hence comment these out ..
2586
2587 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
2588 VG_(post_syscall)(tid);
2589
2590 and instead do:
2591 */
2592 if (at_signal == VgTs_WaitSys)
2593 VG_(post_syscall)(tid);
2594 /* jrs 2006 August 3 ends */
2595
2596 resume_scheduler(tid);
2597
2598 VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
2599 }
2600
2601 static __attribute((unused))
2602 void pp_ksigaction ( vki_sigaction_toK_t* sa )
2603 {
2604 Int i;
2605 VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
2606 sa->ksa_handler,
2607 (UInt)sa->sa_flags,
2608 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2609 sa->sa_restorer
2610 # else
2611 (void*)0
2612 # endif
2613 );
2614 VG_(printf)("pp_ksigaction: { ");
2615 for (i = 1; i <= VG_(max_signal); i++)
2616 if (VG_(sigismember(&(sa->sa_mask),i)))
2617 VG_(printf)("%d ", i);
2618 VG_(printf)("}\n");
2619 }
2620
2621 /*
2622 Force signal handler to default
2623 */
2624 void VG_(set_default_handler)(Int signo)
2625 {
2626 vki_sigaction_toK_t sa;
2627
2628 sa.ksa_handler = VKI_SIG_DFL;
2629 sa.sa_flags = 0;
2630 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2631 sa.sa_restorer = 0;
2632 # endif
2633 VG_(sigemptyset)(&sa.sa_mask);
2634
2635 VG_(do_sys_sigaction)(signo, &sa, NULL);
2636 }
2637
2638 /*
2639 Poll for pending signals, and set the next one up for delivery.
2640 */
2641 void VG_(poll_signals)(ThreadId tid)
2642 {
2643 vki_siginfo_t si, *sip;
2644 vki_sigset_t pollset;
2645 ThreadState *tst = VG_(get_ThreadState)(tid);
2646 vki_sigset_t saved_mask;
2647
2648 /* look for all the signals this thread isn't blocking */
2649 /* pollset = ~tst->sig_mask */
2650 VG_(sigcomplementset)( &pollset, &tst->sig_mask );
2651
2652 block_all_host_signals(&saved_mask); // protect signal queue
2653
2654 /* First look for any queued pending signals */
2655 sip = next_queued(tid, &pollset); /* this thread */
2656
2657 if (sip == NULL)
2658 sip = next_queued(0, &pollset); /* process-wide */
2659
2660 /* If there was nothing queued, ask the kernel for a pending signal */
2661 if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
2662 if (VG_(clo_trace_signals))
2663 VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
2664 si.si_signo, tid);
2665 sip = &si;
2666 }
2667
2668 if (sip != NULL) {
2669 /* OK, something to do; deliver it */
2670 if (VG_(clo_trace_signals))
2671 VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
2672 if (!is_sig_ign(sip->si_signo, tid))
2673 deliver_signal(tid, sip, NULL);
2674 else if (VG_(clo_trace_signals))
2675 VG_(dmsg)(" signal %d ignored\n", sip->si_signo);
2676
2677 sip->si_signo = 0; /* remove from signal queue, if that's
2678 where it came from */
2679 }
2680
2681 restore_all_host_signals(&saved_mask);
2682 }
2683
2684 /* At startup, copy the process' real signal state to the SCSS.
2685 Whilst doing this, block all real signals. Then calculate SKSS and
2686 set the kernel to that. Also initialise DCSS.
2687 */
2688 void VG_(sigstartup_actions) ( void )
2689 {
2690 Int i, ret, vKI_SIGRTMIN;
2691 vki_sigset_t saved_procmask;
2692 vki_sigaction_fromK_t sa;
2693
2694 VG_(memset)(&scss, 0, sizeof(scss));
2695 VG_(memset)(&skss, 0, sizeof(skss));
2696
2697 # if defined(VKI_SIGRTMIN)
2698 vKI_SIGRTMIN = VKI_SIGRTMIN;
2699 # else
2700 vKI_SIGRTMIN = 0; /* eg Darwin */
2701 # endif
2702
2703 /* VG_(printf)("SIGSTARTUP\n"); */
2704 /* Block all signals. saved_procmask remembers the previous mask,
2705 which the first thread inherits.
2706 */
2707 block_all_host_signals( &saved_procmask );
2708
2709 /* Copy per-signal settings to SCSS. */
2710 for (i = 1; i <= _VKI_NSIG; i++) {
2711 /* Get the old host action */
2712 ret = VG_(sigaction)(i, NULL, &sa);
2713
2714 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2715 /* apparently we may not even ask about the disposition of these
2716 signals, let alone change them */
2717 if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
2718 continue;
2719 # endif
2720
2721 if (ret != 0)
2722 break;
2723
2724 /* Try setting it back to see if this signal is really
2725 available */
2726 if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
2727 && i >= vKI_SIGRTMIN) {
2728 vki_sigaction_toK_t tsa, sa2;
2729
2730 tsa.ksa_handler = (void *)sync_signalhandler;
2731 tsa.sa_flags = VKI_SA_SIGINFO;
2732 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2733 tsa.sa_restorer = 0;
2734 # endif
2735 VG_(sigfillset)(&tsa.sa_mask);
2736
2737 /* try setting it to some arbitrary handler */
2738 if (VG_(sigaction)(i, &tsa, NULL) != 0) {
2739 /* failed - not really usable */
2740 break;
2741 }
2742
2743 VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
2744 ret = VG_(sigaction)(i, &sa2, NULL);
2745 vg_assert(ret == 0);
2746 }
2747
2748 VG_(max_signal) = i;
2749
2750 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
2751 VG_(printf)("snaffling handler 0x%lx for signal %d\n",
2752 (Addr)(sa.ksa_handler), i );
2753
2754 scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
2755 scss.scss_per_sig[i].scss_flags = sa.sa_flags;
2756 scss.scss_per_sig[i].scss_mask = sa.sa_mask;
2757
2758 scss.scss_per_sig[i].scss_restorer = NULL;
2759 # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2760 scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
2761 # endif
2762
2763 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2764 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2765 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2766 /*sa.sa_tramp;*/
2767 /* We can't know what it was, because Darwin's sys_sigaction
2768 doesn't tell us. */
2769 # endif
2770 }
2771
2772 if (VG_(clo_trace_signals))
2773 VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));
2774
2775 /* Our private internal signals are treated as ignored */
2776 scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
2777 scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
2778 VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
2779
2780 /* Copy the process' signal mask into the root thread. */
2781 vg_assert(VG_(threads)[1].status == VgTs_Init);
2782 for (i = 2; i < VG_N_THREADS; i++)
2783 vg_assert(VG_(threads)[i].status == VgTs_Empty);
2784
2785 VG_(threads)[1].sig_mask = saved_procmask;
2786 VG_(threads)[1].tmp_sig_mask = saved_procmask;
2787
2788 /* Calculate SKSS and apply it. This also sets the initial kernel
2789 mask we need to run with. */
2790 handle_SCSS_change( True /* forced update */ );
2791
2792 /* Leave with all signals still blocked; the thread scheduler loop
2793 will set the appropriate mask at the appropriate time. */
2794 }
2795
2796 /*--------------------------------------------------------------------*/
2797 /*--- end ---*/
2798 /*--------------------------------------------------------------------*/
2799