/*--------------------------------------------------------------------*/
/*--- Darwin-specific syscalls, etc.          syswrap-x86-darwin.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2005-2011 Apple Inc.
      Greg Parker  gparker@apple.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_darwin)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"   /* for decls of generic wrappers */
#include "priv_syswrap-darwin.h"    /* for decls of darwin-ish wrappers */
#include "priv_syswrap-main.h"


#include <mach/mach.h>

static void x86_thread_state32_from_vex(i386_thread_state_t *mach,
                                        VexGuestX86State *vex)
{
   mach->__eax = vex->guest_EAX;
   mach->__ebx = vex->guest_EBX;
   mach->__ecx = vex->guest_ECX;
   mach->__edx = vex->guest_EDX;
   mach->__edi = vex->guest_EDI;
   mach->__esi = vex->guest_ESI;
   mach->__ebp = vex->guest_EBP;
   mach->__esp = vex->guest_ESP;
   mach->__ss = vex->guest_SS;
   mach->__eflags = LibVEX_GuestX86_get_eflags(vex);
   mach->__eip = vex->guest_EIP;
   mach->__cs = vex->guest_CS;
   mach->__ds = vex->guest_DS;
   mach->__es = vex->guest_ES;
   mach->__fs = vex->guest_FS;
   mach->__gs = vex->guest_GS;
}


static void x86_float_state32_from_vex(i386_float_state_t *mach,
                                       VexGuestX86State *vex)
{
   // DDD: #warning GrP fixme fp state

   VG_(memcpy)(&mach->__fpu_xmm0, &vex->guest_XMM0, 8 * sizeof(mach->__fpu_xmm0));
}
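
// Note: only the eight XMM registers are copied out here; the x87
// state, control/status words and MXCSR in 'mach' are left untouched
// (see the fixme above).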


void thread_state_from_vex(thread_state_t mach_generic,
                           thread_state_flavor_t flavor,
                           mach_msg_type_number_t count,
                           VexGuestArchState *vex_generic)
{
   VexGuestX86State *vex = (VexGuestX86State *)vex_generic;

   switch (flavor) {
   case i386_THREAD_STATE:
      vg_assert(count == i386_THREAD_STATE_COUNT);
      x86_thread_state32_from_vex((i386_thread_state_t *)mach_generic, vex);
      break;

   case i386_FLOAT_STATE:
      vg_assert(count == i386_FLOAT_STATE_COUNT);
      x86_float_state32_from_vex((i386_float_state_t *)mach_generic, vex);
      break;

   default:
      vg_assert(0);
   }
}
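
/* Usage sketch (hypothetical caller, names assumed): convert thread
   'tid's guest state into a Mach thread state, e.g. when servicing a
   thread_get_state request:

      i386_thread_state_t mach;
      thread_state_from_vex((thread_state_t)&mach, i386_THREAD_STATE,
                            i386_THREAD_STATE_COUNT,
                            &VG_(get_ThreadState)(tid)->arch.vex);

   thread_state_to_vex below performs the inverse conversion. */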


static void x86_thread_state32_to_vex(const i386_thread_state_t *mach,
                                      VexGuestX86State *vex)
{
   LibVEX_GuestX86_initialise(vex);
   vex->guest_EAX = mach->__eax;
   vex->guest_EBX = mach->__ebx;
   vex->guest_ECX = mach->__ecx;
   vex->guest_EDX = mach->__edx;
   vex->guest_EDI = mach->__edi;
   vex->guest_ESI = mach->__esi;
   vex->guest_EBP = mach->__ebp;
   vex->guest_ESP = mach->__esp;
   vex->guest_SS = mach->__ss;
   // DDD: #warning GrP fixme eflags
   vex->guest_EIP = mach->__eip;
   vex->guest_CS = mach->__cs;
   vex->guest_DS = mach->__ds;
   vex->guest_ES = mach->__es;
   vex->guest_FS = mach->__fs;
   vex->guest_GS = mach->__gs;
}

static void x86_float_state32_to_vex(const i386_float_state_t *mach,
                                     VexGuestX86State *vex)
{
   // DDD: #warning GrP fixme fp state

   VG_(memcpy)(&vex->guest_XMM0, &mach->__fpu_xmm0, 8 * sizeof(mach->__fpu_xmm0));
}


void thread_state_to_vex(const thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         VexGuestArchState *vex_generic)
{
   VexGuestX86State *vex = (VexGuestX86State *)vex_generic;

   switch (flavor) {
   case i386_THREAD_STATE:
      vg_assert(count == i386_THREAD_STATE_COUNT);
      x86_thread_state32_to_vex((const i386_thread_state_t *)mach_generic, vex);
      break;
   case i386_FLOAT_STATE:
      vg_assert(count == i386_FLOAT_STATE_COUNT);
      x86_float_state32_to_vex((const i386_float_state_t *)mach_generic, vex);
      break;

   default:
      vg_assert(0);
      break;
   }
}


ThreadState *build_thread(const thread_state_t state,
                          thread_state_flavor_t flavor,
                          mach_msg_type_number_t count)
{
   ThreadId tid = VG_(alloc_ThreadState)();
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(flavor == i386_THREAD_STATE);
   vg_assert(count == i386_THREAD_STATE_COUNT);

   // Initialize machine registers

   thread_state_to_vex(state, flavor, count, &tst->arch.vex);

   I_die_here;
   // GrP fixme signals, sig_mask, tmp_sig_mask, os_state.parent

   find_stack_segment(tid, tst->arch.vex.guest_ESP);

   return tst;
}


// Edit the thread state to send to the real kernel.
// The real thread will run start_thread_NORETURN(tst)
// on a separate non-client stack.
void hijack_thread_state(thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         ThreadState *tst)
{
   i386_thread_state_t *mach = (i386_thread_state_t *)mach_generic;
   char *stack;

   vg_assert(flavor == i386_THREAD_STATE);
   vg_assert(count == i386_THREAD_STATE_COUNT);

   stack = (char *)allocstack(tst->tid);
   stack -= 64+320;                       // make room for top frame
   memset(stack, 0, 64+320);              // ...and clear it
   *(uintptr_t *)stack = (uintptr_t)tst;  // set parameter
   stack -= sizeof(uintptr_t);
   *(uintptr_t *)stack = 0;               // push fake return address

   mach->__eip = (uintptr_t)&start_thread_NORETURN;
   mach->__esp = (uintptr_t)stack;
}
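
// The hijacked thread's stack, from the new %esp upward (a sketch of
// what the code above builds):
//    0(%esp)  0      fake return address
//    4(%esp)  tst    argument to start_thread_NORETURN
// i.e. exactly the frame a cdecl callee expects on entry.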


/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void call_on_new_stack_0_1 ( Addr stack,
                             Addr retaddr,
                             void (*f)(Word),
                             Word arg1 );
// 4(%esp) == stack (must be 16-byte aligned)
// 8(%esp) == retaddr
// 12(%esp) == f
// 16(%esp) == arg1
asm(
".globl _call_on_new_stack_0_1\n"
"_call_on_new_stack_0_1:\n"
"   movl %esp, %esi\n"     // remember old stack pointer
"   movl 4(%esi), %esp\n"  // set new stack
"   pushl $0\n"            // align stack
"   pushl $0\n"            // align stack
"   pushl $0\n"            // align stack
"   pushl 16(%esi)\n"      // arg1 to stack
"   pushl 8(%esi)\n"       // retaddr to stack
"   pushl 12(%esi)\n"      // f to stack
"   movl $0, %eax\n"       // zero all GP regs
"   movl $0, %ebx\n"
"   movl $0, %ecx\n"
"   movl $0, %edx\n"
"   movl $0, %esi\n"
"   movl $0, %edi\n"
"   movl $0, %ebp\n"
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
);
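
// Just before the final 'ret', the new stack looks like this (top first):
//    0(%esp)   f        popped by 'ret', transferring control to f
//    4(%esp)   retaddr  what f sees as its return address
//    8(%esp)   arg1     what f sees as its first argument
//    12(%esp)  0,0,0    padding, keeping %esp 16-byte aligned as required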


asm(
".globl _pthread_hijack_asm\n"
"_pthread_hijack_asm:\n"
"   movl %esp,%ebp\n"
"   push $0\n"    // alignment pad
"   push %ebp\n"  // original sp
"   push %esi\n"  // flags
"   push %edi\n"  // stacksize
"   push %edx\n"  // func_arg
"   push %ecx\n"  // func
"   push %ebx\n"  // kport
"   push %eax\n"  // self
"   push $0\n"    // fake return address
"   jmp _pthread_hijack\n"
);
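
// The kernel enters a newly created pthread at _pthread_hijack_asm with
// its parameters in registers.  The pushes above lay them out
// right-to-left so that, after the fake return address, the stack
// matches the cdecl parameter list of pthread_hijack(self, kport, func,
// func_arg, stacksize, flags, sp) below.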


void pthread_hijack(Addr self, Addr kport, Addr func, Addr func_arg,
                    Addr stacksize, Addr flags, Addr sp)
{
   vki_sigset_t blockall;
   ThreadState *tst = (ThreadState *)func_arg;
   VexGuestX86State *vex = &tst->arch.vex;

   // VG_(printf)("pthread_hijack pthread %p, machthread %p, func %p, arg %p, stack %p, flags %p, stack %p\n", self, kport, func, func_arg, stacksize, flags, sp);

   // Wait for parent thread's permission.
   // The parent thread holds V's lock on our behalf.
   semaphore_wait(tst->os_state.child_go);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   // DDD: need to do post_reg_write events here?
   LibVEX_GuestX86_initialise(vex);
   vex->guest_EIP = pthread_starter;
   vex->guest_EAX = self;
   vex->guest_EBX = kport;
   vex->guest_ECX = func;
   vex->guest_EDX = tst->os_state.func_arg;
   vex->guest_EDI = stacksize;
   vex->guest_ESI = flags;
   vex->guest_ESP = sp;

   // Record thread's stack and Mach port and pthread struct
   tst->os_state.pthread = self;
   tst->os_state.lwpid = kport;
   record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "thread-%p");

   if ((flags & 0x01000000) == 0) {
      // kernel allocated stack - needs mapping
      Addr stack = VG_PGROUNDUP(sp) - stacksize;
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

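      // Address-space layout registered below (low to high; derived
      // from the three notify calls):
      //    stack - VKI_PAGE_SIZE .. stack              guard page (no access)
      //    stack .. stack + stacksize                  thread stack (rw)
      //    stack + stacksize .. + pthread_structsize   pthread struct (rw)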
      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);
   } else {
      // client allocated stack
      find_stack_segment(tst->tid, sp);
   }
   ML_(sync_mappings)("after", "pthread_hijack", 0);

   // DDD: should this be here rather than in POST(sys_bsdthread_create)?
   // But we don't have ptid here...
   //VG_TRACK ( pre_thread_ll_create, ptid, tst->tid );

   // Tell parent thread's POST(sys_bsdthread_create) that we're done
   // initializing registers and mapping memory.
   semaphore_signal(tst->os_state.child_done);
   // LOCK IS GONE BELOW THIS POINT

   // Go!
   call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                         start_thread_NORETURN, (Word)tst);

   /*NOTREACHED*/
   vg_assert(0);
}


asm(
".globl _wqthread_hijack_asm\n"
"_wqthread_hijack_asm:\n"
"   movl %esp,%ebp\n"
"   push $0\n"    // alignment
"   push $0\n"    // alignment
"   push %ebp\n"  // original sp
"   push %edi\n"  // reuse
"   push %edx\n"  // workitem
"   push %ecx\n"  // stackaddr
"   push %ebx\n"  // kport
"   push %eax\n"  // self
"   push $0\n"    // fake return address
"   jmp _wqthread_hijack\n"
);
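
// As with _pthread_hijack_asm: the kernel passes the workqueue thread's
// parameters in registers, and the pushes above arrange them as the
// cdecl arguments of wqthread_hijack(self, kport, stackaddr, workitem,
// reuse, sp) below.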


/* wqthread note: The kernel may create or destroy pthreads in the
   wqthread pool at any time with no userspace interaction, and
   wqthread_start may likewise be entered at any time.
   To handle this in valgrind, we create and destroy a valgrind
   thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestX86State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   if (reuse) {

      /* For whatever reason, tst->os_state.pthread appears to have a
         constant offset of 72 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS == DARWIN_10_7
      UWord magic_delta = 0x48;
#     endif
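      /* Note: if DARWIN_VERS is newer than DARWIN_10_7, neither branch
         above defines magic_delta, so this block would fail to compile;
         a new case (with the offset measured on that OS) would be
         needed. */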

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                         "tst->os_state.pthread %#lx, self %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, tst, tst->os_state.pthread, self);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestX86_initialise(vex);
      /* Tell threading tools the new thread exists.  FIXME: we need
         to know the identity (tid) of the parent thread, in order
         that threading tools can make a dependency edge from it to
         this thread.  But we don't know what the parent thread is.
         Hence pass 1 (the root thread).  This is completely wrong in
         general, and could cause large numbers of false races to be
         reported.  In fact, it's positively dangerous; we don't even
         know if thread 1 is still alive, and the thread checkers are
         likely to assert if it isn't. */
      VG_TRACK(pre_thread_ll_create, 1/*BOGUS*/, tst->tid);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_EIP = wqthread_starter;
   vex->guest_EAX = self;
   vex->guest_EBX = kport;
   vex->guest_ECX = stackaddr;
   vex->guest_EDX = workitem;
   vex->guest_EDI = reuse;
   vex->guest_ESI = 0;
   vex->guest_ESP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   VG_TRACK(workq_task_start, tst->tid, workitem);
   if (reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // tell the tool that we are at a point after the new thread
      // has its registers set up (so we can take a stack snapshot),
      // but before it has executed any instructions (or, really,
      // before it has done any memory references).
      VG_TRACK(pre_thread_first_insn, tst->tid);

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}

#endif // defined(VGP_x86_darwin)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/