
/*--------------------------------------------------------------------*/
/*--- Darwin-specific syscalls, etc.        syswrap-amd64-darwin.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2005-2013 Apple Inc.
      Greg Parker  gparker@apple.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_darwin)

#include "config.h"                // DARWIN_VERS
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"     // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"  /* for decls of generic wrappers */
#include "priv_syswrap-darwin.h"   /* for decls of darwin-ish wrappers */
#include "priv_syswrap-main.h"


#include <mach/mach.h>

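/* The helpers below convert between the Mach thread-state structures
   (x86_THREAD_STATE64 / x86_FLOAT_STATE64) and the VEX guest register
   state used for the amd64 guest. */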
static void x86_thread_state64_from_vex(x86_thread_state64_t *mach,
                                        VexGuestAMD64State *vex)
{
   mach->__rax = vex->guest_RAX;
   mach->__rbx = vex->guest_RBX;
   mach->__rcx = vex->guest_RCX;
   mach->__rdx = vex->guest_RDX;
   mach->__rdi = vex->guest_RDI;
   mach->__rsi = vex->guest_RSI;
   mach->__rbp = vex->guest_RBP;
   mach->__rsp = vex->guest_RSP;
   mach->__rflags = LibVEX_GuestAMD64_get_rflags(vex);
   mach->__rip = vex->guest_RIP;
   mach->__r8  = vex->guest_R8;
   mach->__r9  = vex->guest_R9;
   mach->__r10 = vex->guest_R10;
   mach->__r11 = vex->guest_R11;
   mach->__r12 = vex->guest_R12;
   mach->__r13 = vex->guest_R13;
   mach->__r14 = vex->guest_R14;
   mach->__r15 = vex->guest_R15;
   /* GrP fixme
   mach->__cs = vex->guest_CS;
   mach->__fs = vex->guest_FS;
   mach->__gs = vex->guest_GS;
   */
}


static void x86_float_state64_from_vex(x86_float_state64_t *mach,
                                       VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
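   // Each memcpy below copies sizeof(__fpu_xmmN) == 16 bytes, i.e. only the
   // low (XMM) lane of each 256-bit guest_YMMN slot; the YMM high halves
   // have no counterpart in x86_FLOAT_STATE64 and are not transferred.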
   VG_(memcpy)(&mach->__fpu_xmm0,  &vex->guest_YMM0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&mach->__fpu_xmm1,  &vex->guest_YMM1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&mach->__fpu_xmm2,  &vex->guest_YMM2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&mach->__fpu_xmm3,  &vex->guest_YMM3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&mach->__fpu_xmm4,  &vex->guest_YMM4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&mach->__fpu_xmm5,  &vex->guest_YMM5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&mach->__fpu_xmm6,  &vex->guest_YMM6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&mach->__fpu_xmm7,  &vex->guest_YMM7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&mach->__fpu_xmm8,  &vex->guest_YMM8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&mach->__fpu_xmm9,  &vex->guest_YMM9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&mach->__fpu_xmm10, &vex->guest_YMM10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&mach->__fpu_xmm11, &vex->guest_YMM11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&mach->__fpu_xmm12, &vex->guest_YMM12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&mach->__fpu_xmm13, &vex->guest_YMM13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&mach->__fpu_xmm14, &vex->guest_YMM14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&mach->__fpu_xmm15, &vex->guest_YMM15, sizeof(mach->__fpu_xmm15));
}


void thread_state_from_vex(thread_state_t mach_generic,
                           thread_state_flavor_t flavor,
                           mach_msg_type_number_t count,
                           VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_from_vex((x86_thread_state64_t *)mach_generic, vex);
      break;

   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_from_vex((x86_float_state64_t *)mach_generic, vex);
      break;

   default:
      vg_assert(0);
   }
}


static void x86_thread_state64_to_vex(const x86_thread_state64_t *mach,
                                      VexGuestAMD64State *vex)
{
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RAX = mach->__rax;
   vex->guest_RBX = mach->__rbx;
   vex->guest_RCX = mach->__rcx;
   vex->guest_RDX = mach->__rdx;
   vex->guest_RDI = mach->__rdi;
   vex->guest_RSI = mach->__rsi;
   vex->guest_RBP = mach->__rbp;
   vex->guest_RSP = mach->__rsp;
   // DDD: #warning GrP fixme eflags
   vex->guest_RIP = mach->__rip;
   vex->guest_R8  = mach->__r8;
   vex->guest_R9  = mach->__r9;
   vex->guest_R10 = mach->__r10;
   vex->guest_R11 = mach->__r11;
   vex->guest_R12 = mach->__r12;
   vex->guest_R13 = mach->__r13;
   vex->guest_R14 = mach->__r14;
   vex->guest_R15 = mach->__r15;
   /* GrP fixme
   vex->guest_CS = mach->__cs;
   vex->guest_FS = mach->__fs;
   vex->guest_GS = mach->__gs;
   */
}

static void x86_float_state64_to_vex(const x86_float_state64_t *mach,
                                     VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
   VG_(memcpy)(&vex->guest_YMM0,  &mach->__fpu_xmm0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&vex->guest_YMM1,  &mach->__fpu_xmm1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&vex->guest_YMM2,  &mach->__fpu_xmm2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&vex->guest_YMM3,  &mach->__fpu_xmm3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&vex->guest_YMM4,  &mach->__fpu_xmm4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&vex->guest_YMM5,  &mach->__fpu_xmm5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&vex->guest_YMM6,  &mach->__fpu_xmm6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&vex->guest_YMM7,  &mach->__fpu_xmm7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&vex->guest_YMM8,  &mach->__fpu_xmm8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&vex->guest_YMM9,  &mach->__fpu_xmm9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&vex->guest_YMM10, &mach->__fpu_xmm10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&vex->guest_YMM11, &mach->__fpu_xmm11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&vex->guest_YMM12, &mach->__fpu_xmm12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&vex->guest_YMM13, &mach->__fpu_xmm13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&vex->guest_YMM14, &mach->__fpu_xmm14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&vex->guest_YMM15, &mach->__fpu_xmm15, sizeof(mach->__fpu_xmm15));
}


void thread_state_to_vex(const thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_to_vex((const x86_thread_state64_t *)mach_generic, vex);
      break;
   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_to_vex((const x86_float_state64_t *)mach_generic, vex);
      break;

   default:
      vg_assert(0);
      break;
   }
}


ThreadState *build_thread(const thread_state_t state,
                          thread_state_flavor_t flavor,
                          mach_msg_type_number_t count)
{
   ThreadId tid = VG_(alloc_ThreadState)();
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   // Initialize machine registers

   thread_state_to_vex(state, flavor, count, &tst->arch.vex);

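   /* Thread construction beyond the register state is not implemented on
      this port; I_die_here below reports the unimplemented functionality
      and aborts. */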
   I_die_here;
   // GrP fixme signals, sig_mask, tmp_sig_mask, os_state.parent

   find_stack_segment(tid, tst->arch.vex.guest_RSP);

   return tst;
}


// Edit the thread state to send to the real kernel.
// The real thread will run start_thread_NORETURN(tst)
// on a separate non-client stack.
void hijack_thread_state(thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         ThreadState *tst)
{
   x86_thread_state64_t *mach = (x86_thread_state64_t *)mach_generic;
   char *stack;

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   stack = (char *)allocstack(tst->tid);
   stack -= 64+320;                      // make room for top frame
   memset(stack, 0, 64+320);             // ...and clear it
   *(uintptr_t *)stack = 0;              // push fake return address

   mach->__rdi = (uintptr_t)tst;         // arg1 = tst
   mach->__rip = (uintptr_t)&start_thread_NORETURN;
   mach->__rsp = (uintptr_t)stack;
}


/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void call_on_new_stack_0_1 ( Addr stack,
                             Addr retaddr,
                             void (*f)(Word),
                             Word arg1 );
// %rdi == stack (must be 16-byte aligned)
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
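// The asm below switches to the new stack, then pushes retaddr followed by
// f, so the final 'ret' pops f into %rip while leaving retaddr on the new
// stack as f's return address.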
asm(
".globl _call_on_new_stack_0_1\n"
"_call_on_new_stack_0_1:\n"
"   movq  %rsp, %rbp\n"     // remember old stack pointer
"   movq  %rdi, %rsp\n"     // set new stack
"   movq  %rcx, %rdi\n"     // set arg1
"   pushq %rsi\n"           // retaddr to new stack
"   pushq %rdx\n"           // f to new stack
"   movq  $0, %rax\n"       // zero all other GP regs
"   movq  $0, %rbx\n"
"   movq  $0, %rcx\n"
"   movq  $0, %rdx\n"
"   movq  $0, %rsi\n"
"   movq  $0, %rbp\n"
"   movq  $0, %r8\n"
"   movq  $0, %r9\n"
"   movq  $0, %r10\n"
"   movq  $0, %r11\n"
"   movq  $0, %r12\n"
"   movq  $0, %r13\n"
"   movq  $0, %r14\n"
"   movq  $0, %r15\n"
"   ret\n"                  // jump to f
"   ud2\n"                  // should never get here
);

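/* Entry trampoline for hijacked pthreads.  pthread_hijack()'s first six
   arguments are already in %rdi..%r9; the original stack pointer is pushed
   so it becomes the seventh argument ('sp'), and a fake return address is
   pushed to give the C routine the stack layout the calling convention
   expects. */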
asm(
".globl _pthread_hijack_asm\n"
"_pthread_hijack_asm:\n"
"   movq %rsp,%rbp\n"
"   push $0\n"    // alignment pad
"   push %rbp\n"  // original sp
                  // other values stay where they are in registers
"   push $0\n"    // fake return address
"   jmp _pthread_hijack\n"
);


void pthread_hijack(Addr self, Addr kport, Addr func, Addr func_arg,
                    Addr stacksize, Addr flags, Addr sp)
{
   vki_sigset_t blockall;
   ThreadState *tst = (ThreadState *)func_arg;
   VexGuestAMD64State *vex = &tst->arch.vex;

   // VG_(printf)("pthread_hijack pthread %p, machthread %p, func %p, arg %p, stacksize %p, flags %p, sp %p\n", self, kport, func, func_arg, stacksize, flags, sp);

   // Wait for parent thread's permission.
   // The parent thread holds V's lock on our behalf.
   semaphore_wait(tst->os_state.child_go);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RIP = pthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = func;
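   // Note: |func_arg| was repurposed to carry tst (see the cast above); the
   // client's real start argument is recovered from os_state.func_arg, where
   // the bsdthread_create wrapper presumably stashed it.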
   vex->guest_RCX = tst->os_state.func_arg;
   vex->guest_R8  = stacksize;
   vex->guest_R9  = flags;
   vex->guest_RSP = sp;

   // Record thread's stack and Mach port and pthread struct
   tst->os_state.pthread = self;
   tst->os_state.lwpid = kport;
   record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "thread-%p");

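   /* Bit 0x01000000 of |flags| distinguishes caller-supplied stacks from
      kernel-allocated ones; it presumably corresponds to the kernel's
      PTHREAD_START_CUSTOM flag, though that is an assumption here. */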
   if ((flags & 0x01000000) == 0) {
      // kernel allocated stack - needs mapping
      Addr stack = VG_PGROUNDUP(sp) - stacksize;
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);
   } else {
      // client allocated stack
      find_stack_segment(tst->tid, sp);
   }
   ML_(sync_mappings)("after", "pthread_hijack", 0);

   // DDD: should this be here rather than in POST(sys_bsdthread_create)?
   // But we don't have ptid here...
   //VG_TRACK ( pre_thread_ll_create, ptid, tst->tid );

   // Tell parent thread's POST(sys_bsdthread_create) that we're done
   // initializing registers and mapping memory.
   semaphore_signal(tst->os_state.child_done);
   // LOCK IS GONE BELOW THIS POINT

   // Go!
   call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                         start_thread_NORETURN, (Word)tst);

   /*NOTREACHED*/
   vg_assert(0);
}


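/* Entry trampoline for work-queue threads.  wqthread_hijack()'s first five
   arguments arrive in %rdi..%r8; the original stack pointer is copied into
   %r9 so it becomes the sixth argument ('sp'), and a fake return address is
   pushed to give the C routine a conventional stack layout. */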
asm(
".globl _wqthread_hijack_asm\n"
"_wqthread_hijack_asm:\n"
"   movq %rsp,%r9\n"  // original sp
                      // other values stay where they are in registers
"   push $0\n"        // fake return address
"   jmp _wqthread_hijack\n"
);


/* wqthread note: The kernel may create or destroy pthreads in the
   wqthread pool at any time with no userspace interaction,
   and wqthread_start may be entered at any time with no userspace
   interaction.
   To handle this in valgrind, we create and destroy a valgrind
   thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
             "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n",
             self, kport, stackaddr, workitem, reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS == DARWIN_10_8
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  endif

   if (is_reuse) {

      /* For whatever reason, tst->os_state.pthread appears to have a
         constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS >= DARWIN_10_7
      UWord magic_delta = 0x60;
#     endif

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                         "tst->os_state.pthread %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, tst, tst->os_state.pthread);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestAMD64_initialise(vex);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;
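   // The stack is taken to be the stacksize bytes ending at the page-rounded
   // sp; the pthread struct sits just above it and a guard page just below
   // (see the mappings recorded in the non-reuse branch further down).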

   if (is_reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_word = stack+stacksize;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}

#endif // defined(VGP_amd64_darwin)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/