/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-ppc64-aix5.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2010 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#if defined(VGP_ppc64_aix5)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-aix5.h"      /* for decls of aix5-common wrappers */
#include "priv_syswrap-main.h"


/* --------- HACKS --------- */
/* XXXXXXXXXXXX these HACKS are copies of stuff in syswrap-linux.c;
   check for duplication. */
/* HACK: this is in syswrap-generic.c, but that doesn't get built on AIX. */
/* Dump out a summary, and a more detailed list, of open file descriptors. */
void VG_(show_open_fds) ( void )
{
   I_die_here;
}
static Bool i_am_the_only_thread ( void )
{
   Int c = VG_(count_living_threads)();
   vg_assert(c >= 1); /* stay sane */
   return c == 1;
}
void VG_(reap_threads)(ThreadId self)
{
   while (!i_am_the_only_thread()) {
      /* Let other thread(s) run */
      VG_(vg_yield)();
      VG_(poll_signals)(self);
   }
   vg_assert(i_am_the_only_thread());
}
void VG_(init_preopened_fds) ( void )
{
   I_die_here;
}


// Run a thread from beginning to end and return the thread's
// scheduler-return-code.
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId     tid = (ThreadId)tidW;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-aix64",
                    "thread_wrapper(tid=%lld): entry\n",
                    (ULong)tidW);

   vg_assert(tst->status == VgTs_Init);

   /* make sure we get the CPU lock before doing anything significant */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   if (0)
      VG_(printf)("thread tid %d started: stack = %p\n",
                  tid, &tid);

   VG_TRACK( pre_thread_first_insn, tid );

   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask */
   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-aix64",
                    "thread_wrapper(tid=%lld): exit\n",
                    (ULong)tidW);

   /* Return to caller, still holding the lock. */
   return ret;
}


/* Run a thread all the way to the end, then do appropriate exit actions
   (this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN ( Word tidW )
{
   ThreadId tid = (ThreadId)tidW;
   VgSchedReturnCode src;
   Int c;

   VG_(debugLog)(1, "syswrap-aix64",
                    "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
                    (ULong)tidW);

   /* Run the thread all the way through. */
   src = thread_wrapper(tid);

   VG_(debugLog)(1, "syswrap-aix64",
                    "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
                    (ULong)tidW);

   c = VG_(count_living_threads)();
   vg_assert(c >= 1); /* stay sane */

   vg_assert(src == VgSrc_ExitThread
             || src == VgSrc_ExitProcess
             || src == VgSrc_FatalSig);

   if (c == 1 || src == VgSrc_ExitProcess) {

      VG_(debugLog)(1, "syswrap-aix64",
                       "run_a_thread_NORETURN(tid=%lld): "
                       "exit process (%d threads remaining)\n",
                       (ULong)tidW, c);

      /* We are the last one standing.  Keep hold of the lock and
         carry on to show final tool results, then exit the entire system.
         Use the continuation pointer set at startup in m_main. */
      ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src);

   } else {

      ThreadState *tst;

      VG_(debugLog)(1, "syswrap-aix64",
                       "run_a_thread_NORETURN(tid=%lld): "
                       "not last one standing\n",
                       (ULong)tidW);

      /* OK, thread is dead, but others still exist.  Just exit. */
      vg_assert(c >= 2);
      tst = VG_(get_ThreadState)(tid);

      /* This releases the run lock */
      VG_(exit_thread)(tid);
      vg_assert(tst->status == VgTs_Zombie);

      /* We have to use this sequence to terminate the thread to
         prevent a subtle race.  If VG_(exit_thread)() had left the
         ThreadState as Empty, then it could have been reallocated,
         reusing the stack while we're doing these last cleanups.
         Instead, VG_(exit_thread) leaves it as Zombie to prevent
         reallocation.  We need to make sure we don't touch the stack
         between marking it Empty and exiting.  Hence the
         assembler. */
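
      /* In outline (a sketch only, not compilable as-is), the
         assembly below performs:

            tst->status = VgTs_Empty;
            syscall(__NR_AIX5_thread_terminate, tst->os_state.exitcode);

         It has to be written in assembler, using no stack, because
         the moment the status is set to Empty the stack may be
         handed to a newly created thread. */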
      { ULong block[4];
        vg_assert(sizeof(tst->status) == 8);
        vg_assert(__NR_AIX5_thread_terminate
                  != __NR_AIX5_UNKNOWN);
        block[0] = (ULong)VgTs_Empty;
        block[1] = (ULong) & (tst->status);
        block[2] = (ULong) tst->os_state.exitcode;
        block[3] = __NR_AIX5_thread_terminate;
        asm volatile (
          "mr 29,%0\n\t"      /* r29 = &block[0] */
          "ld 20, 0(29)\n\t"  /* r20 = VgTs_Empty */
          "ld 21, 8(29)\n\t"  /* r21 = & (tst->status) */
          "ld 22, 16(29)\n\t" /* r22 = tst->os_state.exitcode */
          "ld 23, 24(29)\n\t" /* r23 = __NR_AIX5_thread_terminate */
          /* after this point we can't safely use the stack. */
          "std 20, 0(21)\n\t" /* tst->status = VgTs_Empty */
          "mr 2,23\n\t"       /* r2 = __NR_AIX5_thread_terminate */
          "mr 3,22\n\t"       /* r3 = tst->os_state.exitcode */
          /* set up for syscall */
          "crorc 6,6,6\n\t"
          ".long 0x48000005\n\t" /* "bl here+4" */
          "mflr 29\n\t"
          "addi 29,29,16\n\t"
          "mtlr 29\n\t"
          "sc\n\t"            /* thread_terminate(tst->os_state.exitcode) */
          :
          : "b" (&block[0])
          : "lr", "memory", "r2", "r3", "r20", "r21", "r22", "r23", "r29"
        );
      }

      VG_(core_panic)("Thread exit failed?\n");
   }

   /*NOTREACHED*/
   vg_assert(0);
}


static Word start_thread_NORETURN ( void* arg )
{
   ThreadState* tst = (ThreadState*)arg;
   ThreadId     tid = tst->tid;

   run_a_thread_NORETURN ( (Word)tid );
   /*NOTREACHED*/
   vg_assert(0);
}


/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack.  f itself needs to never return. */
__attribute__((noreturn))
static
void call_on_new_stack_0_1_NORETURN ( Addr stack,
                                      void (*f_NORETURN)(Word),
                                      Word arg1 )
{
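   /* Note: on 64-bit AIX a function pointer points at a function
      descriptor rather than at code.  The layout assumed here is
      three words: [0] the entry point address (nia), [1] the TOC
      pointer which belongs in r2, and [2] an environment word which
      conventionally goes in r11.  That is what the fdescr[0..2]
      accesses below rely on. */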
   UWord* fdescr = (UWord*)f_NORETURN;
   volatile UWord block[5];
   block[0] = fdescr[0];  /* nia */
   block[1] = stack;      /* r1 */
   block[2] = fdescr[1];  /* r2 */
   block[3] = arg1;       /* r3 */
   block[4] = fdescr[2];  /* r11 */
   __asm__ __volatile__(
      "mr 4,%0\n\t"   /* r4 = block */
      "ld 1, 8(4)\n\t"
      "ld 2, 16(4)\n\t"
      "ld 3, 24(4)\n\t"
      "ld 11,32(4)\n\t"
      "ld 4, 0(4)\n\t"
      "mtctr 4\n\t"
      "bctr\n"
      : /*out*/ : /*in*/ "b"(&block[0])
   );
   /*NOTREACHED*/
   __asm__ __volatile__("trap");
   while (1) {} /* convince gcc that this really doesn't return */
}


/* Allocate a stack for the main thread, and run it all the way to the
   end.  Although we already have a working VgStack
   (VG_(interim_stack)) it's better to allocate a new one, so that
   overflow detection works uniformly for all threads.
*/
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
   Addr sp;
   VG_(debugLog)(1, "syswrap-aix64",
                    "entering VG_(main_thread_wrapper_NORETURN)\n");

   sp = ML_(allocstack)(tid);

   /* If we can't even allocate the first thread's stack, we're hosed.
      Give up. */
   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");

   /* shouldn't be any other threads around yet */
   vg_assert( VG_(count_living_threads)() == 1 );

   /* make a stack frame */
   sp -= 16;
   sp &= ~0xF;
   *(UWord *)sp = 0;
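   /* The three statements above align sp down to a 16-byte boundary
      and store a zero at the new stack pointer, presumably as a null
      back-chain word so that stack walkers stop there. */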

   call_on_new_stack_0_1_NORETURN(
      (Addr)sp,               /* stack */
      run_a_thread_NORETURN,  /* fn to call */
      (Word)tid               /* arg to give it */
   );

   /*NOTREACHED*/
   vg_assert(0);
}

/* --------- end HACKS --------- */


/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState* arch )
{
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for ppc64/AIX5-specific syscalls
   ------------------------------------------------------------------ */

/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#include <sys/thread.h>
/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */


/* Add prototypes for the wrappers declared here, so that gcc doesn't
   harass us for not having prototypes.  Really this is a kludge --
   the right thing to do is to make these wrappers 'static' since they
   aren't visible outside this file, but that requires even more macro
   magic. */

#define PRE(name)  DEFN_PRE_TEMPLATE(ppc64_aix5, name)
#define POST(name) DEFN_POST_TEMPLATE(ppc64_aix5, name)

DECL_TEMPLATE(ppc64_aix5, sys__clock_gettime);
DECL_TEMPLATE(ppc64_aix5, sys__fp_fpscrx64_);
DECL_TEMPLATE(ppc64_aix5, sys_kload);
DECL_TEMPLATE(ppc64_aix5, sys_kunload64);
DECL_TEMPLATE(ppc64_aix5, sys_thread_setstate);
DECL_TEMPLATE(ppc64_aix5, sys_FAKE_SIGRETURN);


PRE(sys__clock_gettime)
{
   /* Seems like ARG2 points at a destination buffer? */
   /* _clock_gettime (UNDOCUMENTED) ( 0, 0xA, 0x2FF21808 ) */
   PRINT("_clock_gettime (UNDOCUMENTED) ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3 );
   PRE_REG_READ3(int, "_clock_gettime", int, arg1, int, arg2, void*, arg3);
   PRE_MEM_WRITE( "_clock_gettime(dst)", ARG2, sizeof(struct timespec) );
}
POST(sys__clock_gettime)
{
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG2, sizeof(struct timespec) );
}

PRE(sys__fp_fpscrx64_)
{
   PRINT("_fp_fpscrx64_ (BOGUS HANDLER)");
}

PRE(sys_kload)
{
   PRINT("kload (UNDOCUMENTED)( %#lx(%s), %ld, %ld )",
         ARG1, (Char*)ARG1, ARG2, ARG3 );
   PRE_REG_READ3(void*, "kload", char*, name, long, arg2, char*, arg3);
}
POST(sys_kload)
{
   vg_assert(SUCCESS);
   if (0) VG_(printf)("kload result = %#lx\n", RES);
   if (RES)
      POST_MEM_WRITE( RES, 64 );
   ML_(aix5_rescan_procmap_after_load_or_unload)();
}

PRE(sys_kunload64)
{
   PRINT("kunload64 (UNDOCUMENTED)( %#lx, %ld, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4 );
   PRE_REG_READ4(long, "kunload64",
                 void*, arg1, long, arg2, long, arg3, void*, arg4);
}
POST(sys_kunload64)
{
   vg_assert(SUCCESS);
   ML_(aix5_rescan_procmap_after_load_or_unload)();
}

PRE(sys_thread_setstate)
{
   UWord          dst_lwpid = (UWord)ARG1;
   struct tstate* ats_new   = (struct tstate*)ARG2;
   struct tstate* ats_old   = (struct tstate*)ARG3;
   ThreadId       dst_tid   = VG_INVALID_THREADID;
   ThreadState*   dst_ts    = NULL;
   Int i;

   /* Arrgh.  We MUST retain the lock during this syscall.  Reason is
      that this is sometimes used for asynchronous thread cancellation
      (nuking other threads).  If we don't have the lock during the
      syscall, then it's possible that the thread we're nuking might
      get the lock before it gets killed off, and so we can never
      re-acquire the lock after this syscall, and the system
      deadlocks. */
   /* 10 July 06: the above comment is a misdiagnosis.  It appears
      that for thread cancellation (that is, with ->flags ==
      TSTATE_INTR) the target thread has its PC changed by the kernel
      to something else, possibly to pthread_exit(), so that it can
      run its cancellation handlers and exit.  It is currently
      unknown how the kernel knows what to set the target thread's PC
      to.  I did establish that none of the other data passed in the
      struct is relevant: when ->flags == TSTATE_INTR, all the other
      words can be set to 0x0 or 0xFFFFFFFF and the syscall still
      works.  So the address is not passed like that.  I also looked
      at the args to thread_setmystate_fast, which is used when a
      thread sets its cancellation state, but none of those are code
      addresses either.

      Also, it's OK for the kernel simply to change the target
      thread's PC to something else for async thread cancellation,
      but deferred cancellation needs something more, and I can't see
      how that would work either.

      Anyway, the net result is that the target thread ends up not
      running on the simulator (though it is not dead), which is why
      it's necessary to hold onto the lock at this point. */

   /* 30 July 06: added kludge to intercept attempts to cancel another
      thread and instead just force that thread to run
      pthread_exit(PTHREAD_CANCELED).  This allows V to keep
      control. */

   PRINT("thread_setstate (BOGUS HANDLER) "
         "( %ld, %p,%p )", dst_lwpid, ats_new, ats_old);
   if (1 && VG_(clo_trace_syscalls) && ats_new)
      ML_(aix5debugstuff_show_tstate)((Addr)ats_new,
                                      "thread_setstate (NEW)");

   /* Intercept and handle ourselves any attempts to cancel
      another thread (including this one). */

   if (ats_new && (!ats_old) && ats_new->flags == TSTATE_INTR) {
      dst_ts = NULL;
      if (VG_(clo_trace_syscalls))
         VG_(printf)("(INTR for lwpid %ld)", dst_lwpid);
      dst_tid = VG_INVALID_THREADID;
      for (i = 0; i < VG_N_THREADS; i++) {
         dst_ts = VG_(get_ThreadState)(i);
         if ((dst_ts->status == VgTs_Runnable
              || dst_ts->status == VgTs_Yielding
              || dst_ts->status == VgTs_WaitSys)
             && dst_ts->os_state.lwpid == dst_lwpid) {
            dst_tid = i;
            break;
         }
      }
      if (VG_(clo_trace_syscalls)) {
         if (dst_tid == VG_INVALID_THREADID)
            VG_(printf)("(== unknown tid)");
         else
            VG_(printf)("(== tid %d)", (Int)dst_tid);
      }
      if (dst_tid != VG_INVALID_THREADID) {
         /* A cancel has been requested for ctid.  If the target
            thread has cancellation enabled, honour it right now.  If
            not, mark the thread as having a cancellation request, so
            that if it later enables cancellation then the
            cancellation will take effect. */
         vg_assert(dst_ts);
         if (dst_ts->os_state.cancel_progress == Canc_NoRequest) {
            if (dst_ts->os_state.cancel_disabled) {
               if (VG_(clo_trace_syscalls))
                  VG_(printf)("(target has cancel disabled"
                              "; request lodged)");
               dst_ts->os_state.cancel_progress = Canc_Requested;
            } else {
               if (VG_(clo_trace_syscalls))
                  VG_(printf)("(forcing target into pthread_exit)");
               dst_ts->os_state.cancel_progress = Canc_Actioned;
               Bool ok = ML_(aix5_force_thread_into_pthread_exit)(dst_tid);
               if (!ok) {
                  /* now at serious risk of deadlock/livelock.  Give up
                     rather than continue. */
                  ML_(aix5_set_threadstate_for_emergency_exit)
                     (tid, "pthread_cancel(case2-64): "
                           "cannot find pthread_exit; aborting");
                  SET_STATUS_Success(0);
                  return;
               }
            }
         }
         SET_STATUS_Success(0);
         return;
      }
   }

   /* Well, it's not a cancellation request.  Maybe it is the
      initialisation of a previously created thread? */

   if (ats_new && !ats_old) {
      dst_tid = VG_INVALID_THREADID;
      for (i = 0; i < VG_N_THREADS; i++) {
         dst_ts = VG_(get_ThreadState)(i);
         if (dst_ts->status == VgTs_Init
             && dst_ts->os_state.lwpid == dst_lwpid) {
            dst_tid = i;
            break;
         }
      }
      if (dst_tid != VG_INVALID_THREADID) {
         /* Found the associated child */
         if (VG_(clo_trace_syscalls))
            VG_(printf)("(initialised child tid %d)", (Int)dst_tid);
         dst_ts = VG_(get_ThreadState)(dst_tid);
         UWord* stack = (UWord*)ML_(allocstack)(dst_tid);
         /* XXX TODO: check allocstack failure */

         /* copy the specified child register state into the guest
            slot (we need that context to run on the simulated CPU,
            not the real one) and put pointers to our own
            run-the-simulator function into what we'll hand off to the
            kernel instead. */

         /* The guest thread is to start running whatever context
            this syscall showed up with. */
         dst_ts->arch.vex.guest_GPR0  = ats_new->mst.gpr[0];
         dst_ts->arch.vex.guest_GPR1  = ats_new->mst.gpr[1];  /* sp */
         dst_ts->arch.vex.guest_GPR2  = ats_new->mst.gpr[2];  /* toc */
         dst_ts->arch.vex.guest_GPR3  = ats_new->mst.gpr[3];  /* initarg */
         dst_ts->arch.vex.guest_GPR4  = ats_new->mst.gpr[4];
         dst_ts->arch.vex.guest_GPR5  = ats_new->mst.gpr[5];
         dst_ts->arch.vex.guest_GPR6  = ats_new->mst.gpr[6];
         dst_ts->arch.vex.guest_GPR7  = ats_new->mst.gpr[7];
         dst_ts->arch.vex.guest_GPR8  = ats_new->mst.gpr[8];
         dst_ts->arch.vex.guest_GPR9  = ats_new->mst.gpr[9];
         dst_ts->arch.vex.guest_GPR10 = ats_new->mst.gpr[10];
         dst_ts->arch.vex.guest_GPR11 = ats_new->mst.gpr[11]; /* ?? */
         dst_ts->arch.vex.guest_GPR12 = ats_new->mst.gpr[12];
         dst_ts->arch.vex.guest_GPR13 = ats_new->mst.gpr[13];
         dst_ts->arch.vex.guest_GPR14 = ats_new->mst.gpr[14];
         dst_ts->arch.vex.guest_GPR15 = ats_new->mst.gpr[15];
         dst_ts->arch.vex.guest_GPR16 = ats_new->mst.gpr[16];
         dst_ts->arch.vex.guest_GPR17 = ats_new->mst.gpr[17];
         dst_ts->arch.vex.guest_GPR18 = ats_new->mst.gpr[18];
         dst_ts->arch.vex.guest_GPR19 = ats_new->mst.gpr[19];
         dst_ts->arch.vex.guest_GPR20 = ats_new->mst.gpr[20];
         dst_ts->arch.vex.guest_GPR21 = ats_new->mst.gpr[21];
         dst_ts->arch.vex.guest_GPR22 = ats_new->mst.gpr[22];
         dst_ts->arch.vex.guest_GPR23 = ats_new->mst.gpr[23];
         dst_ts->arch.vex.guest_GPR24 = ats_new->mst.gpr[24];
         dst_ts->arch.vex.guest_GPR25 = ats_new->mst.gpr[25];
         dst_ts->arch.vex.guest_GPR26 = ats_new->mst.gpr[26];
         dst_ts->arch.vex.guest_GPR27 = ats_new->mst.gpr[27];
         dst_ts->arch.vex.guest_GPR28 = ats_new->mst.gpr[28];
         dst_ts->arch.vex.guest_GPR29 = ats_new->mst.gpr[29];
         dst_ts->arch.vex.guest_GPR30 = ats_new->mst.gpr[30];
         dst_ts->arch.vex.guest_GPR31 = ats_new->mst.gpr[31];
         dst_ts->arch.vex.guest_CIA   = ats_new->mst.iar; /* pc */
         dst_ts->arch.vex.guest_LR    = ats_new->mst.lr;
         dst_ts->arch.vex.guest_CTR   = ats_new->mst.ctr;
         LibVEX_GuestPPC64_put_CR( ats_new->mst.cr, &dst_ts->arch.vex );
         LibVEX_GuestPPC64_put_XER( ats_new->mst.xer, &dst_ts->arch.vex );

         /* Record what seems like the highest legitimate stack
            address for this thread, so that the stack unwinder works
            properly.  It seems reasonable to use the R1 value
            supplied here. */
         dst_ts->client_stack_highest_word = dst_ts->arch.vex.guest_GPR1;

         /* The host thread is to start running
            start_thread_NORETURN */
         UWord* wrapper_fdescr = (UWord*) & start_thread_NORETURN;
         ats_new->mst.gpr[1] = (UWord)stack;
         ats_new->mst.gpr[2] = wrapper_fdescr[1];
         ats_new->mst.iar    = wrapper_fdescr[0];
         ats_new->mst.gpr[3] = (UWord)dst_ts;
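         /* That is: hand the kernel a context whose entry point and
            TOC come from start_thread_NORETURN's function descriptor,
            whose stack is the freshly allocated Valgrind stack, and
            whose single argument (r3) is the ThreadState pointer. */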

         /* Set initial cancellation status for the thread. */
         dst_ts->os_state.cancel_async    = False;
         dst_ts->os_state.cancel_disabled = False;
         dst_ts->os_state.cancel_progress = Canc_NoRequest;
      }
   }
}
POST(sys_thread_setstate)
{
   if (ARG3)
      POST_MEM_WRITE( ARG3, sizeof(struct tstate) );
   if (0 && VG_(clo_trace_syscalls) && ARG3)
      ML_(aix5debugstuff_show_tstate)(ARG3, "thread_setstate (OLD)");
}

PRE(sys_FAKE_SIGRETURN)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */
   /* This handles the fake signal-return system call created by
      sigframe-ppc64-aix5.c. */

   PRINT("FAKE_SIGRETURN ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Remove the signal frame from this thread's (guest) stack,
      in the process restoring the pre-signal guest state. */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}


/* ---------------------------------------------------------------------
   The ppc64/AIX5 syscall table
   ------------------------------------------------------------------ */

typedef
   struct {
      UInt* pSysNo;
      SyscallTableEntry wrappers;
   }
   AIX5SCTabEntry;
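
/* Note the table is keyed by a pointer to the syscall number rather
   than by the number itself, presumably because the __NR_AIX5_*
   numbers are not compile-time constants but values discovered at
   startup (hence also the __NR_AIX5_UNKNOWN check above); see the
   vkiscnums headers included at the top of this file. */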

#undef PLAXY
#undef PLAX_

#define PLAXY(sysno, name)                     \
   { & sysno,                                  \
     { & WRAPPER_PRE_NAME(ppc64_aix5, name),   \
       & WRAPPER_POST_NAME(ppc64_aix5, name) }}

#define PLAX_(sysno, name)                     \
   { & sysno,                                  \
     { & WRAPPER_PRE_NAME(ppc64_aix5, name),   \
       NULL }}
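
/* PLAXY entries have both a PRE and a POST wrapper defined in this
   file; PLAX_ entries have only a PRE wrapper.  The AIXXY/AIXX_
   entries in the table below presumably follow the same pattern, but
   refer to the AIX5-common wrappers declared in priv_syswrap-aix5.h. */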

static /* but not const */
AIX5SCTabEntry aix5_ppc64_syscall_table[]
   = {
    AIXXY(__NR_AIX5___libc_sbrk, sys___libc_sbrk),
    AIXX_(__NR_AIX5___msleep, sys___msleep),
    PLAXY(__NR_AIX5__clock_gettime, sys__clock_gettime),
    AIXX_(__NR_AIX5__exit, sys__exit),
    PLAX_(__NR_AIX5__fp_fpscrx64_, sys__fp_fpscrx64_),
    AIXX_(__NR_AIX5__getpid, sys__getpid),
    AIXXY(__NR_AIX5__nsleep, sys__nsleep),
    AIXX_(__NR_AIX5__pause, sys__pause),
    AIXXY(__NR_AIX5__poll, sys__poll),
    AIXX_(__NR_AIX5__select, sys__select),
    AIXX_(__NR_AIX5__sem_wait, sys__sem_wait),
    AIXXY(__NR_AIX5__sigaction, sys__sigaction),
    AIXX_(__NR_AIX5__thread_self, sys__thread_self),
    AIXX_(__NR_AIX5_access, sys_access),
    AIXX_(__NR_AIX5_accessx, sys_accessx),
    AIXXY(__NR_AIX5_appgetrlimit, sys_appgetrlimit),
    AIXXY(__NR_AIX5_appgetrusage, sys_appgetrusage),
    AIXX_(__NR_AIX5_appsetrlimit, sys_appsetrlimit),
    AIXX_(__NR_AIX5_appulimit, sys_appulimit),
    AIXX_(__NR_AIX5_bind, sys_bind),
    AIXX_(__NR_AIX5_chdir, sys_chdir),
    AIXX_(__NR_AIX5_chmod, sys_chmod),
    AIXX_(__NR_AIX5_chown, sys_chown),
    AIXX_(__NR_AIX5_close, sys_close),
    AIXX_(__NR_AIX5_connext, sys_connext),
    AIXX_(__NR_AIX5_execve, sys_execve),
    AIXXY(__NR_AIX5_finfo, sys_finfo),
    AIXXY(__NR_AIX5_fstatfs, sys_fstatfs),
    AIXXY(__NR_AIX5_fstatx, sys_fstatx),
    AIXXY(__NR_AIX5_getdirent, sys_getdirent),
    AIXXY(__NR_AIX5_getdirent64, sys_getdirent64),
    AIXXY(__NR_AIX5_getdomainname, sys_getdomainname),
    AIXX_(__NR_AIX5_getgidx, sys_getgidx),
    AIXXY(__NR_AIX5_gethostname, sys_gethostname),
    AIXXY(__NR_AIX5_getpriv, sys_getpriv),
    AIXXY(__NR_AIX5_getprocs, sys_getprocs),
    AIXXY(__NR_AIX5_getprocs64, sys_getprocs), /* XXX: correct? */
    AIXX_(__NR_AIX5_getrpid, sys_getrpid),
    AIXXY(__NR_AIX5_getsockopt, sys_getsockopt),
    AIXX_(__NR_AIX5_gettimerid, sys_gettimerid),
    AIXX_(__NR_AIX5_getuidx, sys_getuidx),
    AIXXY(__NR_AIX5_incinterval, sys_incinterval),
    AIXXY(__NR_AIX5_kfcntl, sys_kfcntl),
    AIXX_(__NR_AIX5_kfork, sys_kfork),
    AIXX_(__NR_AIX5_kill, sys_kill),
    AIXXY(__NR_AIX5_kioctl, sys_kioctl),
    PLAXY(__NR_AIX5_kload, sys_kload),
    AIXX_(__NR_AIX5_klseek, sys_klseek),
    AIXXY(__NR_AIX5_kread, sys_kread),
    AIXXY(__NR_AIX5_kreadv, sys_kreadv),
    AIXX_(__NR_AIX5_kthread_ctl, sys_kthread_ctl),
    AIXX_(__NR_AIX5_ktruncate, sys_ktruncate),
    PLAXY(__NR_AIX5_kunload64, sys_kunload64),
    AIXXY(__NR_AIX5_kwaitpid, sys_kwaitpid),
    AIXX_(__NR_AIX5_kwrite, sys_kwrite),
    AIXX_(__NR_AIX5_kwritev, sys_kwritev),
    AIXX_(__NR_AIX5_lseek, sys_lseek),
    AIXX_(__NR_AIX5_mkdir, sys_mkdir),
    AIXXY(__NR_AIX5_mmap, sys_mmap),
    AIXXY(__NR_AIX5_mntctl, sys_mntctl),
    AIXXY(__NR_AIX5_mprotect, sys_mprotect),
    AIXXY(__NR_AIX5_munmap, sys_munmap),
    AIXXY(__NR_AIX5_ngetpeername, sys_ngetpeername),
    AIXXY(__NR_AIX5_ngetsockname, sys_ngetsockname),
    AIXXY(__NR_AIX5_nrecvfrom, sys_nrecvfrom),
    AIXX_(__NR_AIX5_nrecvmsg, sys_nrecvmsg),
    AIXX_(__NR_AIX5_open, sys_open),
    AIXXY(__NR_AIX5_pipe, sys_pipe),
    AIXX_(__NR_AIX5_privcheck, sys_privcheck),
    AIXX_(__NR_AIX5_rename, sys_rename),
    AIXXY(__NR_AIX5_sbrk, sys_sbrk),
    AIXXY(__NR_AIX5_sem_init, sys_sem_init),
    AIXXY(__NR_AIX5_sem_post, sys_sem_post),
    AIXX_(__NR_AIX5_send, sys_send),
    AIXX_(__NR_AIX5_setgid, sys_setgid),
    AIXX_(__NR_AIX5_setsockopt, sys_setsockopt),
    AIXX_(__NR_AIX5_setuid, sys_setuid),
    AIXXY(__NR_AIX5_shmat, sys_shmat),
    AIXXY(__NR_AIX5_shmctl, sys_shmctl),
    AIXXY(__NR_AIX5_shmdt, sys_shmdt),
    AIXX_(__NR_AIX5_shmget, sys_shmget),
    AIXX_(__NR_AIX5_shutdown, sys_shutdown),
    AIXX_(__NR_AIX5_sigcleanup, sys_sigcleanup),
    AIXXY(__NR_AIX5_sigprocmask, sys_sigprocmask),
    AIXXY(__NR_AIX5_sys_parm, sys_sys_parm),
    AIXXY(__NR_AIX5_sysconfig, sys_sysconfig),
    AIXX_(__NR_AIX5_socket, sys_socket),
    AIXXY(__NR_AIX5_statx, sys_statx),
    AIXXY(__NR_AIX5_thread_create, sys_thread_create),
    AIXX_(__NR_AIX5_thread_init, sys_thread_init),
    AIXX_(__NR_AIX5_thread_kill, sys_thread_kill),
    AIXXY(__NR_AIX5_thread_setmystate, sys_thread_setmystate),
    AIXX_(__NR_AIX5_thread_setmystate_fast, sys_thread_setmystate_fast),
    PLAXY(__NR_AIX5_thread_setstate, sys_thread_setstate),
    AIXX_(__NR_AIX5_thread_terminate_unlock, sys_thread_terminate_unlock),
    AIXX_(__NR_AIX5_thread_tsleep, sys_thread_tsleep),
    AIXX_(__NR_AIX5_thread_twakeup, sys_thread_twakeup),
    AIXX_(__NR_AIX5_thread_unlock, sys_thread_unlock),
    AIXX_(__NR_AIX5_thread_waitlock_, sys_thread_waitlock_),
    AIXXY(__NR_AIX5_times, sys_times),
    AIXXY(__NR_AIX5_uname, sys_uname),
    AIXX_(__NR_AIX5_unlink, sys_unlink),
    AIXX_(__NR_AIX5_utimes, sys_utimes),
    AIXXY(__NR_AIX5_vmgetinfo, sys_vmgetinfo),
    AIXX_(__NR_AIX5_yield, sys_yield),
    PLAX_(__NR_AIX5_FAKE_SIGRETURN, sys_FAKE_SIGRETURN)
   };

SyscallTableEntry* ML_(get_ppc64_aix5_syscall_entry) ( UInt sysno )
{
   Int i;
   AIX5SCTabEntry tmp;

   const Int tab_size = sizeof(aix5_ppc64_syscall_table)
                        / sizeof(aix5_ppc64_syscall_table[0]);

   for (i = 0; i < tab_size; i++)
      if (sysno == *(aix5_ppc64_syscall_table[i].pSysNo))
         break;

   vg_assert(i >= 0 && i <= tab_size);
   if (i == tab_size)
      return NULL; /* can't find a wrapper */

   /* Move the entry we just found a bit closer to the front of the
      table, so as to make future searches for it cheaper. */
   if (i > 0) {
      tmp = aix5_ppc64_syscall_table[i-1];
      aix5_ppc64_syscall_table[i-1] = aix5_ppc64_syscall_table[i];
      aix5_ppc64_syscall_table[i] = tmp;
      i--;
   }

   vg_assert(i >= 0 && i < tab_size);
   return &aix5_ppc64_syscall_table[i].wrappers;
}
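
/* Usage sketch (illustrative only, not a definitive statement of the
   driver's code): the generic syscall handler is expected to look up
   the wrappers roughly like

      SyscallTableEntry* sys
         = ML_(get_ppc64_aix5_syscall_entry)( sysno );
      if (sys == NULL) {
         .. unknown syscall: warn, fail the call ..
      } else {
         .. run the PRE wrapper, do the syscall, then run the
            POST wrapper if the entry has one (PLAX_/AIXX_ entries
            have a NULL POST slot) ..
      }

   The member names of SyscallTableEntry are defined in
   priv_types_n_macros.h, not here. */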

#endif // defined(VGP_ppc64_aix5)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/