
/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-linux.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2012 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
30
31 #if defined(VGP_x86_linux)
32
33 /* TODO/FIXME jrs 20050207: assignments to the syscall return result
34 in interrupted_syscall() need to be reviewed. They don't seem
35 to assign the shadow state.
36 */
37
38 #include "pub_core_basics.h"
39 #include "pub_core_vki.h"
40 #include "pub_core_vkiscnums.h"
41 #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
42 #include "pub_core_threadstate.h"
43 #include "pub_core_aspacemgr.h"
44 #include "pub_core_debuglog.h"
45 #include "pub_core_libcbase.h"
46 #include "pub_core_libcassert.h"
47 #include "pub_core_libcprint.h"
48 #include "pub_core_libcproc.h"
49 #include "pub_core_libcsignal.h"
50 #include "pub_core_mallocfree.h"
51 #include "pub_core_options.h"
52 #include "pub_core_scheduler.h"
53 #include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
54 #include "pub_core_signals.h"
55 #include "pub_core_syscall.h"
56 #include "pub_core_syswrap.h"
57 #include "pub_core_tooliface.h"
58 #include "pub_core_stacks.h" // VG_(register_stack)
59
60 #include "priv_types_n_macros.h"
61 #include "priv_syswrap-generic.h" /* for decls of generic wrappers */
62 #include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */
63 #include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
64 #include "priv_syswrap-main.h"
65
66
67 /* ---------------------------------------------------------------------
68 clone() handling
69 ------------------------------------------------------------------ */
70
71 /* Call f(arg1), but first switch stacks, using 'stack' as the new
72 stack, and use 'retaddr' as f's return-to address. Also, clear all
73 the integer registers before entering f.*/
74 __attribute__((noreturn))
75 void ML_(call_on_new_stack_0_1) ( Addr stack,
76 Addr retaddr,
77 void (*f)(Word),
78 Word arg1 );
79 // 4(%esp) == stack
80 // 8(%esp) == retaddr
81 // 12(%esp) == f
82 // 16(%esp) == arg1
83 asm(
84 ".text\n"
85 ".globl vgModuleLocal_call_on_new_stack_0_1\n"
86 "vgModuleLocal_call_on_new_stack_0_1:\n"
87 " movl %esp, %esi\n" // remember old stack pointer
88 " movl 4(%esi), %esp\n" // set stack
89 " pushl 16(%esi)\n" // arg1 to stack
90 " pushl 8(%esi)\n" // retaddr to stack
91 " pushl 12(%esi)\n" // f to stack
92 " movl $0, %eax\n" // zero all GP regs
93 " movl $0, %ebx\n"
94 " movl $0, %ecx\n"
95 " movl $0, %edx\n"
96 " movl $0, %esi\n"
97 " movl $0, %edi\n"
98 " movl $0, %ebp\n"
99 " ret\n" // jump to f
100 " ud2\n" // should never get here
101 ".previous\n"
102 );
103
104
105 /*
106 Perform a clone system call. clone is strange because it has
107 fork()-like return-twice semantics, so it needs special
108 handling here.
109
110 Upon entry, we have:
111
112 int (fn)(void*) in 0+FSZ(%esp)
113 void* child_stack in 4+FSZ(%esp)
114 int flags in 8+FSZ(%esp)
115 void* arg in 12+FSZ(%esp)
116 pid_t* child_tid in 16+FSZ(%esp)
117 pid_t* parent_tid in 20+FSZ(%esp)
118 void* tls_ptr in 24+FSZ(%esp)
119
120 System call requires:
121
122 int $__NR_clone in %eax
123 int flags in %ebx
124 void* child_stack in %ecx
125 pid_t* parent_tid in %edx
126 pid_t* child_tid in %edi
127 void* tls_ptr in %esi
128
129 Returns an Int encoded in the linux-x86 way, not a SysRes.
130 */
131 #define FSZ "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
132 #define __NR_CLONE VG_STRINGIFY(__NR_clone)
133 #define __NR_EXIT VG_STRINGIFY(__NR_exit)
134
135 extern
136 Int do_syscall_clone_x86_linux ( Word (*fn)(void *),
137 void* stack,
138 Int flags,
139 void* arg,
140 Int* child_tid,
141 Int* parent_tid,
142 vki_modify_ldt_t * );
143 asm(
144 ".text\n"
145 ".globl do_syscall_clone_x86_linux\n"
146 "do_syscall_clone_x86_linux:\n"
147 " push %ebx\n"
148 " push %edi\n"
149 " push %esi\n"
150
151 /* set up child stack with function and arg */
152 " movl 4+"FSZ"(%esp), %ecx\n" /* syscall arg2: child stack */
153 " movl 12+"FSZ"(%esp), %ebx\n" /* fn arg */
154 " movl 0+"FSZ"(%esp), %eax\n" /* fn */
155 " lea -8(%ecx), %ecx\n" /* make space on stack */
156 " movl %ebx, 4(%ecx)\n" /* fn arg */
157 " movl %eax, 0(%ecx)\n" /* fn */
158
159 /* get other args to clone */
160 " movl 8+"FSZ"(%esp), %ebx\n" /* syscall arg1: flags */
161 " movl 20+"FSZ"(%esp), %edx\n" /* syscall arg3: parent tid * */
162 " movl 16+"FSZ"(%esp), %edi\n" /* syscall arg5: child tid * */
163 " movl 24+"FSZ"(%esp), %esi\n" /* syscall arg4: tls_ptr * */
164 " movl $"__NR_CLONE", %eax\n"
165 " int $0x80\n" /* clone() */
166 " testl %eax, %eax\n" /* child if retval == 0 */
167 " jnz 1f\n"
168
169 /* CHILD - call thread function */
170 " popl %eax\n"
171 " call *%eax\n" /* call fn */
172
173 /* exit with result */
174 " movl %eax, %ebx\n" /* arg1: return value from fn */
175 " movl $"__NR_EXIT", %eax\n"
176 " int $0x80\n"
177
178 /* Hm, exit returned */
179 " ud2\n"
180
181 "1:\n" /* PARENT or ERROR */
182 " pop %esi\n"
183 " pop %edi\n"
184 " pop %ebx\n"
185 " ret\n"
186 ".previous\n"
187 );
188
189 #undef FSZ
190 #undef __NR_CLONE
191 #undef __NR_EXIT
192
193
194 // forward declarations
195 static void setup_child ( ThreadArchState*, ThreadArchState*, Bool );
196 static SysRes sys_set_thread_area ( ThreadId, vki_modify_ldt_t* );
197
198 /*
199 When a client clones, we need to keep track of the new thread. This means:
200 1. allocate a ThreadId+ThreadState+stack for the the thread
201
202 2. initialize the thread's new VCPU state
203
204 3. create the thread using the same args as the client requested,
205 but using the scheduler entrypoint for EIP, and a separate stack
206 for ESP.
207 */
do_clone(ThreadId ptid,UInt flags,Addr esp,Int * parent_tidptr,Int * child_tidptr,vki_modify_ldt_t * tlsinfo)208 static SysRes do_clone ( ThreadId ptid,
209 UInt flags, Addr esp,
210 Int* parent_tidptr,
211 Int* child_tidptr,
212 vki_modify_ldt_t *tlsinfo)
213 {
214 static const Bool debug = False;
215
216 ThreadId ctid = VG_(alloc_ThreadState)();
217 ThreadState* ptst = VG_(get_ThreadState)(ptid);
218 ThreadState* ctst = VG_(get_ThreadState)(ctid);
219 UWord* stack;
220 NSegment const* seg;
221 SysRes res;
222 Int eax;
223 vki_sigset_t blockall, savedmask;
224
225 VG_(sigfillset)(&blockall);
226
227 vg_assert(VG_(is_running_thread)(ptid));
228 vg_assert(VG_(is_valid_tid)(ctid));
229
230 stack = (UWord*)ML_(allocstack)(ctid);
231 if (stack == NULL) {
232 res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
233 goto out;
234 }
235
236 /* Copy register state
237
238 Both parent and child return to the same place, and the code
239 following the clone syscall works out which is which, so we
240 don't need to worry about it.
241
242 The parent gets the child's new tid returned from clone, but the
243 child gets 0.
244
245 If the clone call specifies a NULL esp for the new thread, then
246 it actually gets a copy of the parent's esp.
247 */
248 /* Note: the clone call done by the Quadrics Elan3 driver specifies
249 clone flags of 0xF00, and it seems to rely on the assumption
250 that the child inherits a copy of the parent's GDT.
251 setup_child takes care of setting that up. */
252 setup_child( &ctst->arch, &ptst->arch, True );
253
254 /* Make sys_clone appear to have returned Success(0) in the
255 child. */
256 ctst->arch.vex.guest_EAX = 0;
257
258 if (esp != 0)
259 ctst->arch.vex.guest_ESP = esp;
260
261 ctst->os_state.parent = ptid;
262
263 /* inherit signal mask */
264 ctst->sig_mask = ptst->sig_mask;
265 ctst->tmp_sig_mask = ptst->sig_mask;
266
267 /* Start the child with its threadgroup being the same as the
268 parent's. This is so that any exit_group calls that happen
269 after the child is created but before it sets its
270 os_state.threadgroup field for real (in thread_wrapper in
271 syswrap-linux.c), really kill the new thread. a.k.a this avoids
272 a race condition in which the thread is unkillable (via
273 exit_group) because its threadgroup is not set. The race window
274 is probably only a few hundred or a few thousand cycles long.
275 See #226116. */
276 ctst->os_state.threadgroup = ptst->os_state.threadgroup;
277
278 /* We don't really know where the client stack is, because its
279 allocated by the client. The best we can do is look at the
280 memory mappings and try to derive some useful information. We
281 assume that esp starts near its highest possible value, and can
282 only go down to the start of the mmaped segment. */
283 seg = VG_(am_find_nsegment)((Addr)esp);
284 if (seg && seg->kind != SkResvn) {
285 ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(esp);
286 ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;
287
288 VG_(register_stack)(seg->start, ctst->client_stack_highest_word);
289
290 if (debug)
291 VG_(printf)("tid %d: guessed client stack range %#lx-%#lx\n",
292 ctid, seg->start, VG_PGROUNDUP(esp));
293 } else {
294 VG_(message)(Vg_UserMsg,
295 "!? New thread %d starts with ESP(%#lx) unmapped\n",
296 ctid, esp);
297 ctst->client_stack_szB = 0;
298 }
299
300 /* Assume the clone will succeed, and tell any tool that wants to
301 know that this thread has come into existence. We cannot defer
302 it beyond this point because sys_set_thread_area, just below,
303 causes tCheck to assert by making references to the new ThreadId
304 if we don't state the new thread exists prior to that point.
305 If the clone fails, we'll send out a ll_exit notification for it
306 at the out: label below, to clean up. */
307 vg_assert(VG_(owns_BigLock_LL)(ptid));
308 VG_TRACK ( pre_thread_ll_create, ptid, ctid );
309
310 if (flags & VKI_CLONE_SETTLS) {
311 if (debug)
312 VG_(printf)("clone child has SETTLS: tls info at %p: idx=%d "
313 "base=%#lx limit=%x; esp=%#x fs=%x gs=%x\n",
314 tlsinfo, tlsinfo->entry_number,
315 tlsinfo->base_addr, tlsinfo->limit,
316 ptst->arch.vex.guest_ESP,
317 ctst->arch.vex.guest_FS, ctst->arch.vex.guest_GS);
318 res = sys_set_thread_area(ctid, tlsinfo);
319 if (sr_isError(res))
320 goto out;
321 }
322
323 flags &= ~VKI_CLONE_SETTLS;
324
325 /* start the thread with everything blocked */
326 VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);
327
328 /* Create the new thread */
329 eax = do_syscall_clone_x86_linux(
330 ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
331 child_tidptr, parent_tidptr, NULL
332 );
333 res = VG_(mk_SysRes_x86_linux)( eax );
334
335 VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);
336
337 out:
338 if (sr_isError(res)) {
339 /* clone failed */
340 VG_(cleanup_thread)(&ctst->arch);
341 ctst->status = VgTs_Empty;
342 /* oops. Better tell the tool the thread exited in a hurry :-) */
343 VG_TRACK( pre_thread_ll_exit, ctid );
344 }
345
346 return res;
347 }
348
349
/* ---------------------------------------------------------------------
   LDT/GDT simulation
   ------------------------------------------------------------------ */

/* Details of the LDT simulation
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   When a program runs natively, the linux kernel allows each *thread*
   in it to have its own LDT.  Almost all programs never do this --
   it's wildly unportable, after all -- and so the kernel never
   allocates the structure, which is just as well as an LDT occupies
   64k of memory (8192 entries of size 8 bytes).

   A thread may choose to modify its LDT entries, by doing the
   __NR_modify_ldt syscall.  In such a situation the kernel will then
   allocate an LDT structure for it.  Each LDT entry is basically a
   (base, limit) pair.  A virtual address in a specific segment is
   translated to a linear address by adding the segment's base value.
   In addition, the virtual address must not exceed the limit value.

   To use an LDT entry, a thread loads one of the segment registers
   (%cs, %ss, %ds, %es, %fs, %gs) with the index of the LDT entry (0
   .. 8191) it wants to use.  In fact, the required value is (index <<
   3) + 7, but that's not important right now.  Any normal instruction
   which includes an addressing mode can then be made relative to that
   LDT entry by prefixing the insn with a so-called segment-override
   prefix, a byte which indicates which of the 6 segment registers
   holds the LDT index.

   Now, a key constraint is that valgrind's address checks operate in
   terms of linear addresses.  So we have to explicitly translate
   virtual addrs into linear addrs, and that means doing a complete
   LDT simulation.

   Calls to modify_ldt are intercepted.  For each thread, we maintain
   an LDT (with the same normally-never-allocated optimisation that
   the kernel does).  This is updated as expected via calls to
   modify_ldt.

   When a thread does an amode calculation involving a segment
   override prefix, the relevant LDT entry for the thread is
   consulted.  It all works.

   There is a conceptual problem, which appears when switching back to
   native execution, either temporarily to pass syscalls to the
   kernel, or permanently, when debugging V.  Problem at such points
   is that it's pretty pointless to copy the simulated machine's
   segment registers to the real machine, because we'd also need to
   copy the simulated LDT into the real one, and that's prohibitively
   expensive.

   Fortunately it looks like no syscalls rely on the segment regs or
   LDT being correct, so we can get away with it.  Apart from that the
   simulation is pretty straightforward.  All 6 segment registers are
   tracked, although only %ds, %es, %fs and %gs are allowed as
   prefixes.  Perhaps it could be restricted even more than that -- I
   am not sure what is and isn't allowed in user-mode.
*/
408
409 /* Translate a struct modify_ldt_ldt_s to a VexGuestX86SegDescr, using
410 the Linux kernel's logic (cut-n-paste of code in
411 linux/kernel/ldt.c). */
412
413 static
translate_to_hw_format(vki_modify_ldt_t * inn,VexGuestX86SegDescr * out,Int oldmode)414 void translate_to_hw_format ( /* IN */ vki_modify_ldt_t* inn,
415 /* OUT */ VexGuestX86SegDescr* out,
416 Int oldmode )
417 {
418 UInt entry_1, entry_2;
419 vg_assert(8 == sizeof(VexGuestX86SegDescr));
420
421 if (0)
422 VG_(printf)("translate_to_hw_format: base %#lx, limit %d\n",
423 inn->base_addr, inn->limit );
424
425 /* Allow LDTs to be cleared by the user. */
426 if (inn->base_addr == 0 && inn->limit == 0) {
427 if (oldmode ||
428 (inn->contents == 0 &&
429 inn->read_exec_only == 1 &&
430 inn->seg_32bit == 0 &&
431 inn->limit_in_pages == 0 &&
432 inn->seg_not_present == 1 &&
433 inn->useable == 0 )) {
434 entry_1 = 0;
435 entry_2 = 0;
436 goto install;
437 }
438 }
439
440 entry_1 = ((inn->base_addr & 0x0000ffff) << 16) |
441 (inn->limit & 0x0ffff);
442 entry_2 = (inn->base_addr & 0xff000000) |
443 ((inn->base_addr & 0x00ff0000) >> 16) |
444 (inn->limit & 0xf0000) |
445 ((inn->read_exec_only ^ 1) << 9) |
446 (inn->contents << 10) |
447 ((inn->seg_not_present ^ 1) << 15) |
448 (inn->seg_32bit << 22) |
449 (inn->limit_in_pages << 23) |
450 0x7000;
451 if (!oldmode)
452 entry_2 |= (inn->useable << 20);
453
454 /* Install the new entry ... */
455 install:
456 out->LdtEnt.Words.word1 = entry_1;
457 out->LdtEnt.Words.word2 = entry_2;
458 }
459
460 /* Create a zeroed-out GDT. */
alloc_zeroed_x86_GDT(void)461 static VexGuestX86SegDescr* alloc_zeroed_x86_GDT ( void )
462 {
463 Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr);
464 return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxG.1", nbytes, 1);
465 }
466
467 /* Create a zeroed-out LDT. */
alloc_zeroed_x86_LDT(void)468 static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void )
469 {
470 Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
471 return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxL.1", nbytes, 1);
472 }
473
474 /* Free up an LDT or GDT allocated by the above fns. */
free_LDT_or_GDT(VexGuestX86SegDescr * dt)475 static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt )
476 {
477 vg_assert(dt);
478 VG_(arena_free)(VG_AR_CORE, (void*)dt);
479 }
480
481 /* Copy contents between two existing LDTs. */
copy_LDT_from_to(VexGuestX86SegDescr * src,VexGuestX86SegDescr * dst)482 static void copy_LDT_from_to ( VexGuestX86SegDescr* src,
483 VexGuestX86SegDescr* dst )
484 {
485 Int i;
486 vg_assert(src);
487 vg_assert(dst);
488 for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++)
489 dst[i] = src[i];
490 }
491
492 /* Copy contents between two existing GDTs. */
copy_GDT_from_to(VexGuestX86SegDescr * src,VexGuestX86SegDescr * dst)493 static void copy_GDT_from_to ( VexGuestX86SegDescr* src,
494 VexGuestX86SegDescr* dst )
495 {
496 Int i;
497 vg_assert(src);
498 vg_assert(dst);
499 for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++)
500 dst[i] = src[i];
501 }
502
503 /* Free this thread's DTs, if it has any. */
deallocate_LGDTs_for_thread(VexGuestX86State * vex)504 static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex )
505 {
506 vg_assert(sizeof(HWord) == sizeof(void*));
507
508 if (0)
509 VG_(printf)("deallocate_LGDTs_for_thread: "
510 "ldt = 0x%lx, gdt = 0x%lx\n",
511 vex->guest_LDT, vex->guest_GDT );
512
513 if (vex->guest_LDT != (HWord)NULL) {
514 free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_LDT );
515 vex->guest_LDT = (HWord)NULL;
516 }
517
518 if (vex->guest_GDT != (HWord)NULL) {
519 free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_GDT );
520 vex->guest_GDT = (HWord)NULL;
521 }
522 }
523
524
/*
 * linux/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

/*
 * read_ldt() is not really atomic - this is not a problem since
 * synchronization of reads and writes done to the LDT has to be
 * assured by user-space anyway. Writes are atomic, to protect
 * the security checks done on new descriptors.
 */
538 static
read_ldt(ThreadId tid,UChar * ptr,UInt bytecount)539 SysRes read_ldt ( ThreadId tid, UChar* ptr, UInt bytecount )
540 {
541 SysRes res;
542 UInt i, size;
543 UChar* ldt;
544
545 if (0)
546 VG_(printf)("read_ldt: tid = %d, ptr = %p, bytecount = %d\n",
547 tid, ptr, bytecount );
548
549 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
550 vg_assert(8 == sizeof(VexGuestX86SegDescr));
551
552 ldt = (Char*)(VG_(threads)[tid].arch.vex.guest_LDT);
553 res = VG_(mk_SysRes_Success)( 0 );
554 if (ldt == NULL)
555 /* LDT not allocated, meaning all entries are null */
556 goto out;
557
558 size = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
559 if (size > bytecount)
560 size = bytecount;
561
562 res = VG_(mk_SysRes_Success)( size );
563 for (i = 0; i < size; i++)
564 ptr[i] = ldt[i];
565
566 out:
567 return res;
568 }
569
570
571 static
write_ldt(ThreadId tid,void * ptr,UInt bytecount,Int oldmode)572 SysRes write_ldt ( ThreadId tid, void* ptr, UInt bytecount, Int oldmode )
573 {
574 SysRes res;
575 VexGuestX86SegDescr* ldt;
576 vki_modify_ldt_t* ldt_info;
577
578 if (0)
579 VG_(printf)("write_ldt: tid = %d, ptr = %p, "
580 "bytecount = %d, oldmode = %d\n",
581 tid, ptr, bytecount, oldmode );
582
583 vg_assert(8 == sizeof(VexGuestX86SegDescr));
584 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
585
586 ldt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_LDT;
587 ldt_info = (vki_modify_ldt_t*)ptr;
588
589 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
590 if (bytecount != sizeof(vki_modify_ldt_t))
591 goto out;
592
593 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
594 if (ldt_info->entry_number >= VEX_GUEST_X86_LDT_NENT)
595 goto out;
596 if (ldt_info->contents == 3) {
597 if (oldmode)
598 goto out;
599 if (ldt_info->seg_not_present == 0)
600 goto out;
601 }
602
603 /* If this thread doesn't have an LDT, we'd better allocate it
604 now. */
605 if (ldt == NULL) {
606 ldt = alloc_zeroed_x86_LDT();
607 VG_(threads)[tid].arch.vex.guest_LDT = (HWord)ldt;
608 }
609
610 /* Install the new entry ... */
611 translate_to_hw_format ( ldt_info, &ldt[ldt_info->entry_number], oldmode );
612 res = VG_(mk_SysRes_Success)( 0 );
613
614 out:
615 return res;
616 }
617
618
sys_modify_ldt(ThreadId tid,Int func,void * ptr,UInt bytecount)619 static SysRes sys_modify_ldt ( ThreadId tid,
620 Int func, void* ptr, UInt bytecount )
621 {
622 SysRes ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
623
624 switch (func) {
625 case 0:
626 ret = read_ldt(tid, ptr, bytecount);
627 break;
628 case 1:
629 ret = write_ldt(tid, ptr, bytecount, 1);
630 break;
631 case 2:
632 VG_(unimplemented)("sys_modify_ldt: func == 2");
633 /* god knows what this is about */
634 /* ret = read_default_ldt(ptr, bytecount); */
635 /*UNREACHED*/
636 break;
637 case 0x11:
638 ret = write_ldt(tid, ptr, bytecount, 0);
639 break;
640 }
641 return ret;
642 }
643
644
sys_set_thread_area(ThreadId tid,vki_modify_ldt_t * info)645 static SysRes sys_set_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
646 {
647 Int idx;
648 VexGuestX86SegDescr* gdt;
649
650 vg_assert(8 == sizeof(VexGuestX86SegDescr));
651 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
652
653 if (info == NULL)
654 return VG_(mk_SysRes_Error)( VKI_EFAULT );
655
656 gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;
657
658 /* If the thread doesn't have a GDT, allocate it now. */
659 if (!gdt) {
660 gdt = alloc_zeroed_x86_GDT();
661 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
662 }
663
664 idx = info->entry_number;
665
666 if (idx == -1) {
667 /* Find and use the first free entry. Don't allocate entry
668 zero, because the hardware will never do that, and apparently
669 doing so confuses some code (perhaps stuff running on
670 Wine). */
671 for (idx = 1; idx < VEX_GUEST_X86_GDT_NENT; idx++) {
672 if (gdt[idx].LdtEnt.Words.word1 == 0
673 && gdt[idx].LdtEnt.Words.word2 == 0)
674 break;
675 }
676
677 if (idx == VEX_GUEST_X86_GDT_NENT)
678 return VG_(mk_SysRes_Error)( VKI_ESRCH );
679 } else if (idx < 0 || idx == 0 || idx >= VEX_GUEST_X86_GDT_NENT) {
680 /* Similarly, reject attempts to use GDT[0]. */
681 return VG_(mk_SysRes_Error)( VKI_EINVAL );
682 }
683
684 translate_to_hw_format(info, &gdt[idx], 0);
685
686 VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid,
687 "set_thread_area(info->entry)",
688 (Addr) & info->entry_number, sizeof(unsigned int) );
689 info->entry_number = idx;
690 VG_TRACK( post_mem_write, Vg_CoreSysCall, tid,
691 (Addr) & info->entry_number, sizeof(unsigned int) );
692
693 return VG_(mk_SysRes_Success)( 0 );
694 }
695
696
sys_get_thread_area(ThreadId tid,vki_modify_ldt_t * info)697 static SysRes sys_get_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
698 {
699 Int idx;
700 VexGuestX86SegDescr* gdt;
701
702 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
703 vg_assert(8 == sizeof(VexGuestX86SegDescr));
704
705 if (info == NULL)
706 return VG_(mk_SysRes_Error)( VKI_EFAULT );
707
708 idx = info->entry_number;
709
710 if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT)
711 return VG_(mk_SysRes_Error)( VKI_EINVAL );
712
713 gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;
714
715 /* If the thread doesn't have a GDT, allocate it now. */
716 if (!gdt) {
717 gdt = alloc_zeroed_x86_GDT();
718 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
719 }
720
721 info->base_addr = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) |
722 ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) |
723 gdt[idx].LdtEnt.Bits.BaseLow;
724 info->limit = ( gdt[idx].LdtEnt.Bits.LimitHi << 16 ) |
725 gdt[idx].LdtEnt.Bits.LimitLow;
726 info->seg_32bit = gdt[idx].LdtEnt.Bits.Default_Big;
727 info->contents = ( gdt[idx].LdtEnt.Bits.Type >> 2 ) & 0x3;
728 info->read_exec_only = ( gdt[idx].LdtEnt.Bits.Type & 0x1 ) ^ 0x1;
729 info->limit_in_pages = gdt[idx].LdtEnt.Bits.Granularity;
730 info->seg_not_present = gdt[idx].LdtEnt.Bits.Pres ^ 0x1;
731 info->useable = gdt[idx].LdtEnt.Bits.Sys;
732 info->reserved = 0;
733
734 return VG_(mk_SysRes_Success)( 0 );
735 }
736
/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */
740
VG_(cleanup_thread)741 void VG_(cleanup_thread) ( ThreadArchState* arch )
742 {
743 /* Release arch-specific resources held by this thread. */
744 /* On x86, we have to dump the LDT and GDT. */
745 deallocate_LGDTs_for_thread( &arch->vex );
746 }
747
748
/* Initialise a cloned child's arch state from its parent's: guest and
   shadow registers are copied wholesale; the LDT is deep-copied if
   present; the GDT is deep-copied only when inherit_parents_GDT is
   set (Quadrics Elan3 -style clone), otherwise the child starts with
   no GDT. */
static void setup_child ( /*OUT*/ ThreadArchState *child,
                          /*IN*/  ThreadArchState *parent,
                          Bool inherit_parents_GDT )
{
   /* We inherit our parent's guest state. */
   child->vex = parent->vex;
   child->vex_shadow1 = parent->vex_shadow1;
   child->vex_shadow2 = parent->vex_shadow2;

   /* We inherit our parent's LDT. */
   if (parent->vex.guest_LDT == (HWord)NULL) {
      /* We hope this is the common case. */
      child->vex.guest_LDT = (HWord)NULL;
   } else {
      /* No luck .. we have to take a copy of the parent's. */
      child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT();
      copy_LDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_LDT,
                        (VexGuestX86SegDescr*)child->vex.guest_LDT );
   }

   /* Either we start with an empty GDT (the usual case) or inherit a
      copy of our parents' one (Quadrics Elan3 driver -style clone
      only). */
   child->vex.guest_GDT = (HWord)NULL;

   if (inherit_parents_GDT && parent->vex.guest_GDT != (HWord)NULL) {
      child->vex.guest_GDT = (HWord)alloc_zeroed_x86_GDT();
      copy_GDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_GDT,
                        (VexGuestX86SegDescr*)child->vex.guest_GDT );
   }
}
780
781
/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Linux-specific syscalls
   ------------------------------------------------------------------ */
785
786 #define PRE(name) DEFN_PRE_TEMPLATE(x86_linux, name)
787 #define POST(name) DEFN_POST_TEMPLATE(x86_linux, name)
788
789 /* Add prototypes for the wrappers declared here, so that gcc doesn't
790 harass us for not having prototypes. Really this is a kludge --
791 the right thing to do is to make these wrappers 'static' since they
792 aren't visible outside this file, but that requires even more macro
793 magic. */
794 DECL_TEMPLATE(x86_linux, sys_socketcall);
795 DECL_TEMPLATE(x86_linux, sys_stat64);
796 DECL_TEMPLATE(x86_linux, sys_fstatat64);
797 DECL_TEMPLATE(x86_linux, sys_fstat64);
798 DECL_TEMPLATE(x86_linux, sys_lstat64);
799 DECL_TEMPLATE(x86_linux, sys_clone);
800 DECL_TEMPLATE(x86_linux, old_mmap);
801 DECL_TEMPLATE(x86_linux, sys_mmap2);
802 DECL_TEMPLATE(x86_linux, sys_sigreturn);
803 DECL_TEMPLATE(x86_linux, sys_ipc);
804 DECL_TEMPLATE(x86_linux, sys_rt_sigreturn);
805 DECL_TEMPLATE(x86_linux, sys_modify_ldt);
806 DECL_TEMPLATE(x86_linux, sys_set_thread_area);
807 DECL_TEMPLATE(x86_linux, sys_get_thread_area);
808 DECL_TEMPLATE(x86_linux, sys_ptrace);
809 DECL_TEMPLATE(x86_linux, sys_sigsuspend);
810 DECL_TEMPLATE(x86_linux, old_select);
811 DECL_TEMPLATE(x86_linux, sys_vm86old);
812 DECL_TEMPLATE(x86_linux, sys_vm86);
813 DECL_TEMPLATE(x86_linux, sys_syscall223);
814
PRE(old_select)815 PRE(old_select)
816 {
817 /* struct sel_arg_struct {
818 unsigned long n;
819 fd_set *inp, *outp, *exp;
820 struct timeval *tvp;
821 };
822 */
823 PRE_REG_READ1(long, "old_select", struct sel_arg_struct *, args);
824 PRE_MEM_READ( "old_select(args)", ARG1, 5*sizeof(UWord) );
825 *flags |= SfMayBlock;
826 {
827 UInt* arg_struct = (UInt*)ARG1;
828 UInt a1, a2, a3, a4, a5;
829
830 a1 = arg_struct[0];
831 a2 = arg_struct[1];
832 a3 = arg_struct[2];
833 a4 = arg_struct[3];
834 a5 = arg_struct[4];
835
836 PRINT("old_select ( %d, %#x, %#x, %#x, %#x )", a1,a2,a3,a4,a5);
837 if (a2 != (Addr)NULL)
838 PRE_MEM_READ( "old_select(readfds)", a2, a1/8 /* __FD_SETSIZE/8 */ );
839 if (a3 != (Addr)NULL)
840 PRE_MEM_READ( "old_select(writefds)", a3, a1/8 /* __FD_SETSIZE/8 */ );
841 if (a4 != (Addr)NULL)
842 PRE_MEM_READ( "old_select(exceptfds)", a4, a1/8 /* __FD_SETSIZE/8 */ );
843 if (a5 != (Addr)NULL)
844 PRE_MEM_READ( "old_select(timeout)", a5, sizeof(struct vki_timeval) );
845 }
846 }
847
PRE(sys_clone)848 PRE(sys_clone)
849 {
850 UInt cloneflags;
851 Bool badarg = False;
852
853 PRINT("sys_clone ( %lx, %#lx, %#lx, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
854 PRE_REG_READ2(int, "clone",
855 unsigned long, flags,
856 void *, child_stack);
857
858 if (ARG1 & VKI_CLONE_PARENT_SETTID) {
859 if (VG_(tdict).track_pre_reg_read) {
860 PRA3("clone", int *, parent_tidptr);
861 }
862 PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
863 if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int),
864 VKI_PROT_WRITE)) {
865 badarg = True;
866 }
867 }
868 if (ARG1 & VKI_CLONE_SETTLS) {
869 if (VG_(tdict).track_pre_reg_read) {
870 PRA4("clone", vki_modify_ldt_t *, tlsinfo);
871 }
872 PRE_MEM_READ("clone(tlsinfo)", ARG4, sizeof(vki_modify_ldt_t));
873 if (!VG_(am_is_valid_for_client)(ARG4, sizeof(vki_modify_ldt_t),
874 VKI_PROT_READ)) {
875 badarg = True;
876 }
877 }
878 if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
879 if (VG_(tdict).track_pre_reg_read) {
880 PRA5("clone", int *, child_tidptr);
881 }
882 PRE_MEM_WRITE("clone(child_tidptr)", ARG5, sizeof(Int));
883 if (!VG_(am_is_valid_for_client)(ARG5, sizeof(Int),
884 VKI_PROT_WRITE)) {
885 badarg = True;
886 }
887 }
888
889 if (badarg) {
890 SET_STATUS_Failure( VKI_EFAULT );
891 return;
892 }
893
894 cloneflags = ARG1;
895
896 if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
897 SET_STATUS_Failure( VKI_EINVAL );
898 return;
899 }
900
901 /* Be ultra-paranoid and filter out any clone-variants we don't understand:
902 - ??? specifies clone flags of 0x100011
903 - ??? specifies clone flags of 0x1200011.
904 - NPTL specifies clone flags of 0x7D0F00.
905 - The Quadrics Elan3 driver specifies clone flags of 0xF00.
906 - Newer Quadrics Elan3 drivers with NTPL support specify 0x410F00.
907 Everything else is rejected.
908 */
909 if (
910 1 ||
911 /* 11 Nov 05: for the time being, disable this ultra-paranoia.
912 The switch below probably does a good enough job. */
913 (cloneflags == 0x100011 || cloneflags == 0x1200011
914 || cloneflags == 0x7D0F00
915 || cloneflags == 0x790F00
916 || cloneflags == 0x3D0F00
917 || cloneflags == 0x410F00
918 || cloneflags == 0xF00
919 || cloneflags == 0xF21)) {
920 /* OK */
921 }
922 else {
923 /* Nah. We don't like it. Go away. */
924 goto reject;
925 }
926
927 /* Only look at the flags we really care about */
928 switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
929 | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
930 case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
931 /* thread creation */
932 SET_STATUS_from_SysRes(
933 do_clone(tid,
934 ARG1, /* flags */
935 (Addr)ARG2, /* child ESP */
936 (Int *)ARG3, /* parent_tidptr */
937 (Int *)ARG5, /* child_tidptr */
938 (vki_modify_ldt_t *)ARG4)); /* set_tls */
939 break;
940
941 case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
942 /* FALLTHROUGH - assume vfork == fork */
943 cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);
944
945 case 0: /* plain fork */
946 SET_STATUS_from_SysRes(
947 ML_(do_fork_clone)(tid,
948 cloneflags, /* flags */
949 (Int *)ARG3, /* parent_tidptr */
950 (Int *)ARG5)); /* child_tidptr */
951 break;
952
953 default:
954 reject:
955 /* should we just ENOSYS? */
956 VG_(message)(Vg_UserMsg, "\n");
957 VG_(message)(Vg_UserMsg, "Unsupported clone() flags: 0x%lx\n", ARG1);
958 VG_(message)(Vg_UserMsg, "\n");
959 VG_(message)(Vg_UserMsg, "The only supported clone() uses are:\n");
960 VG_(message)(Vg_UserMsg, " - via a threads library (LinuxThreads or NPTL)\n");
961 VG_(message)(Vg_UserMsg, " - via the implementation of fork or vfork\n");
962 VG_(message)(Vg_UserMsg, " - for the Quadrics Elan3 user-space driver\n");
963 VG_(unimplemented)
964 ("Valgrind does not support general clone().");
965 }
966
967 if (SUCCESS) {
968 if (ARG1 & VKI_CLONE_PARENT_SETTID)
969 POST_MEM_WRITE(ARG3, sizeof(Int));
970 if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
971 POST_MEM_WRITE(ARG5, sizeof(Int));
972
973 /* Thread creation was successful; let the child have the chance
974 to run */
975 *flags |= SfYieldAfter;
976 }
977 }
978
PRE(sys_sigreturn)
{
   /* Handle the classic (non-RT) sigreturn: tear down the signal frame
      built by Valgrind's own signal delivery and restore the thread's
      pre-signal register state.  The kernel never sees this syscall. */

   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over
      sigreturn sequence's "popl %eax" and handler ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr)+sizeof(Word);
   /* XXX why does ESP change differ from rt_sigreturn case below? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it.  False selects
      the non-RT (old-style) frame layout. */
   VG_(sigframe_destroy)(tid, False);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy.  The real register state
      was already installed by VG_(sigframe_destroy) above. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}
1012
PRE(sys_rt_sigreturn)
{
   /* Handle rt_sigreturn: tear down the RT signal frame built by
      Valgrind's own signal delivery and restore the thread's pre-signal
      register state.  The kernel never sees this syscall. */

   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_rt_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr);
   /* XXX why does ESP change differ from sigreturn case above? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it.  True selects
      the RT frame layout (cf. False in sys_sigreturn above). */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}
1046
PRE(sys_modify_ldt)1047 PRE(sys_modify_ldt)
1048 {
1049 PRINT("sys_modify_ldt ( %ld, %#lx, %ld )", ARG1,ARG2,ARG3);
1050 PRE_REG_READ3(int, "modify_ldt", int, func, void *, ptr,
1051 unsigned long, bytecount);
1052
1053 if (ARG1 == 0) {
1054 /* read the LDT into ptr */
1055 PRE_MEM_WRITE( "modify_ldt(ptr)", ARG2, ARG3 );
1056 }
1057 if (ARG1 == 1 || ARG1 == 0x11) {
1058 /* write the LDT with the entry pointed at by ptr */
1059 PRE_MEM_READ( "modify_ldt(ptr)", ARG2, sizeof(vki_modify_ldt_t) );
1060 }
1061 /* "do" the syscall ourselves; the kernel never sees it */
1062 SET_STATUS_from_SysRes( sys_modify_ldt( tid, ARG1, (void*)ARG2, ARG3 ) );
1063
1064 if (ARG1 == 0 && SUCCESS && RES > 0) {
1065 POST_MEM_WRITE( ARG2, RES );
1066 }
1067 }
1068
PRE(sys_set_thread_area)
{
   /* Emulated syscall: install a TLS entry described by *u_info into
      the client's (simulated) GDT; the kernel never sees it. */
   PRINT("sys_set_thread_area ( %#lx )", ARG1);
   PRE_REG_READ1(int, "set_thread_area", struct user_desc *, u_info)
   PRE_MEM_READ( "set_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_set_thread_area( tid, (void *)ARG1 ) );
}
1078
PRE(sys_get_thread_area)
{
   /* Emulated syscall: copy a TLS entry from the client's (simulated)
      GDT out into *u_info; the kernel never sees it. */
   PRINT("sys_get_thread_area ( %#lx )", ARG1);
   PRE_REG_READ1(int, "get_thread_area", struct user_desc *, u_info)
   PRE_MEM_WRITE( "get_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_get_thread_area( tid, (void *)ARG1 ) );

   /* On success the whole descriptor was filled in. */
   if (SUCCESS) {
      POST_MEM_WRITE( ARG1, sizeof(vki_modify_ldt_t) );
   }
}
1092
1093 // Parts of this are x86-specific, but the *PEEK* cases are generic.
1094 //
1095 // ARG3 is only used for pointers into the traced process's address
1096 // space and for offsets into the traced process's struct
1097 // user_regs_struct. It is never a pointer into this process's memory
1098 // space, and we should therefore not check anything it points to.
PRE(sys_ptrace)
{
   /* Pre-wrapper for ptrace: for each request, flag the tracer-side
      buffer (ARG4, 'data') that the kernel will read or write.  ARG3
      ('addr') points into the *traced* process, so per the comment
      above it is deliberately never checked.  Requests not listed
      involve no tracer-side memory we need to check. */
   PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4);
   PRE_REG_READ4(int, "ptrace",
                 long, request, long, pid, long, addr, long, data);
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      /* peeked word is stored through 'data' */
      PRE_MEM_WRITE( "ptrace(peek)", ARG4,
		     sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      PRE_MEM_WRITE( "ptrace(getregs)", ARG4,
		     sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4,
		     sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETFPXREGS:
      PRE_MEM_WRITE( "ptrace(getfpxregs)", ARG4,
                     sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_SETREGS:
      PRE_MEM_READ( "ptrace(setregs)", ARG4,
		     sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_SETFPREGS:
      PRE_MEM_READ( "ptrace(setfpregs)", ARG4,
		     sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_SETFPXREGS:
      PRE_MEM_READ( "ptrace(setfpxregs)", ARG4,
                     sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_GETEVENTMSG:
      PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   case VKI_PTRACE_SETSIGINFO:
      PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   default:
      break;
   }
}
1148
POST(sys_ptrace)
{
   /* Post-wrapper: after a successful ptrace, mark as written the
      tracer-side buffers the kernel filled in.  Mirrors the WRITE
      cases in PRE(sys_ptrace) above. */
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      POST_MEM_WRITE( ARG4, sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETFPXREGS:
      POST_MEM_WRITE( ARG4, sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_GETEVENTMSG:
      POST_MEM_WRITE( ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      /* XXX: This is a simplification. Different parts of the
       * siginfo_t are valid depending on the type of signal.
       */
      POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
      break;
   default:
      break;
   }
}
1179
/* Fetch an Addr-sized word from client memory at 'a', first telling
   the tool about the read so errors are reported with description 's'.
   NB: the dereference is performed directly by this (host) process, so
   it assumes 'a' is actually mapped; PRE_MEM_READ only reports, it
   does not prevent the access. */
static Addr deref_Addr ( ThreadId tid, Addr a, Char* s )
{
   Addr* a_p = (Addr*)a;
   PRE_MEM_READ( s, (Addr)a_p, sizeof(Addr) );
   return *a_p;
}
1186
PRE(sys_ipc)1187 PRE(sys_ipc)
1188 {
1189 PRINT("sys_ipc ( %ld, %ld, %ld, %ld, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
1190 // XXX: this is simplistic -- some args are not used in all circumstances.
1191 PRE_REG_READ6(int, "ipc",
1192 vki_uint, call, int, first, int, second, int, third,
1193 void *, ptr, long, fifth)
1194
1195 switch (ARG1 /* call */) {
1196 case VKI_SEMOP:
1197 ML_(generic_PRE_sys_semop)( tid, ARG2, ARG5, ARG3 );
1198 *flags |= SfMayBlock;
1199 break;
1200 case VKI_SEMGET:
1201 break;
1202 case VKI_SEMCTL:
1203 {
1204 UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
1205 ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg );
1206 break;
1207 }
1208 case VKI_SEMTIMEDOP:
1209 ML_(generic_PRE_sys_semtimedop)( tid, ARG2, ARG5, ARG3, ARG6 );
1210 *flags |= SfMayBlock;
1211 break;
1212 case VKI_MSGSND:
1213 ML_(linux_PRE_sys_msgsnd)( tid, ARG2, ARG5, ARG3, ARG4 );
1214 if ((ARG4 & VKI_IPC_NOWAIT) == 0)
1215 *flags |= SfMayBlock;
1216 break;
1217 case VKI_MSGRCV:
1218 {
1219 Addr msgp;
1220 Word msgtyp;
1221
1222 msgp = deref_Addr( tid,
1223 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
1224 "msgrcv(msgp)" );
1225 msgtyp = deref_Addr( tid,
1226 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
1227 "msgrcv(msgp)" );
1228
1229 ML_(linux_PRE_sys_msgrcv)( tid, ARG2, msgp, ARG3, msgtyp, ARG4 );
1230
1231 if ((ARG4 & VKI_IPC_NOWAIT) == 0)
1232 *flags |= SfMayBlock;
1233 break;
1234 }
1235 case VKI_MSGGET:
1236 break;
1237 case VKI_MSGCTL:
1238 ML_(linux_PRE_sys_msgctl)( tid, ARG2, ARG3, ARG5 );
1239 break;
1240 case VKI_SHMAT:
1241 {
1242 UWord w;
1243 PRE_MEM_WRITE( "shmat(raddr)", ARG4, sizeof(Addr) );
1244 w = ML_(generic_PRE_sys_shmat)( tid, ARG2, ARG5, ARG3 );
1245 if (w == 0)
1246 SET_STATUS_Failure( VKI_EINVAL );
1247 else
1248 ARG5 = w;
1249 break;
1250 }
1251 case VKI_SHMDT:
1252 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG5))
1253 SET_STATUS_Failure( VKI_EINVAL );
1254 break;
1255 case VKI_SHMGET:
1256 break;
1257 case VKI_SHMCTL: /* IPCOP_shmctl */
1258 ML_(generic_PRE_sys_shmctl)( tid, ARG2, ARG3, ARG5 );
1259 break;
1260 default:
1261 VG_(message)(Vg_DebugMsg, "FATAL: unhandled syscall(ipc) %ld\n", ARG1 );
1262 VG_(core_panic)("... bye!\n");
1263 break; /*NOTREACHED*/
1264 }
1265 }
1266
POST(sys_ipc)1267 POST(sys_ipc)
1268 {
1269 vg_assert(SUCCESS);
1270 switch (ARG1 /* call */) {
1271 case VKI_SEMOP:
1272 case VKI_SEMGET:
1273 break;
1274 case VKI_SEMCTL:
1275 {
1276 UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
1277 ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg );
1278 break;
1279 }
1280 case VKI_SEMTIMEDOP:
1281 case VKI_MSGSND:
1282 break;
1283 case VKI_MSGRCV:
1284 {
1285 Addr msgp;
1286 Word msgtyp;
1287
1288 msgp = deref_Addr( tid,
1289 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
1290 "msgrcv(msgp)" );
1291 msgtyp = deref_Addr( tid,
1292 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
1293 "msgrcv(msgp)" );
1294
1295 ML_(linux_POST_sys_msgrcv)( tid, RES, ARG2, msgp, ARG3, msgtyp, ARG4 );
1296 break;
1297 }
1298 case VKI_MSGGET:
1299 break;
1300 case VKI_MSGCTL:
1301 ML_(linux_POST_sys_msgctl)( tid, RES, ARG2, ARG3, ARG5 );
1302 break;
1303 case VKI_SHMAT:
1304 {
1305 Addr addr;
1306
1307 /* force readability. before the syscall it is
1308 * indeed uninitialized, as can be seen in
1309 * glibc/sysdeps/unix/sysv/linux/shmat.c */
1310 POST_MEM_WRITE( ARG4, sizeof( Addr ) );
1311
1312 addr = deref_Addr ( tid, ARG4, "shmat(addr)" );
1313 ML_(generic_POST_sys_shmat)( tid, addr, ARG2, ARG5, ARG3 );
1314 break;
1315 }
1316 case VKI_SHMDT:
1317 ML_(generic_POST_sys_shmdt)( tid, RES, ARG5 );
1318 break;
1319 case VKI_SHMGET:
1320 break;
1321 case VKI_SHMCTL:
1322 ML_(generic_POST_sys_shmctl)( tid, RES, ARG2, ARG3, ARG5 );
1323 break;
1324 default:
1325 VG_(message)(Vg_DebugMsg,
1326 "FATAL: unhandled syscall(ipc) %ld\n",
1327 ARG1 );
1328 VG_(core_panic)("... bye!\n");
1329 break; /*NOTREACHED*/
1330 }
1331 }
1332
PRE(old_mmap)1333 PRE(old_mmap)
1334 {
1335 /* struct mmap_arg_struct {
1336 unsigned long addr;
1337 unsigned long len;
1338 unsigned long prot;
1339 unsigned long flags;
1340 unsigned long fd;
1341 unsigned long offset;
1342 }; */
1343 UWord a1, a2, a3, a4, a5, a6;
1344 SysRes r;
1345
1346 UWord* args = (UWord*)ARG1;
1347 PRE_REG_READ1(long, "old_mmap", struct mmap_arg_struct *, args);
1348 PRE_MEM_READ( "old_mmap(args)", (Addr)args, 6*sizeof(UWord) );
1349
1350 a1 = args[1-1];
1351 a2 = args[2-1];
1352 a3 = args[3-1];
1353 a4 = args[4-1];
1354 a5 = args[5-1];
1355 a6 = args[6-1];
1356
1357 PRINT("old_mmap ( %#lx, %llu, %ld, %ld, %ld, %ld )",
1358 a1, (ULong)a2, a3, a4, a5, a6 );
1359
1360 r = ML_(generic_PRE_sys_mmap)( tid, a1, a2, a3, a4, a5, (Off64T)a6 );
1361 SET_STATUS_from_SysRes(r);
1362 }
1363
PRE(sys_mmap2)
{
   SysRes r;

   // Exactly like old_mmap() except:
   //  - all 6 args are passed in regs, rather than in a memory-block.
   //  - the file offset is specified in pagesize units rather than bytes,
   //    so that it can be used for files bigger than 2^32 bytes.
   // pagesize or 4K-size units in offset?  For ppc32/64-linux, this is
   // 4K-sized.  Assert that the page size is 4K here for safety.
   // (the hardwired 4096 multiplier below relies on this assertion)
   vg_assert(VKI_PAGE_SIZE == 4096);
   PRINT("sys_mmap2 ( %#lx, %llu, %ld, %ld, %ld, %ld )",
         ARG1, (ULong)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(long, "mmap2",
                 unsigned long, start, unsigned long, length,
                 unsigned long, prot,  unsigned long, flags,
                 unsigned long, fd,    unsigned long, offset);

   /* convert the page-unit offset to a 64-bit byte offset before
      handing off to the generic mmap checker */
   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
                                       4096 * (Off64T)ARG6 );
   SET_STATUS_from_SysRes(r);
}
1386
1387 // XXX: lstat64/fstat64/stat64 are generic, but not necessarily
1388 // applicable to every architecture -- I think only to 32-bit archs.
1389 // We're going to need something like linux/core_os32.h for such
1390 // things, eventually, I think. --njn
PRE(sys_lstat64)
{
   /* lstat64(file_name, buf): check the path string is readable and
      the stat64 buffer is addressable before the kernel fills it. */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
1398
POST(sys_lstat64)1399 POST(sys_lstat64)
1400 {
1401 vg_assert(SUCCESS);
1402 if (RES == 0) {
1403 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1404 }
1405 }
1406
PRE(sys_stat64)
{
   /* stat64(file_name, buf): as lstat64, but may block on FUSE
      filesystems, hence the extra annotation. */
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_stat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
1415
POST(sys_stat64)
{
   /* Mark the stat64 buffer as written by the kernel. */
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}
1420
PRE(sys_fstatat64)
{
   /* fstatat64(dfd, file_name, buf): path-relative stat64; check the
      path string and output buffer.  May block on FUSE filesystems. */
   FUSE_COMPATIBLE_MAY_BLOCK();
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx )",ARG1,ARG2,(char*)ARG2,ARG3);
   PRE_REG_READ3(long, "fstatat64",
                 int, dfd, char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
   PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
}
1430
POST(sys_fstatat64)
{
   /* Mark the stat64 buffer as written by the kernel. */
   POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
}
1435
PRE(sys_fstat64)
{
   /* fstat64(fd, buf): check the output buffer is addressable. */
   PRINT("sys_fstat64 ( %ld, %#lx )",ARG1,ARG2);
   PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
   PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
1442
POST(sys_fstat64)
{
   /* Mark the stat64 buffer as written by the kernel. */
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}
1447
PRE(sys_socketcall)
{
   /* Demultiplexer for the old socketcall(2) interface: ARG1 selects
      the real socket operation and ARG2 points at an in-memory array
      of its arguments.  For each operation we (a) check that the args
      array itself is readable, and (b) forward the unpacked args to
      the corresponding generic/linux checker. */

#  define ARG2_0  (((UWord*)ARG2)[0])
#  define ARG2_1  (((UWord*)ARG2)[1])
#  define ARG2_2  (((UWord*)ARG2)[2])
#  define ARG2_3  (((UWord*)ARG2)[3])
#  define ARG2_4  (((UWord*)ARG2)[4])
#  define ARG2_5  (((UWord*)ARG2)[5])

   *flags |= SfMayBlock;
   PRINT("sys_socketcall ( %ld, %#lx )",ARG1,ARG2);
   PRE_REG_READ2(long, "socketcall", int, call, unsigned long *, args);

   switch (ARG1 /* request */) {

   case VKI_SYS_SOCKETPAIR:
      /* int socketpair(int d, int type, int protocol, int sv[2]); */
      PRE_MEM_READ( "socketcall.socketpair(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_socketpair)( tid, ARG2_0, ARG2_1, ARG2_2, ARG2_3 );
      break;

   case VKI_SYS_SOCKET:
      /* int socket(int domain, int type, int protocol); */
      PRE_MEM_READ( "socketcall.socket(args)", ARG2, 3*sizeof(Addr) );
      break;

   case VKI_SYS_BIND:
      /* int bind(int sockfd, struct sockaddr *my_addr,
                  int addrlen); */
      PRE_MEM_READ( "socketcall.bind(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_bind)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_LISTEN:
      /* int listen(int s, int backlog); */
      PRE_MEM_READ( "socketcall.listen(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_ACCEPT: {
      /* int accept(int s, struct sockaddr *addr, int *addrlen); */
      PRE_MEM_READ( "socketcall.accept(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;
   }

   case VKI_SYS_ACCEPT4: {
      /*int accept(int s, struct sockaddr *add, int *addrlen, int flags)*/
      /* the extra flags word (ARG2_3) is covered by the args-array
         read check; no pointed-to memory of its own */
      PRE_MEM_READ( "socketcall.accept4(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;
   }

   case VKI_SYS_SENDTO:
      /* int sendto(int s, const void *msg, int len,
                    unsigned int flags,
                    const struct sockaddr *to, int tolen); */
      PRE_MEM_READ( "socketcall.sendto(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_sendto)( tid, ARG2_0, ARG2_1, ARG2_2,
                                   ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_SEND:
      /* int send(int s, const void *msg, size_t len, int flags); */
      PRE_MEM_READ( "socketcall.send(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_send)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_RECVFROM:
      /* int recvfrom(int s, void *buf, int len, unsigned int flags,
         struct sockaddr *from, int *fromlen); */
      PRE_MEM_READ( "socketcall.recvfrom(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_recvfrom)( tid, ARG2_0, ARG2_1, ARG2_2,
                                     ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_RECV:
      /* int recv(int s, void *buf, int len, unsigned int flags); */
      /* man 2 recv says:
         The  recv call is normally used only on a connected socket
         (see connect(2)) and is identical to recvfrom with a  NULL
         from parameter.
      */
      PRE_MEM_READ( "socketcall.recv(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_recv)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_CONNECT:
      /* int connect(int sockfd,
                     struct sockaddr *serv_addr, int addrlen ); */
      PRE_MEM_READ( "socketcall.connect(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_connect)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SETSOCKOPT:
      /* int setsockopt(int s, int level, int optname,
                        const void *optval, int optlen); */
      PRE_MEM_READ( "socketcall.setsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(generic_PRE_sys_setsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                       ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKOPT:
      /* int getsockopt(int s, int level, int optname,
                        void *optval, socklen_t *optlen); */
      PRE_MEM_READ( "socketcall.getsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(linux_PRE_sys_getsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                     ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKNAME:
      /* int getsockname(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getsockname(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getsockname)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_GETPEERNAME:
      /* int getpeername(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getpeername(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getpeername)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SHUTDOWN:
      /* int shutdown(int s, int how); */
      PRE_MEM_READ( "socketcall.shutdown(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_SENDMSG: {
      /* int sendmsg(int s, const struct msghdr *msg, int flags); */

      /* this causes warnings, and I don't get why. glibc bug?
       * (after all it's glibc providing the arguments array)
       PRE_MEM_READ( "socketcall.sendmsg(args)", ARG2, 3*sizeof(Addr) );
      */
      ML_(generic_PRE_sys_sendmsg)( tid, "msg", (struct vki_msghdr *)ARG2_1 );
      break;
   }

   case VKI_SYS_RECVMSG: {
      /* int recvmsg(int s, struct msghdr *msg, int flags); */

      /* this causes warnings, and I don't get why. glibc bug?
       * (after all it's glibc providing the arguments array)
       PRE_MEM_READ("socketcall.recvmsg(args)", ARG2, 3*sizeof(Addr) );
      */
      ML_(generic_PRE_sys_recvmsg)( tid, "msg", (struct vki_msghdr *)ARG2_1 );
      break;
   }

   default:
      /* unknown sub-call: fail it rather than panicking, since the
         kernel would likewise reject it */
      VG_(message)(Vg_DebugMsg,"Warning: unhandled socketcall 0x%lx\n",ARG1);
      SET_STATUS_Failure( VKI_EINVAL );
      break;
   }
#  undef ARG2_0
#  undef ARG2_1
#  undef ARG2_2
#  undef ARG2_3
#  undef ARG2_4
#  undef ARG2_5
}
1608
POST(sys_socketcall)
{
   /* Post-wrapper for socketcall: forward the (successful) result to
      the per-operation POST checker so output buffers (addresses,
      received data, options, new fds) get marked as written. */

#  define ARG2_0  (((UWord*)ARG2)[0])
#  define ARG2_1  (((UWord*)ARG2)[1])
#  define ARG2_2  (((UWord*)ARG2)[2])
#  define ARG2_3  (((UWord*)ARG2)[3])
#  define ARG2_4  (((UWord*)ARG2)[4])
#  define ARG2_5  (((UWord*)ARG2)[5])

   SysRes r;
   vg_assert(SUCCESS);
   switch (ARG1 /* request */) {

   case VKI_SYS_SOCKETPAIR:
      r = ML_(generic_POST_sys_socketpair)(
             tid, VG_(mk_SysRes_Success)(RES),
             ARG2_0, ARG2_1, ARG2_2, ARG2_3
          );
      /* the POST checker may veto the result (e.g. fd tracking) */
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_SOCKET:
      r = ML_(generic_POST_sys_socket)( tid, VG_(mk_SysRes_Success)(RES) );
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_BIND:
      /* int bind(int sockfd, struct sockaddr *my_addr,
			int addrlen); */
      break;

   case VKI_SYS_LISTEN:
      /* int listen(int s, int backlog); */
      break;

   case VKI_SYS_ACCEPT:
   case VKI_SYS_ACCEPT4:
      /* int accept(int s, struct sockaddr *addr, int *addrlen); */
      /* int accept4(int s, struct sockaddr *addr, int *addrlen, int flags); */
      r = ML_(generic_POST_sys_accept)( tid, VG_(mk_SysRes_Success)(RES),
                                        ARG2_0, ARG2_1, ARG2_2 );
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_SENDTO:
      break;

   case VKI_SYS_SEND:
      break;

   case VKI_SYS_RECVFROM:
      ML_(generic_POST_sys_recvfrom)( tid, VG_(mk_SysRes_Success)(RES),
                                      ARG2_0, ARG2_1, ARG2_2,
                                      ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_RECV:
      ML_(generic_POST_sys_recv)( tid, RES, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_CONNECT:
      break;

   case VKI_SYS_SETSOCKOPT:
      break;

   case VKI_SYS_GETSOCKOPT:
      ML_(linux_POST_sys_getsockopt)( tid, VG_(mk_SysRes_Success)(RES),
                                      ARG2_0, ARG2_1,
                                      ARG2_2, ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKNAME:
      ML_(generic_POST_sys_getsockname)( tid, VG_(mk_SysRes_Success)(RES),
                                         ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_GETPEERNAME:
      ML_(generic_POST_sys_getpeername)( tid, VG_(mk_SysRes_Success)(RES),
                                         ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SHUTDOWN:
      break;

   case VKI_SYS_SENDMSG:
      break;

   case VKI_SYS_RECVMSG:
      ML_(generic_POST_sys_recvmsg)( tid, "msg", (struct vki_msghdr *)ARG2_1, RES );
      break;

   default:
      /* can't happen: the PRE wrapper already failed unknown calls
         with EINVAL, so a success status here means internal error */
      VG_(message)(Vg_DebugMsg,"FATAL: unhandled socketcall 0x%lx\n",ARG1);
      VG_(core_panic)("... bye!\n");
      break; /*NOTREACHED*/
   }
#  undef ARG2_0
#  undef ARG2_1
#  undef ARG2_2
#  undef ARG2_3
#  undef ARG2_4
#  undef ARG2_5
}
1713
1714 /* NB: arm-linux has a clone of this one, and ppc32-linux has an almost
1715 identical version. */
PRE(sys_sigsuspend)
{
   /* The C library interface to sigsuspend just takes a pointer to
      a signal mask but this system call has three arguments - the first
      two don't appear to be used by the kernel and are always passed as
      zero by glibc and the third is the first word of the signal mask
      so only 32 signals are supported.
     
      In fact glibc normally uses rt_sigsuspend if it is available as
      that takes a pointer to the signal mask so supports more signals.
    */
   /* Nothing to pre-check beyond the register args: the mask is passed
      by value, not through a pointer. */
   *flags |= SfMayBlock;
   PRINT("sys_sigsuspend ( %ld, %ld, %ld )", ARG1,ARG2,ARG3 );
   PRE_REG_READ3(int, "sigsuspend",
                 int, history0, int, history1,
                 vki_old_sigset_t, mask);
}
1733
PRE(sys_vm86old)
{
   /* vm86old(info): kernel both reads and updates *info; flag the
      whole struct as written (the conservative choice). */
   PRINT("sys_vm86old ( %#lx )", ARG1);
   PRE_REG_READ1(int, "vm86old", struct vm86_struct *, info);
   PRE_MEM_WRITE( "vm86old(info)", ARG1, sizeof(struct vki_vm86_struct));
}
1740
POST(sys_vm86old)
{
   /* Mark the vm86 state struct as updated by the kernel. */
   POST_MEM_WRITE( ARG1, sizeof(struct vki_vm86_struct));
}
1745
PRE(sys_vm86)
{
   /* vm86(fn, v86): only the ENTER variants pass a vm86plus_struct;
      other fn values take no pointer argument here. */
   PRINT("sys_vm86 ( %ld, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(int, "vm86", unsigned long, fn, struct vm86plus_struct *, v86);
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      PRE_MEM_WRITE( "vm86(v86)", ARG2, sizeof(struct vki_vm86plus_struct));
}
1753
POST(sys_vm86)
{
   /* Mirror of the PRE: only the ENTER variants update *v86. */
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      POST_MEM_WRITE( ARG2, sizeof(struct vki_vm86plus_struct));
}
1759
1760
1761 /* ---------------------------------------------------------------
1762 PRE/POST wrappers for x86/Linux-variant specific syscalls
1763 ------------------------------------------------------------ */
1764
PRE(sys_syscall223)
{
   /* Syscall slot 223: unused in stock kernels, but used by sys_bproc
      on bproc kernel variants.  Fail with ENOSYS unless the user
      declared --kernel-variant=bproc. */
   Int err;

   /* 223 is used by sys_bproc.  If we're not on a declared bproc
      variant, fail in the usual way. */

   if (!VG_(strstr)(VG_(clo_kernel_variant), "bproc")) {
      PRINT("non-existent syscall! (syscall 223)");
      PRE_REG_READ0(long, "ni_syscall(223)");
      SET_STATUS_Failure( VKI_ENOSYS );
      return;
   }

   /* bproc-specific argument checking; nonzero means reject */
   err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
                                           ARG4, ARG5, ARG6 );
   if (err) {
      SET_STATUS_Failure( err );
      return;
   }
   /* Let it go through.  */
   *flags |= SfMayBlock; /* who knows?  play safe. */
}
1788
POST(sys_syscall223)
{
   /* Hand the result processing off to the bproc variant code
      (only reached when the PRE wrapper let the call through). */
   ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
                                      ARG4, ARG5, ARG6 );
}
1794
1795 #undef PRE
1796 #undef POST
1797
1798
1799 /* ---------------------------------------------------------------------
1800 The x86/Linux syscall table
1801 ------------------------------------------------------------------ */
1802
1803 /* Add an x86-linux specific wrapper to a syscall table. */
1804 #define PLAX_(sysno, name) WRAPPER_ENTRY_X_(x86_linux, sysno, name)
1805 #define PLAXY(sysno, name) WRAPPER_ENTRY_XY(x86_linux, sysno, name)
1806
1807
1808 // This table maps from __NR_xxx syscall numbers (from
1809 // linux/include/asm-i386/unistd.h) to the appropriate PRE/POST sys_foo()
1810 // wrappers on x86 (as per sys_call_table in linux/arch/i386/kernel/entry.S).
1811 //
1812 // For those syscalls not handled by Valgrind, the annotation indicate its
1813 // arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
1814 // (unknown).
1815
1816 static SyscallTableEntry syscall_table[] = {
1817 //zz // (restart_syscall) // 0
1818 GENX_(__NR_exit, sys_exit), // 1
1819 GENX_(__NR_fork, sys_fork), // 2
1820 GENXY(__NR_read, sys_read), // 3
1821 GENX_(__NR_write, sys_write), // 4
1822
1823 GENXY(__NR_open, sys_open), // 5
1824 GENXY(__NR_close, sys_close), // 6
1825 GENXY(__NR_waitpid, sys_waitpid), // 7
1826 GENXY(__NR_creat, sys_creat), // 8
1827 GENX_(__NR_link, sys_link), // 9
1828
1829 GENX_(__NR_unlink, sys_unlink), // 10
1830 GENX_(__NR_execve, sys_execve), // 11
1831 GENX_(__NR_chdir, sys_chdir), // 12
1832 GENXY(__NR_time, sys_time), // 13
1833 GENX_(__NR_mknod, sys_mknod), // 14
1834
1835 GENX_(__NR_chmod, sys_chmod), // 15
1836 //zz LINX_(__NR_lchown, sys_lchown16), // 16
1837 GENX_(__NR_break, sys_ni_syscall), // 17
1838 //zz // (__NR_oldstat, sys_stat), // 18 (obsolete)
1839 LINX_(__NR_lseek, sys_lseek), // 19
1840
1841 GENX_(__NR_getpid, sys_getpid), // 20
1842 LINX_(__NR_mount, sys_mount), // 21
1843 LINX_(__NR_umount, sys_oldumount), // 22
1844 LINX_(__NR_setuid, sys_setuid16), // 23 ## P
1845 LINX_(__NR_getuid, sys_getuid16), // 24 ## P
1846
1847 LINX_(__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN)
1848 PLAXY(__NR_ptrace, sys_ptrace), // 26
1849 GENX_(__NR_alarm, sys_alarm), // 27
1850 //zz // (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete
1851 GENX_(__NR_pause, sys_pause), // 29
1852
1853 LINX_(__NR_utime, sys_utime), // 30
1854 GENX_(__NR_stty, sys_ni_syscall), // 31
1855 GENX_(__NR_gtty, sys_ni_syscall), // 32
1856 GENX_(__NR_access, sys_access), // 33
1857 GENX_(__NR_nice, sys_nice), // 34
1858
1859 GENX_(__NR_ftime, sys_ni_syscall), // 35
1860 GENX_(__NR_sync, sys_sync), // 36
1861 GENX_(__NR_kill, sys_kill), // 37
1862 GENX_(__NR_rename, sys_rename), // 38
1863 GENX_(__NR_mkdir, sys_mkdir), // 39
1864
1865 GENX_(__NR_rmdir, sys_rmdir), // 40
1866 GENXY(__NR_dup, sys_dup), // 41
1867 LINXY(__NR_pipe, sys_pipe), // 42
1868 GENXY(__NR_times, sys_times), // 43
1869 GENX_(__NR_prof, sys_ni_syscall), // 44
1870 //zz
1871 GENX_(__NR_brk, sys_brk), // 45
1872 LINX_(__NR_setgid, sys_setgid16), // 46
1873 LINX_(__NR_getgid, sys_getgid16), // 47
1874 //zz // (__NR_signal, sys_signal), // 48 */* (ANSI C)
1875 LINX_(__NR_geteuid, sys_geteuid16), // 49
1876
1877 LINX_(__NR_getegid, sys_getegid16), // 50
1878 GENX_(__NR_acct, sys_acct), // 51
1879 LINX_(__NR_umount2, sys_umount), // 52
1880 GENX_(__NR_lock, sys_ni_syscall), // 53
1881 LINXY(__NR_ioctl, sys_ioctl), // 54
1882
1883 LINXY(__NR_fcntl, sys_fcntl), // 55
1884 GENX_(__NR_mpx, sys_ni_syscall), // 56
1885 GENX_(__NR_setpgid, sys_setpgid), // 57
1886 GENX_(__NR_ulimit, sys_ni_syscall), // 58
1887 //zz // (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete
1888 //zz
1889 GENX_(__NR_umask, sys_umask), // 60
1890 GENX_(__NR_chroot, sys_chroot), // 61
1891 //zz // (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated
1892 GENXY(__NR_dup2, sys_dup2), // 63
1893 GENX_(__NR_getppid, sys_getppid), // 64
1894
1895 GENX_(__NR_getpgrp, sys_getpgrp), // 65
1896 GENX_(__NR_setsid, sys_setsid), // 66
1897 LINXY(__NR_sigaction, sys_sigaction), // 67
1898 //zz // (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C)
1899 //zz // (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C)
1900 //zz
1901 LINX_(__NR_setreuid, sys_setreuid16), // 70
1902 LINX_(__NR_setregid, sys_setregid16), // 71
1903 PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72
1904 LINXY(__NR_sigpending, sys_sigpending), // 73
1905 //zz // (__NR_sethostname, sys_sethostname), // 74 */*
1906 //zz
1907 GENX_(__NR_setrlimit, sys_setrlimit), // 75
1908 GENXY(__NR_getrlimit, sys_old_getrlimit), // 76
1909 GENXY(__NR_getrusage, sys_getrusage), // 77
1910 GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
1911 GENX_(__NR_settimeofday, sys_settimeofday), // 79
1912
1913 LINXY(__NR_getgroups, sys_getgroups16), // 80
1914 LINX_(__NR_setgroups, sys_setgroups16), // 81
1915 PLAX_(__NR_select, old_select), // 82
1916 GENX_(__NR_symlink, sys_symlink), // 83
1917 //zz // (__NR_oldlstat, sys_lstat), // 84 -- obsolete
1918 //zz
1919 GENX_(__NR_readlink, sys_readlink), // 85
1920 //zz // (__NR_uselib, sys_uselib), // 86 */Linux
1921 //zz // (__NR_swapon, sys_swapon), // 87 */Linux
1922 //zz // (__NR_reboot, sys_reboot), // 88 */Linux
1923 //zz // (__NR_readdir, old_readdir), // 89 -- superseded
1924 //zz
1925 PLAX_(__NR_mmap, old_mmap), // 90
1926 GENXY(__NR_munmap, sys_munmap), // 91
1927 GENX_(__NR_truncate, sys_truncate), // 92
1928 GENX_(__NR_ftruncate, sys_ftruncate), // 93
1929 GENX_(__NR_fchmod, sys_fchmod), // 94
1930
1931 LINX_(__NR_fchown, sys_fchown16), // 95
1932 GENX_(__NR_getpriority, sys_getpriority), // 96
1933 GENX_(__NR_setpriority, sys_setpriority), // 97
1934 GENX_(__NR_profil, sys_ni_syscall), // 98
1935 GENXY(__NR_statfs, sys_statfs), // 99
1936
1937 GENXY(__NR_fstatfs, sys_fstatfs), // 100
1938 LINX_(__NR_ioperm, sys_ioperm), // 101
1939 PLAXY(__NR_socketcall, sys_socketcall), // 102 x86/Linux-only
1940 LINXY(__NR_syslog, sys_syslog), // 103
1941 GENXY(__NR_setitimer, sys_setitimer), // 104
1942
1943 GENXY(__NR_getitimer, sys_getitimer), // 105
1944 GENXY(__NR_stat, sys_newstat), // 106
1945 GENXY(__NR_lstat, sys_newlstat), // 107
1946 GENXY(__NR_fstat, sys_newfstat), // 108
1947 //zz // (__NR_olduname, sys_uname), // 109 -- obsolete
1948 //zz
1949 GENX_(__NR_iopl, sys_iopl), // 110
1950 LINX_(__NR_vhangup, sys_vhangup), // 111
1951 GENX_(__NR_idle, sys_ni_syscall), // 112
1952 PLAXY(__NR_vm86old, sys_vm86old), // 113 x86/Linux-only
1953 GENXY(__NR_wait4, sys_wait4), // 114
1954 //zz
1955 //zz // (__NR_swapoff, sys_swapoff), // 115 */Linux
1956 LINXY(__NR_sysinfo, sys_sysinfo), // 116
1957 PLAXY(__NR_ipc, sys_ipc), // 117
1958 GENX_(__NR_fsync, sys_fsync), // 118
1959 PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux
1960
1961 PLAX_(__NR_clone, sys_clone), // 120
1962 //zz // (__NR_setdomainname, sys_setdomainname), // 121 */*(?)
1963 GENXY(__NR_uname, sys_newuname), // 122
1964 PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123
1965 LINXY(__NR_adjtimex, sys_adjtimex), // 124
1966
1967 GENXY(__NR_mprotect, sys_mprotect), // 125
1968 LINXY(__NR_sigprocmask, sys_sigprocmask), // 126
1969 //zz // Nb: create_module() was removed 2.4-->2.6
1970 GENX_(__NR_create_module, sys_ni_syscall), // 127
1971 LINX_(__NR_init_module, sys_init_module), // 128
1972 LINX_(__NR_delete_module, sys_delete_module), // 129
1973 //zz
1974 //zz // Nb: get_kernel_syms() was removed 2.4-->2.6
1975 GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130
1976 LINX_(__NR_quotactl, sys_quotactl), // 131
1977 GENX_(__NR_getpgid, sys_getpgid), // 132
1978 GENX_(__NR_fchdir, sys_fchdir), // 133
1979 //zz // (__NR_bdflush, sys_bdflush), // 134 */Linux
1980 //zz
1981 //zz // (__NR_sysfs, sys_sysfs), // 135 SVr4
1982 LINX_(__NR_personality, sys_personality), // 136
1983 GENX_(__NR_afs_syscall, sys_ni_syscall), // 137
1984 LINX_(__NR_setfsuid, sys_setfsuid16), // 138
1985 LINX_(__NR_setfsgid, sys_setfsgid16), // 139
1986
1987 LINXY(__NR__llseek, sys_llseek), // 140
1988 GENXY(__NR_getdents, sys_getdents), // 141
1989 GENX_(__NR__newselect, sys_select), // 142
1990 GENX_(__NR_flock, sys_flock), // 143
1991 GENX_(__NR_msync, sys_msync), // 144
1992
1993 GENXY(__NR_readv, sys_readv), // 145
1994 GENX_(__NR_writev, sys_writev), // 146
1995 GENX_(__NR_getsid, sys_getsid), // 147
1996 GENX_(__NR_fdatasync, sys_fdatasync), // 148
1997 LINXY(__NR__sysctl, sys_sysctl), // 149
1998
1999 GENX_(__NR_mlock, sys_mlock), // 150
2000 GENX_(__NR_munlock, sys_munlock), // 151
2001 GENX_(__NR_mlockall, sys_mlockall), // 152
2002 LINX_(__NR_munlockall, sys_munlockall), // 153
2003 LINXY(__NR_sched_setparam, sys_sched_setparam), // 154
2004
2005 LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
2006 LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
2007 LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
2008 LINX_(__NR_sched_yield, sys_sched_yield), // 158
2009 LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159
2010
2011 LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
2012 LINXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161
2013 GENXY(__NR_nanosleep, sys_nanosleep), // 162
2014 GENX_(__NR_mremap, sys_mremap), // 163
2015 LINX_(__NR_setresuid, sys_setresuid16), // 164
2016
2017 LINXY(__NR_getresuid, sys_getresuid16), // 165
2018 PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only
2019 GENX_(__NR_query_module, sys_ni_syscall), // 167
2020 GENXY(__NR_poll, sys_poll), // 168
2021 //zz // (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux
2022 //zz
2023 LINX_(__NR_setresgid, sys_setresgid16), // 170
2024 LINXY(__NR_getresgid, sys_getresgid16), // 171
2025 LINXY(__NR_prctl, sys_prctl), // 172
2026 PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173 x86/Linux only?
2027 LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174
2028
2029 LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175
2030 LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176
2031 LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177
2032 LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178
2033 LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179
2034
2035 GENXY(__NR_pread64, sys_pread64), // 180
2036 GENX_(__NR_pwrite64, sys_pwrite64), // 181
2037 LINX_(__NR_chown, sys_chown16), // 182
2038 GENXY(__NR_getcwd, sys_getcwd), // 183
2039 LINXY(__NR_capget, sys_capget), // 184
2040
2041 LINX_(__NR_capset, sys_capset), // 185
2042 GENXY(__NR_sigaltstack, sys_sigaltstack), // 186
2043 LINXY(__NR_sendfile, sys_sendfile), // 187
2044 GENXY(__NR_getpmsg, sys_getpmsg), // 188
2045 GENX_(__NR_putpmsg, sys_putpmsg), // 189
2046
2047 // Nb: we treat vfork as fork
2048 GENX_(__NR_vfork, sys_fork), // 190
2049 GENXY(__NR_ugetrlimit, sys_getrlimit), // 191
2050 PLAX_(__NR_mmap2, sys_mmap2), // 192
2051 GENX_(__NR_truncate64, sys_truncate64), // 193
2052 GENX_(__NR_ftruncate64, sys_ftruncate64), // 194
2053
2054 PLAXY(__NR_stat64, sys_stat64), // 195
2055 PLAXY(__NR_lstat64, sys_lstat64), // 196
2056 PLAXY(__NR_fstat64, sys_fstat64), // 197
2057 GENX_(__NR_lchown32, sys_lchown), // 198
2058 GENX_(__NR_getuid32, sys_getuid), // 199
2059
2060 GENX_(__NR_getgid32, sys_getgid), // 200
2061 GENX_(__NR_geteuid32, sys_geteuid), // 201
2062 GENX_(__NR_getegid32, sys_getegid), // 202
2063 GENX_(__NR_setreuid32, sys_setreuid), // 203
2064 GENX_(__NR_setregid32, sys_setregid), // 204
2065
2066 GENXY(__NR_getgroups32, sys_getgroups), // 205
2067 GENX_(__NR_setgroups32, sys_setgroups), // 206
2068 GENX_(__NR_fchown32, sys_fchown), // 207
2069 LINX_(__NR_setresuid32, sys_setresuid), // 208
2070 LINXY(__NR_getresuid32, sys_getresuid), // 209
2071
2072 LINX_(__NR_setresgid32, sys_setresgid), // 210
2073 LINXY(__NR_getresgid32, sys_getresgid), // 211
2074 GENX_(__NR_chown32, sys_chown), // 212
2075 GENX_(__NR_setuid32, sys_setuid), // 213
2076 GENX_(__NR_setgid32, sys_setgid), // 214
2077
2078 LINX_(__NR_setfsuid32, sys_setfsuid), // 215
2079 LINX_(__NR_setfsgid32, sys_setfsgid), // 216
2080 //zz // (__NR_pivot_root, sys_pivot_root), // 217 */Linux
2081 GENXY(__NR_mincore, sys_mincore), // 218
2082 GENX_(__NR_madvise, sys_madvise), // 219
2083
2084 GENXY(__NR_getdents64, sys_getdents64), // 220
2085 LINXY(__NR_fcntl64, sys_fcntl64), // 221
2086 GENX_(222, sys_ni_syscall), // 222
2087 PLAXY(223, sys_syscall223), // 223 // sys_bproc?
2088 LINX_(__NR_gettid, sys_gettid), // 224
2089
2090 LINX_(__NR_readahead, sys_readahead), // 225 */Linux
2091 LINX_(__NR_setxattr, sys_setxattr), // 226
2092 LINX_(__NR_lsetxattr, sys_lsetxattr), // 227
2093 LINX_(__NR_fsetxattr, sys_fsetxattr), // 228
2094 LINXY(__NR_getxattr, sys_getxattr), // 229
2095
2096 LINXY(__NR_lgetxattr, sys_lgetxattr), // 230
2097 LINXY(__NR_fgetxattr, sys_fgetxattr), // 231
2098 LINXY(__NR_listxattr, sys_listxattr), // 232
2099 LINXY(__NR_llistxattr, sys_llistxattr), // 233
2100 LINXY(__NR_flistxattr, sys_flistxattr), // 234
2101
2102 LINX_(__NR_removexattr, sys_removexattr), // 235
2103 LINX_(__NR_lremovexattr, sys_lremovexattr), // 236
2104 LINX_(__NR_fremovexattr, sys_fremovexattr), // 237
2105 LINXY(__NR_tkill, sys_tkill), // 238 */Linux
2106 LINXY(__NR_sendfile64, sys_sendfile64), // 239
2107
2108 LINXY(__NR_futex, sys_futex), // 240
2109 LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241
2110 LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242
2111 PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243
2112 PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244
2113
2114 LINXY(__NR_io_setup, sys_io_setup), // 245
2115 LINX_(__NR_io_destroy, sys_io_destroy), // 246
2116 LINXY(__NR_io_getevents, sys_io_getevents), // 247
2117 LINX_(__NR_io_submit, sys_io_submit), // 248
2118 LINXY(__NR_io_cancel, sys_io_cancel), // 249
2119
2120 LINX_(__NR_fadvise64, sys_fadvise64), // 250 */(Linux?)
2121 GENX_(251, sys_ni_syscall), // 251
2122 LINX_(__NR_exit_group, sys_exit_group), // 252
2123 LINXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253
2124 LINXY(__NR_epoll_create, sys_epoll_create), // 254
2125
2126 LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255
2127 LINXY(__NR_epoll_wait, sys_epoll_wait), // 256
2128 //zz // (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux
2129 LINX_(__NR_set_tid_address, sys_set_tid_address), // 258
2130 LINXY(__NR_timer_create, sys_timer_create), // 259
2131
2132 LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1)
2133 LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2)
2134 LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3)
2135 LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4)
2136 LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5)
2137
2138 LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6)
2139 LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7)
2140 LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */*
2141 GENXY(__NR_statfs64, sys_statfs64), // 268
2142 GENXY(__NR_fstatfs64, sys_fstatfs64), // 269
2143
2144 LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux
2145 GENX_(__NR_utimes, sys_utimes), // 271
2146 LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?)
2147 GENX_(__NR_vserver, sys_ni_syscall), // 273
2148 LINX_(__NR_mbind, sys_mbind), // 274 ?/?
2149
2150 LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/?
2151 LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/?
2152 LINXY(__NR_mq_open, sys_mq_open), // 277
2153 LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1)
2154 LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2)
2155
2156 LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3)
2157 LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4)
2158 LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5)
2159 GENX_(__NR_sys_kexec_load, sys_ni_syscall), // 283
2160 LINXY(__NR_waitid, sys_waitid), // 284
2161
2162 GENX_(285, sys_ni_syscall), // 285
2163 LINX_(__NR_add_key, sys_add_key), // 286
2164 LINX_(__NR_request_key, sys_request_key), // 287
2165 LINXY(__NR_keyctl, sys_keyctl), // 288
2166 LINX_(__NR_ioprio_set, sys_ioprio_set), // 289
2167
2168 LINX_(__NR_ioprio_get, sys_ioprio_get), // 290
2169 LINX_(__NR_inotify_init, sys_inotify_init), // 291
2170 LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292
2171 LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293
2172 // LINX_(__NR_migrate_pages, sys_migrate_pages), // 294
2173
2174 LINXY(__NR_openat, sys_openat), // 295
2175 LINX_(__NR_mkdirat, sys_mkdirat), // 296
2176 LINX_(__NR_mknodat, sys_mknodat), // 297
2177 LINX_(__NR_fchownat, sys_fchownat), // 298
2178 LINX_(__NR_futimesat, sys_futimesat), // 299
2179
2180 PLAXY(__NR_fstatat64, sys_fstatat64), // 300
2181 LINX_(__NR_unlinkat, sys_unlinkat), // 301
2182 LINX_(__NR_renameat, sys_renameat), // 302
2183 LINX_(__NR_linkat, sys_linkat), // 303
2184 LINX_(__NR_symlinkat, sys_symlinkat), // 304
2185
2186 LINX_(__NR_readlinkat, sys_readlinkat), // 305
2187 LINX_(__NR_fchmodat, sys_fchmodat), // 306
2188 LINX_(__NR_faccessat, sys_faccessat), // 307
2189 LINX_(__NR_pselect6, sys_pselect6), // 308
2190 LINXY(__NR_ppoll, sys_ppoll), // 309
2191
2192 // LINX_(__NR_unshare, sys_unshare), // 310
2193 LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
2194 LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
2195 LINX_(__NR_splice, sys_splice), // 313
2196 LINX_(__NR_sync_file_range, sys_sync_file_range), // 314
2197
2198 LINX_(__NR_tee, sys_tee), // 315
2199 LINXY(__NR_vmsplice, sys_vmsplice), // 316
2200 LINXY(__NR_move_pages, sys_move_pages), // 317
2201 LINXY(__NR_getcpu, sys_getcpu), // 318
2202 LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319
2203
2204 LINX_(__NR_utimensat, sys_utimensat), // 320
2205 LINXY(__NR_signalfd, sys_signalfd), // 321
2206 LINXY(__NR_timerfd_create, sys_timerfd_create), // 322
2207 LINX_(__NR_eventfd, sys_eventfd), // 323
2208 LINX_(__NR_fallocate, sys_fallocate), // 324
2209
2210 LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325
2211 LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 326
2212 LINXY(__NR_signalfd4, sys_signalfd4), // 327
2213 LINX_(__NR_eventfd2, sys_eventfd2), // 328
2214 LINXY(__NR_epoll_create1, sys_epoll_create1), // 329
2215
2216 LINXY(__NR_dup3, sys_dup3), // 330
2217 LINXY(__NR_pipe2, sys_pipe2), // 331
2218 LINXY(__NR_inotify_init1, sys_inotify_init1), // 332
2219 LINXY(__NR_preadv, sys_preadv), // 333
2220 LINX_(__NR_pwritev, sys_pwritev), // 334
2221
2222 LINXY(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo),// 335
2223 LINXY(__NR_perf_event_open, sys_perf_event_open), // 336
2224 LINXY(__NR_recvmmsg, sys_recvmmsg), // 337
2225 // LINX_(__NR_fanotify_init, sys_ni_syscall), // 338
2226 // LINX_(__NR_fanotify_mark, sys_ni_syscall), // 339
2227
2228 LINXY(__NR_prlimit64, sys_prlimit64), // 340
2229 // LINX_(__NR_name_to_handle_at, sys_ni_syscall), // 341
2230 // LINX_(__NR_open_by_handle_at, sys_ni_syscall), // 342
2231 // LINX_(__NR_clock_adjtime, sys_ni_syscall), // 343
2232 // LINX_(__NR_syncfs, sys_ni_syscall), // 344
2233
2234 LINXY(__NR_sendmmsg, sys_sendmmsg), // 345
2235 // LINX_(__NR_setns, sys_ni_syscall), // 346
2236 LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 347
2237 LINX_(__NR_process_vm_writev, sys_process_vm_writev) // 348
2238 };
2239
ML_(get_linux_syscall_entry)2240 SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
2241 {
2242 const UInt syscall_table_size
2243 = sizeof(syscall_table) / sizeof(syscall_table[0]);
2244
2245 /* Is it in the contiguous initial section of the table? */
2246 if (sysno < syscall_table_size) {
2247 SyscallTableEntry* sys = &syscall_table[sysno];
2248 if (sys->before == NULL)
2249 return NULL; /* no entry */
2250 else
2251 return sys;
2252 }
2253
2254 /* Can't find a wrapper */
2255 return NULL;
2256 }
2257
2258 #endif // defined(VGP_x86_linux)
2259
2260 /*--------------------------------------------------------------------*/
2261 /*--- end ---*/
2262 /*--------------------------------------------------------------------*/
2263