/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.    syswrap-amd64-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2014-2015 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_tooliface.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"


/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* %rdi */
                                Addr retaddr,           /* %rsi */
                                void (*f)(Word),        /* %rdx */
                                Word arg1);             /* %rcx */
__asm__ (
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movq  %rdi, %rsp\n"         /* set stack */
"   movq  %rcx, %rdi\n"         /* set arg1 */
"   pushq %rsi\n"               /* retaddr to stack */
"   pushq %rdx\n"               /* f to stack */
"   movq  $0, %rax\n"           /* zero all GP regs (except %rdi) */
"   movq  $0, %rbx\n"
"   movq  $0, %rcx\n"
"   movq  $0, %rdx\n"
"   movq  $0, %rsi\n"
"   movq  $0, %rbp\n"
"   movq  $0, %r8\n"
"   movq  $0, %r9\n"
"   movq  $0, %r10\n"
"   movq  $0, %r11\n"
"   movq  $0, %r12\n"
"   movq  $0, %r13\n"
"   movq  $0, %r14\n"
"   movq  $0, %r15\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
".previous\n"
);
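
/* A minimal usage sketch (illustrative only, not part of this file): run a
   function on a freshly allocated stack once the current stack must no
   longer be touched.  The names 'bye', 'new_stack_base' and 'new_stack_size'
   are hypothetical.

      static void bye(Word code) { ... never returns ... }

      Addr sp = new_stack_base + new_stack_size;    // stack grows downwards
      sp &= ~(Addr)0xF;                             // keep 16-byte alignment
      ML_(call_on_new_stack_0_1)(sp, 0, bye, code); // 0 = bogus return addr
*/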

/* This function is called to set up a context of a new Valgrind thread (which
   will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack.  It should always be 16-byte aligned before a function
      call, i.e. the first parameter is then also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0; /* bogus return value */

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_REG_RDI] = (UWord)tst; /* the parameter */
   uc->uc_mcontext.gregs[VKI_REG_RIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_REG_RSP] = (UWord)stack;
}
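
/* For reference: the arithmetic above mimics what a CALL instruction would
   leave behind under the AMD64 ABI.  'stack' is 16-byte aligned; pushing the
   single 8-byte bogus return value gives RSP % 16 == 8, which is exactly the
   entry state ML_(start_thread_NORETURN) would observe had it been reached
   via a call from correctly aligned code. */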

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_REG_RIP] = tst->arch.vex.guest_RIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RAX] = tst->arch.vex.guest_RAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBX] = tst->arch.vex.guest_RBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RCX] = tst->arch.vex.guest_RCX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RCX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDX] = tst->arch.vex.guest_RDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBP] = tst->arch.vex.guest_RBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSI] = tst->arch.vex.guest_RSI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDI] = tst->arch.vex.guest_RDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R8] = tst->arch.vex.guest_R8;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R8,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R9] = tst->arch.vex.guest_R9;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R9,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R10] = tst->arch.vex.guest_R10;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R10,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R11] = tst->arch.vex.guest_R11;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R11,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R12] = tst->arch.vex.guest_R12;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R12,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R13] = tst->arch.vex.guest_R13;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R13,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R14] = tst->arch.vex.guest_R14;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R14,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R15] = tst->arch.vex.guest_R15;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R15,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSP] = tst->arch.vex.guest_RSP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], sizeof(UWord));

   /* ERR and TRAPNO */
   uc->uc_mcontext.gregs[VKI_REG_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ERR], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_TRAPNO], sizeof(UWord));

   /* Segment registers */
   /* Valgrind does not support moves from/to segment registers on AMD64.  The
      values returned below are the ones that are set by the kernel when
      a program is started. */
   uc->uc_mcontext.gregs[VKI_REG_CS] = VKI_UCS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_CS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_DS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_DS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_SS] = VKI_UDS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_SS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_ES] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ES], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_FS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GS], sizeof(UWord));

   /* Segment bases */
   uc->uc_mcontext.gregs[VKI_REG_FSBASE] = tst->arch.vex.guest_FS_CONST;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GSBASE] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GSBASE], sizeof(UWord));

   /* Handle rflags.  Refer to the x86-solaris variant of this code for
      a detailed description. */
   uc->uc_mcontext.gregs[VKI_REG_RFL] =
      LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid,
         (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   VKI_UC_GUEST_RFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_REG_RFL];
   /* Calculate a checksum. */
   {
      ULong buf[5];
      ULong checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_REG_RFL];
      checksum = ML_(fletcher64)((UInt*)&buf, sizeof(buf) / sizeof(UInt));
      VKI_UC_GUEST_RFLAGS_CHECKSUM(uc) = checksum;
   }
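
   /* Why the negated copy and the checksum: ML_(restore_machine_context)
      uses them to decide how much of the rflags state can be reinstated.
      If the application left the saved rflags untouched
      (rflags == ~VKI_UC_GUEST_RFLAGS_NEG(uc)) and
      ML_(fletcher64)({CC_OP, CC_NDEP, CC_DEP1, CC_DEP2, rflags}) still
      matches VKI_UC_GUEST_RFLAGS_CHECKSUM(uc), the exact CC_* thunk is
      restored; otherwise only an approximate rflags value is recomputed.
      (ML_(fletcher64) is the Solaris syswrap helper implementing a
      Fletcher-style checksum over 32-bit words; see syswrap-solaris.c.) */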

   /* FPU */
   /* The fpregset_t structure on amd64 follows the layout that is used by the
      FXSAVE instruction, therefore it is only necessary to call a VEX
      function that simulates this instruction. */
   LibVEX_GuestAMD64_fxsave(&tst->arch.vex, (HWord)fs);
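
   /* For orientation (per the Intel SDM, 64-bit FXSAVE format), the image
      written above starts with:
         bytes  0-1  FCW  (fs->cw),       2-3  FSW  (fs->sw),
         byte   4    abridged FTW (fs->fctw),  5  reserved (fs->__fx_rsvd),
         bytes  6-7  FOP  (fs->fop),      8-15 FIP  (fs->rip),
         bytes 16-23 FDP  (fs->rdp),     24-27 MXCSR, 28-31 MXCSR_MASK,
      followed by the ST and XMM register slots annotated below. */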

   /* Control word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->cw, sizeof(fs->cw));
   /* Status word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->sw, sizeof(fs->sw));
   /* Compressed tag word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fctw, sizeof(fs->fctw));
   /* Unused */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->__fx_rsvd,
            sizeof(fs->__fx_rsvd));
   vg_assert(fs->__fx_rsvd == 0);
   /* Last x87 opcode */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fop, sizeof(fs->fop));
   vg_assert(fs->fop == 0);
   /* Last x87 instruction pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rip, sizeof(fs->rip));
   vg_assert(fs->rip == 0);
   /* Last x87 data pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rdp, sizeof(fs->rdp));
   vg_assert(fs->rdp == 0);
   /* Media-instruction control and status register */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
   /* Supported features in MXCSR */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr_mask,
            sizeof(fs->mxcsr_mask));

   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->st[i];
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }
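
   /* Concretely: each fs->st[i] slot holds an 80-bit x87 register (8-byte
      mantissa at 'addr', 2-byte sign/exponent at 'addr + 8'), but VEX keeps
      the register only as a 64-bit double in guest_FPREG[i].  The shadow
      state of that single 64-bit value is therefore propagated to both
      pieces, which is why copy_reg_to_mem appears twice per register. */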

   /* XMM registers */
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM0), (Addr)&fs->xmm[0], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM1), (Addr)&fs->xmm[1], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM2), (Addr)&fs->xmm[2], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM3), (Addr)&fs->xmm[3], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM4), (Addr)&fs->xmm[4], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM5), (Addr)&fs->xmm[5], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM6), (Addr)&fs->xmm[6], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM7), (Addr)&fs->xmm[7], sizeof(U128));

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_RIP = uc->uc_mcontext.gregs[VKI_REG_RIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], OFFSET_amd64_RIP,
               sizeof(UWord));
      tst->arch.vex.guest_RAX = uc->uc_mcontext.gregs[VKI_REG_RAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], OFFSET_amd64_RAX,
               sizeof(UWord));
      tst->arch.vex.guest_RBX = uc->uc_mcontext.gregs[VKI_REG_RBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], OFFSET_amd64_RBX,
               sizeof(UWord));
      tst->arch.vex.guest_RCX = uc->uc_mcontext.gregs[VKI_REG_RCX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], OFFSET_amd64_RCX,
               sizeof(UWord));
      tst->arch.vex.guest_RDX = uc->uc_mcontext.gregs[VKI_REG_RDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], OFFSET_amd64_RDX,
               sizeof(UWord));
      tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], OFFSET_amd64_RBP,
               sizeof(UWord));
      tst->arch.vex.guest_RSI = uc->uc_mcontext.gregs[VKI_REG_RSI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], OFFSET_amd64_RSI,
               sizeof(UWord));
      tst->arch.vex.guest_RDI = uc->uc_mcontext.gregs[VKI_REG_RDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], OFFSET_amd64_RDI,
               sizeof(UWord));
      tst->arch.vex.guest_R8 = uc->uc_mcontext.gregs[VKI_REG_R8];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], OFFSET_amd64_R8,
               sizeof(UWord));
      tst->arch.vex.guest_R9 = uc->uc_mcontext.gregs[VKI_REG_R9];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], OFFSET_amd64_R9,
               sizeof(UWord));
      tst->arch.vex.guest_R10 = uc->uc_mcontext.gregs[VKI_REG_R10];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], OFFSET_amd64_R10,
               sizeof(UWord));
      tst->arch.vex.guest_R11 = uc->uc_mcontext.gregs[VKI_REG_R11];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], OFFSET_amd64_R11,
               sizeof(UWord));
      tst->arch.vex.guest_R12 = uc->uc_mcontext.gregs[VKI_REG_R12];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], OFFSET_amd64_R12,
               sizeof(UWord));
      tst->arch.vex.guest_R13 = uc->uc_mcontext.gregs[VKI_REG_R13];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], OFFSET_amd64_R13,
               sizeof(UWord));
      tst->arch.vex.guest_R14 = uc->uc_mcontext.gregs[VKI_REG_R14];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], OFFSET_amd64_R14,
               sizeof(UWord));
      tst->arch.vex.guest_R15 = uc->uc_mcontext.gregs[VKI_REG_R15];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], OFFSET_amd64_R15,
               sizeof(UWord));
      tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], OFFSET_amd64_RSP,
               sizeof(UWord));

      /* Ignore ERR and TRAPNO. */

      /* Ignore segment registers. */

      /* Segment bases */
      tst->arch.vex.guest_FS_CONST = uc->uc_mcontext.gregs[VKI_REG_FSBASE];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE],
               offsetof(VexGuestAMD64State, guest_FS_CONST), sizeof(UWord));

      /* Rflags.  Refer to the x86-solaris variant of this code for a detailed
         description. */
      {
         ULong rflags;
         ULong orig_rflags;
         ULong new_rflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_REG_RFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
         rflags = uc->uc_mcontext.gregs[VKI_REG_RFL];
         orig_rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
         new_rflags = rflags;
         /* The kernel disallows the ID flag to be changed via the setcontext
            call, thus do the same. */
         if (orig_rflags & VKI_RFLAGS_ID_BIT)
            new_rflags |= VKI_RFLAGS_ID_BIT;
         else
            new_rflags &= ~VKI_RFLAGS_ID_BIT;
         LibVEX_GuestAMD64_put_rflags(new_rflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP2), sizeof(UWord));

         if (rflags != ~VKI_UC_GUEST_RFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                             "The rflags value was restored from an "
                             "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            ULong buf[5];
            ULong checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = rflags;
            checksum = ML_(fletcher64)((UInt*)&buf,
                                       sizeof(buf) / sizeof(UInt));
            if (checksum == VKI_UC_GUEST_RFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                                "The CC_* guest state values were fully "
                                "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                             "Cannot fully restore the CC_* guest state "
                             "values, using approximate rflags in thread "
                             "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Control word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..cw)",
               (Addr)&fs->cw, sizeof(fs->cw));
      /* Status word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..sw)",
               (Addr)&fs->sw, sizeof(fs->sw));
      /* Compressed tag word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fctw)",
               (Addr)&fs->fctw, sizeof(fs->fctw));
      /* Last x87 opcode */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fop)",
               (Addr)&fs->fop, sizeof(fs->fop));
      /* Last x87 instruction pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rip)",
               (Addr)&fs->rip, sizeof(fs->rip));
      /* Last x87 data pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rdp)",
               (Addr)&fs->rdp, sizeof(fs->rdp));
      /* Media-instruction control and status register */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      /* Supported features in MXCSR */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr_mask)",
               (Addr)&fs->mxcsr_mask, sizeof(fs->mxcsr_mask));

      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->st[i];
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestAMD64State, guest_FPREG[i]), sizeof(ULong));
      }

      /* XMM registers */
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestAMD64State, guest_YMM0), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestAMD64State, guest_YMM1), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestAMD64State, guest_YMM2), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestAMD64State, guest_YMM3), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestAMD64State, guest_YMM4), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestAMD64State, guest_YMM5), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestAMD64State, guest_YMM6), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestAMD64State, guest_YMM7), sizeof(U128));

      note = LibVEX_GuestAMD64_fxrstor((HWord)fs, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring FP state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
   }
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for AMD64/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(amd64_solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(amd64_solaris, name)

/* implementation */
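
/* There are currently no AMD64/Solaris-specific wrappers; the Solaris
   syscalls are handled by the common wrappers in syswrap-solaris.c.  If one
   were ever added here, it would follow the usual syswrap pattern, roughly
   (the syscall name and arguments below are hypothetical):

      PRE(sys_example)
      {
         PRINT("sys_example ( %ld, %#lx )", SARG1, ARG2);
         PRE_REG_READ2(long, "example", long, code, void *, buf);
         PRE_MEM_WRITE("example(buf)", ARG2, sizeof(vki_size_t));
      }

      POST(sys_example)
      {
         POST_MEM_WRITE(ARG2, sizeof(vki_size_t));
      }
*/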

#undef PRE
#undef POST

#endif // defined(VGP_amd64_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/