
/*---------------------------------------------------------------*/
/*--- begin                                 guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2015 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_ppc_defs.h"

/* This file contains helper functions for ppc32 and ppc64 guest code.
   Calls to these functions are generated by the back end.  These
   calls are of course in the host machine code and this file will be
   compiled to host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest-ppc/toIR.c.
*/


/*---------------------------------------------------------------*/
/*--- Misc integer helpers.                                   ---*/
/*---------------------------------------------------------------*/

/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-ppc platforms, return 1. */
/* Reads a complete, consistent 64-bit TB value. */
ULong ppcg_dirtyhelper_MFTB ( void )
{
#  if defined(__powerpc__)
   ULong res;
   UInt  lo, hi1, hi2;
   while (1) {
      __asm__ __volatile__ ("\n"
         "\tmftbu %0\n"
         "\tmftb %1\n"
         "\tmftbu %2\n"
         : "=r" (hi1), "=r" (lo), "=r" (hi2)
      );
      if (hi1 == hi2) break;
   }
   res = ((ULong)hi1) << 32;
   res |= (ULong)lo;
   return res;
#  else
   return 1ULL;
#  endif
}
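
/* The hi/lo/hi read sequence above guards against the 32-bit TBU/TBL
   pair rolling over between the two reads: if TBU changed while TBL was
   being read, the two TBU samples differ and the loop retries.  On a
   64-bit PowerPC the whole timebase is readable in one instruction, so
   a hypothetical 64-bit-only variant (illustrative sketch, not used and
   not compiled here) could be as simple as: */
#if 0
static ULong mftb_64bit_only ( void )
{
   ULong tb;
   /* On ppc64, mftb returns the full 64-bit timebase. */
   __asm__ __volatile__("mftb %0" : "=r"(tb));
   return tb;
}
#endif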


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially transparent) */
UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt r269 )
{
#  if defined(__powerpc__)
   UInt spr;
   if (r269) {
      __asm__ __volatile__("mfspr %0,269" : "=b"(spr));
   } else {
      __asm__ __volatile__("mfspr %0,268" : "=b"(spr));
   }
   return spr;
#  else
   return 0;
#  endif
}
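
/* SPRs 268 and 269 are the user-readable timebase registers: 268 is the
   timebase lower half (mftb) and 269 the upper half (mftbu).  So this
   helper is just the timebase read split into two 32-bit pieces, with
   r269 selecting which half to return. */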


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (I'm not really sure what the side effects are) */
UInt ppc32g_dirtyhelper_MFSPR_287 ( void )
{
#  if defined(__powerpc__)
   UInt spr;
   __asm__ __volatile__("mfspr %0,287" : "=b"(spr));
   return spr;
#  else
   return 0;
#  endif
}
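
/* Note: SPR 287 is the Processor Version Register (PVR).  When not
   running on a ppc host the helper returns 0, so the guest then sees a
   PVR of zero rather than any real processor identity. */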


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (writes guest state) */
void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
                              UInt vD_off, UInt sh, UInt shift_right )
{
   static
   UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                     0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
   U128* pU128_src;
   U128* pU128_dst;

   vassert( vD_off      <= sizeof(VexGuestPPC32State)-8 );
   vassert( sh          <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;
   /* else shift left */

   pU128_src = (U128*)&ref[sh];
   pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

   (*pU128_dst)[0] = (*pU128_src)[0];
   (*pU128_dst)[1] = (*pU128_src)[1];
   (*pU128_dst)[2] = (*pU128_src)[2];
   (*pU128_dst)[3] = (*pU128_src)[3];
}
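
/* What the helper computes is the permute control vector that a real
   lvsl (shift_right == 0) or lvsr (shift_right == 1) would produce for
   an effective address whose low four bits are sh; generated code then
   typically feeds that vector to vperm to realign an unaligned load.
   A small illustration (not compiled) of the expected result for lvsl
   with sh == 3: */
#if 0
UChar expect[16];
Int   i;
for (i = 0; i < 16; i++)
   expect[i] = (UChar)(3 + i);   /* 0x03, 0x04, ..., 0x12 */
/* ppc32g_dirtyhelper_LVS(gst, vD_off, 3, 0) writes exactly these 16
   bytes into the vector register at offset vD_off. */
#endif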

/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (writes guest state) */
void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
                              UInt vD_off, UInt sh, UInt shift_right,
                              UInt endness )
{
   UChar ref[32];
   ULong i;
   Int   k;
   /* ref[] used to be a static const array, but this doesn't work on
      ppc64 because VEX doesn't load the TOC pointer for the call here,
      and so we wind up picking up some totally random other data.
      (It's a wonder we don't segfault.)  So, just to be clear, this
      "fix" (vex r2073) is really a kludgearound for the fact that
      VEX's 64-bit ppc code generation doesn't provide a valid TOC
      pointer for helper function calls.  Ick.  (Bug 250038) */
   for (i = 0; i < 32; i++) ref[i] = i;

   U128* pU128_src;
   U128* pU128_dst;

   vassert( vD_off      <= sizeof(VexGuestPPC64State)-8 );
   vassert( sh          <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;
   /* else shift left */

   pU128_src = (U128*)&ref[sh];
   pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

   if ((0x1 & endness) == 0x0) {
      /* Little endian */
      unsigned char *srcp, *dstp;
      srcp = (unsigned char *)pU128_src;
      dstp = (unsigned char *)pU128_dst;
      for (k = 15; k >= 0; k--, srcp++)
         dstp[k] = *srcp;
   } else {
      (*pU128_dst)[0] = (*pU128_src)[0];
      (*pU128_dst)[1] = (*pU128_src)[1];
      (*pU128_dst)[2] = (*pU128_src)[2];
      (*pU128_dst)[3] = (*pU128_src)[3];
   }
}
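
/* The low bit of the endness parameter selects the guest byte order:
   0 means a little-endian guest, in which case the 16 permute-control
   bytes are stored reversed so that the register image in the guest
   state is laid out as the LE guest expects; any other value takes the
   straight big-endian copy, the same path as the ppc32 helper above. */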


/* Helper-function specialiser. */

IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts )
{
   return NULL;
}

IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts )
{
   return NULL;
}
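
/* Both specialisers return NULL, meaning "no specialisation": iropt
   calls them to give the guest a chance to fold calls to clean helpers
   with known arguments into simpler IR, and the ppc front end currently
   has no helpers worth folding (condition codes are held directly in
   the guest state rather than computed lazily).  A purely hypothetical
   folding rule, shown only to illustrate the shape such code would
   take (the helper name below is made up): */
#if 0
if (vex_streq(function_name, "ppc32g_calculate_something")
    && args[0]->tag == Iex_Const) {
   /* Replace the helper call with a constant result. */
   return IRExpr_Const(IRConst_U32(0));
}
#endif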


/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state )
{
#  define FIELD(_n)                                    \
      ( ( (UInt)                                       \
           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
             | (vex_state->guest_CR##_n##_0 & 1)       \
           )                                           \
        )                                              \
        << (4 * (7-(_n)))                              \
      )

   return
        FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

#  undef FIELD
}
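
/* Worked example (illustration only, not compiled): the guest state
   keeps each 4-bit CR field split into bits 3..1 (guest_CRn_321, stored
   in place) and bit 0 (guest_CRn_0), and FIELD(n) re-packs field n into
   bits 31-4n .. 28-4n of the architected CR.  So with only CR0.LT set: */
#if 0
VexGuestPPC32State st;
LibVEX_GuestPPC32_initialise(&st);
st.guest_CR0_321 = 8;                                 /* CR0.LT */
vassert(LibVEX_GuestPPC32_get_CR(&st) == 0x80000000u);
#endif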


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state )
{
#  define FIELD(_n)                                    \
      ( ( (UInt)                                       \
           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
             | (vex_state->guest_CR##_n##_0 & 1)       \
           )                                           \
        )                                              \
        << (4 * (7-(_n)))                              \
      )

   return
        FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

#  undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC32State* vex_state )
{
   UInt t;

#  define FIELD(_n)                                           \
      do {                                                    \
         t = cr_native >> (4*(7-(_n)));                       \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);      \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1)); \
      } while (0)

   FIELD(0);
   FIELD(1);
   FIELD(2);
   FIELD(3);
   FIELD(4);
   FIELD(5);
   FIELD(6);
   FIELD(7);

#  undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC64State* vex_state )
{
   UInt t;

#  define FIELD(_n)                                           \
      do {                                                    \
         t = cr_native >> (4*(7-(_n)));                       \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);      \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1)); \
      } while (0)

   FIELD(0);
   FIELD(1);
   FIELD(2);
   FIELD(3);
   FIELD(4);
   FIELD(5);
   FIELD(6);
   FIELD(7);

#  undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   return w;
}


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   return w;
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC32State* vex_state )
{
   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
}

/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC64State* vex_state )
{
   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
}
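
/* Only the XER bits that the ppc front end actually models are carried
   here: SO (bit 31), OV (bit 30), CA (bit 29) and the low byte-count
   field (guest_XER_BC, the low 8 bits).  The bits in between read back
   as zero, so put_XER followed by get_XER round-trips exactly those
   fields and nothing else. */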

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state )
{
   Int i;
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;
   vex_state->pad3 = 0;
   vex_state->pad4 = 0;

   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   vex_state->guest_CIA = 0;
   vex_state->guest_LR  = 0;
   vex_state->guest_CTR = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC32_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_PPR = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with

   vex_state->padding1 = 0;
   vex_state->padding2 = 0;
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state )
{
   Int i;
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;
   vex_state->pad0 = 0;
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   vex_state->guest_CIA = 0;
   vex_state->guest_LR  = 0;
   vex_state->guest_CTR = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->padding = 0;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC64_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_TFHAR  = 0;
   vex_state->guest_TFIAR  = 0;
   vex_state->guest_TEXASR = 0;
   vex_state->guest_PPR = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with
}
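
/* TFHAR, TFIAR and TEXASR are the transactional-memory SPRs introduced
   with ISA 2.07; they exist only in the 64-bit guest state, which is
   why the 32-bit initialiser above has no counterpart for them. */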


/*-----------------------------------------------------------*/
/*--- Describing the ppc guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   By default we enforce precise exns for guest R1 (stack pointer),
   CIA (current insn address) and LR (link register).  These are the
   minimum needed to extract correct stack backtraces from ppc
   code.  [[NB: not sure if keeping LR up to date is actually
   necessary.]]

   Only R1 is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_ppc32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int lr_min  = offsetof(VexGuestPPC32State, guest_LR);
   Int lr_max  = lr_min + 4 - 1;
   Int r1_min  = offsetof(VexGuestPPC32State, guest_GPR1);
   Int r1_max  = r1_min + 4 - 1;
   Int cia_min = offsetof(VexGuestPPC32State, guest_CIA);
   Int cia_max = cia_min + 4 - 1;

   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}

Bool guest_ppc64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   /* Given that R2 is a Big Deal in the ELF ppc64 ABI, it seems
      prudent to be conservative with it, even though thus far there
      is no evidence to suggest that it actually needs to be kept up
      to date wrt possible exceptions. */
   Int lr_min  = offsetof(VexGuestPPC64State, guest_LR);
   Int lr_max  = lr_min + 8 - 1;
   Int r1_min  = offsetof(VexGuestPPC64State, guest_GPR1);
   Int r1_max  = r1_min + 8 - 1;
   Int r2_min  = offsetof(VexGuestPPC64State, guest_GPR2);
   Int r2_max  = r2_min + 8 - 1;
   Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
   Int cia_max = cia_min + 8 - 1;

   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < r2_min || minoff > r2_max) {
      /* no overlap with R2 */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}


#define ALWAYSDEFD32(field)                            \
    { offsetof(VexGuestPPC32State, field),             \
      (sizeof ((VexGuestPPC32State*)0)->field) }

VexGuestLayout
   ppc32Guest_layout
   = {
        /* Total size of the guest state, in bytes. */
        .total_sizeB = sizeof(VexGuestPPC32State),

        /* Describe the stack pointer. */
        .offset_SP = offsetof(VexGuestPPC32State,guest_GPR1),
        .sizeof_SP = 4,

        /* Describe the frame pointer. */
        .offset_FP = offsetof(VexGuestPPC32State,guest_GPR1),
        .sizeof_FP = 4,

        /* Describe the instruction pointer. */
        .offset_IP = offsetof(VexGuestPPC32State,guest_CIA),
        .sizeof_IP = 4,

        /* Describe any sections to be regarded by Memcheck as
           'always-defined'. */
        .n_alwaysDefd = 11,

        .alwaysDefd
        = { /*  0 */ ALWAYSDEFD32(guest_CIA),
            /*  1 */ ALWAYSDEFD32(guest_EMNOTE),
            /*  2 */ ALWAYSDEFD32(guest_CMSTART),
            /*  3 */ ALWAYSDEFD32(guest_CMLEN),
            /*  4 */ ALWAYSDEFD32(guest_VSCR),
            /*  5 */ ALWAYSDEFD32(guest_FPROUND),
            /*  6 */ ALWAYSDEFD32(guest_NRADDR),
            /*  7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
            /*  8 */ ALWAYSDEFD32(guest_REDIR_SP),
            /*  9 */ ALWAYSDEFD32(guest_REDIR_STACK),
            /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL)
          }
     };

#define ALWAYSDEFD64(field)                            \
    { offsetof(VexGuestPPC64State, field),             \
      (sizeof ((VexGuestPPC64State*)0)->field) }

VexGuestLayout
   ppc64Guest_layout
   = {
        /* Total size of the guest state, in bytes. */
        .total_sizeB = sizeof(VexGuestPPC64State),

        /* Describe the stack pointer. */
        .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
        .sizeof_SP = 8,

        /* Describe the frame pointer. */
        .offset_FP = offsetof(VexGuestPPC64State,guest_GPR1),
        .sizeof_FP = 8,

        /* Describe the instruction pointer. */
        .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
        .sizeof_IP = 8,

        /* Describe any sections to be regarded by Memcheck as
           'always-defined'. */
        .n_alwaysDefd = 11,

        .alwaysDefd
        = { /*  0 */ ALWAYSDEFD64(guest_CIA),
            /*  1 */ ALWAYSDEFD64(guest_EMNOTE),
            /*  2 */ ALWAYSDEFD64(guest_CMSTART),
            /*  3 */ ALWAYSDEFD64(guest_CMLEN),
            /*  4 */ ALWAYSDEFD64(guest_VSCR),
            /*  5 */ ALWAYSDEFD64(guest_FPROUND),
            /*  6 */ ALWAYSDEFD64(guest_NRADDR),
            /*  7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
            /*  8 */ ALWAYSDEFD64(guest_REDIR_SP),
            /*  9 */ ALWAYSDEFD64(guest_REDIR_STACK),
            /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL)
          }
     };

/*---------------------------------------------------------------*/
/*--- end                                   guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/