1 
2 /*--------------------------------------------------------------------*/
3 /*--- Interface to LibVEX_Translate, and the SP-update pass        ---*/
4 /*---                                                m_translate.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of Valgrind, a dynamic binary instrumentation
9    framework.
10 
11    Copyright (C) 2000-2013 Julian Seward
12       jseward@acm.org
13 
14    This program is free software; you can redistribute it and/or
15    modify it under the terms of the GNU General Public License as
16    published by the Free Software Foundation; either version 2 of the
17    License, or (at your option) any later version.
18 
19    This program is distributed in the hope that it will be useful, but
20    WITHOUT ANY WARRANTY; without even the implied warranty of
21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22    General Public License for more details.
23 
24    You should have received a copy of the GNU General Public License
25    along with this program; if not, write to the Free Software
26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27    02111-1307, USA.
28 
29    The GNU General Public License is contained in the file COPYING.
30 */
31 
32 #include "pub_core_basics.h"
33 #include "pub_core_vki.h"
34 #include "pub_core_aspacemgr.h"
35 
36 #include "pub_core_machine.h"    // VG_(fnptr_to_fnentry)
37                                  // VG_(get_SP)
38                                  // VG_(machine_get_VexArchInfo)
39 #include "pub_core_libcbase.h"
40 #include "pub_core_libcassert.h"
41 #include "pub_core_libcprint.h"
42 #include "pub_core_options.h"
43 
44 #include "pub_core_debuginfo.h"  // VG_(get_fnname_w_offset)
45 #include "pub_core_redir.h"      // VG_(redir_do_lookup)
46 
47 #include "pub_core_signals.h"    // VG_(synth_fault_{perms,mapping})
48 #include "pub_core_stacks.h"     // VG_(unknown_SP_update*)()
49 #include "pub_core_tooliface.h"  // VG_(tdict)
50 
51 #include "pub_core_translate.h"
52 #include "pub_core_transtab.h"
53 #include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
54                                // VG_(run_a_noredir_translation__return_point)
55 
56 #include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
57 #include "pub_core_threadstate.h"  // VexGuestArchState
58 #include "pub_core_trampoline.h"   // VG_(ppctoc_magic_redirect_return_stub)
59 
60 #include "pub_core_execontext.h"  // VG_(make_depth_1_ExeContext_from_Addr)
61 
62 #include "pub_core_gdbserver.h"   // VG_(tool_instrument_then_gdbserver_if_needed)
63 
64 #include "libvex_emnote.h"        // For PPC, EmWarn_PPC64_redir_underflow
65 
66 /*------------------------------------------------------------*/
67 /*--- Stats                                                ---*/
68 /*------------------------------------------------------------*/
69 
70 static UInt n_SP_updates_fast            = 0;
71 static UInt n_SP_updates_generic_known   = 0;
72 static UInt n_SP_updates_generic_unknown = 0;
73 
74 void VG_(print_translation_stats) ( void )
75 {
76    HChar buf[7];
77    UInt n_SP_updates = n_SP_updates_fast + n_SP_updates_generic_known
78                                          + n_SP_updates_generic_unknown;
79    VG_(percentify)(n_SP_updates_fast, n_SP_updates, 1, 6, buf);
80    VG_(message)(Vg_DebugMsg,
81       "translate:            fast SP updates identified: %'u (%s)\n",
82       n_SP_updates_fast, buf );
83 
84    VG_(percentify)(n_SP_updates_generic_known, n_SP_updates, 1, 6, buf);
85    VG_(message)(Vg_DebugMsg,
86       "translate:   generic_known SP updates identified: %'u (%s)\n",
87       n_SP_updates_generic_known, buf );
88 
89    VG_(percentify)(n_SP_updates_generic_unknown, n_SP_updates, 1, 6, buf);
90    VG_(message)(Vg_DebugMsg,
91       "translate: generic_unknown SP updates identified: %'u (%s)\n",
92       n_SP_updates_generic_unknown, buf );
93 }
94 
95 /*------------------------------------------------------------*/
96 /*--- %SP-update pass                                      ---*/
97 /*------------------------------------------------------------*/
98 
99 static Bool need_to_handle_SP_assignment(void)
100 {
101    return ( VG_(tdict).track_new_mem_stack_4   ||
102             VG_(tdict).track_die_mem_stack_4   ||
103             VG_(tdict).track_new_mem_stack_8   ||
104             VG_(tdict).track_die_mem_stack_8   ||
105             VG_(tdict).track_new_mem_stack_12  ||
106             VG_(tdict).track_die_mem_stack_12  ||
107             VG_(tdict).track_new_mem_stack_16  ||
108             VG_(tdict).track_die_mem_stack_16  ||
109             VG_(tdict).track_new_mem_stack_32  ||
110             VG_(tdict).track_die_mem_stack_32  ||
111             VG_(tdict).track_new_mem_stack_112 ||
112             VG_(tdict).track_die_mem_stack_112 ||
113             VG_(tdict).track_new_mem_stack_128 ||
114             VG_(tdict).track_die_mem_stack_128 ||
115             VG_(tdict).track_new_mem_stack_144 ||
116             VG_(tdict).track_die_mem_stack_144 ||
117             VG_(tdict).track_new_mem_stack_160 ||
118             VG_(tdict).track_die_mem_stack_160 ||
119             VG_(tdict).track_new_mem_stack     ||
120             VG_(tdict).track_die_mem_stack     );
121 }
122 
123 // - The SP aliases are held in an array which is used as a circular buffer.
124 //   This misses very few constant updates of SP (ie. < 0.1%) while using a
125 //   small, constant structure that will also never fill up and cause
126 //   execution to abort.
127 // - Unused slots have a .temp value of 'IRTemp_INVALID'.
128 // - 'next_SP_alias_slot' is the index where the next alias will be stored.
129 // - If the buffer fills, we circle around and start over-writing
130 //   non-IRTemp_INVALID values.  This is rare, and the overwriting of a
131 //   value that would have subsequently been used is even rarer.
132 // - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
133 //   The rest either all won't (if we haven't yet circled around) or all
134 //   will (if we have circled around).
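// - Example (with a hypothetically small N_ALIASES of 4): adding five
//   aliases in a row fills slots 0..3, wraps around, and overwrites slot 0,
//   leaving next_SP_alias_slot == 1.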
135 
136 typedef
137    struct {
138       IRTemp temp;
139       Long   delta;
140    }
141    SP_Alias;
142 
143 // With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
144 // And I've tested with smaller values and the wrap-around case works ok.
145 #define N_ALIASES    32
146 static SP_Alias SP_aliases[N_ALIASES];
147 static Int      next_SP_alias_slot = 0;
148 
149 static void clear_SP_aliases(void)
150 {
151    Int i;
152    for (i = 0; i < N_ALIASES; i++) {
153       SP_aliases[i].temp  = IRTemp_INVALID;
154       SP_aliases[i].delta = 0;
155    }
156    next_SP_alias_slot = 0;
157 }
158 
159 static void add_SP_alias(IRTemp temp, Long delta)
160 {
161    vg_assert(temp != IRTemp_INVALID);
162    SP_aliases[ next_SP_alias_slot ].temp  = temp;
163    SP_aliases[ next_SP_alias_slot ].delta = delta;
164    next_SP_alias_slot++;
165    if (N_ALIASES == next_SP_alias_slot) next_SP_alias_slot = 0;
166 }
167 
168 static Bool get_SP_delta(IRTemp temp, Long* delta)
169 {
170    Int i;      // i must be signed!
171    vg_assert(IRTemp_INVALID != temp);
172    // Search backwards between current buffer position and the start.
173    for (i = next_SP_alias_slot-1; i >= 0; i--) {
174       if (temp == SP_aliases[i].temp) {
175          *delta = SP_aliases[i].delta;
176          return True;
177       }
178    }
179    // Search backwards between the end and the current buffer position.
180    for (i = N_ALIASES-1; i >= next_SP_alias_slot; i--) {
181       if (temp == SP_aliases[i].temp) {
182          *delta = SP_aliases[i].delta;
183          return True;
184       }
185    }
186    return False;
187 }
188 
189 static void update_SP_aliases(Long delta)
190 {
191    Int i;
192    for (i = 0; i < N_ALIASES; i++) {
193       if (SP_aliases[i].temp == IRTemp_INVALID) {
194          return;
195       }
196       SP_aliases[i].delta += delta;
197    }
198 }
199 
200 /* Given a guest IP, get an origin tag for a 1-element stack trace,
201    and wrap it up in an IR atom that can be passed as the origin-tag
202    value for a stack-adjustment helper function. */
203 static IRExpr* mk_ecu_Expr ( Addr64 guest_IP )
204 {
205    UInt ecu;
206    ExeContext* ec
207       = VG_(make_depth_1_ExeContext_from_Addr)( (Addr)guest_IP );
208    vg_assert(ec);
209    ecu = VG_(get_ECU_from_ExeContext)( ec );
210    vg_assert(VG_(is_plausible_ECU)(ecu));
211    /* This is always safe to do, since ecu is only 32 bits, and
212       HWord is 32 or 64. */
213    return mkIRExpr_HWord( (HWord)ecu );
214 }
215 
216 /* When gdbserver is activated, the translation of a block must
217    first be done by the tool function, then followed by a pass
218    which (if needed) instruments the code for gdbserver.
219 */
220 static
221 IRSB* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure* closureV,
222                                                  IRSB*              sb_in,
223                                                  VexGuestLayout*    layout,
224                                                  VexGuestExtents*   vge,
225                                                  VexArchInfo*       vai,
226                                                  IRType             gWordTy,
227                                                  IRType             hWordTy )
228 {
229    return VG_(instrument_for_gdbserver_if_needed)
230       (VG_(tdict).tool_instrument (closureV,
231                                    sb_in,
232                                    layout,
233                                    vge,
234                                    vai,
235                                    gWordTy,
236                                    hWordTy),
237        layout,
238        vge,
239        gWordTy,
240        hWordTy);
241 }
242 
243 /* For tools that want to know about SP changes, this pass adds
244    in the appropriate hooks.  We have to do it after the tool's
245    instrumentation, so the tool doesn't have to worry about the C calls
246    it adds in, and we must do it before register allocation because
247    spilled temps make it much harder to work out the SP deltas.
248    This is done with Vex's "second instrumentation" pass.
249 
250    Basically, we look for GET(SP)/PUT(SP) pairs and track constant
251    increments/decrements of SP between them.  (This requires tracking one or
252    more "aliases", which are not exact aliases but instead are tempregs
253    whose value is equal to the SP's plus or minus a known constant.)
254    If all the changes to SP leading up to a PUT(SP) are by known, small
255    constants, we can do a specific call to eg. new_mem_stack_4, otherwise
256    we fall back to the case that handles an unknown SP change.
257 
258    There is some extra complexity to deal correctly with updates to
259    only parts of SP.  Bizarre, but it has been known to happen.
260 */
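/* Illustrative sketch (hypothetical IR for a 32-bit guest, not taken from
   any real translation):

      t1 = GET:I32(offset_SP)
      t2 = Sub32(t1, 0x10:I32)
      PUT(offset_SP) = t2

   The Get, the constant subtract and the Put are matched by the cases in
   the pass below with a known delta of -16, so, assuming the tool tracks
   that size, a call to track_new_mem_stack_16 (or its _w_ECU variant) is
   emitted rather than the generic unknown-SP-update helper. */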
261 static
262 IRSB* vg_SP_update_pass ( void*             closureV,
263                           IRSB*             sb_in,
264                           VexGuestLayout*   layout,
265                           VexGuestExtents*  vge,
266                           VexArchInfo*      vai,
267                           IRType            gWordTy,
268                           IRType            hWordTy )
269 {
270    Int         i, j, k, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
271    Int         first_SP, last_SP, first_Put, last_Put;
272    IRDirty     *dcall, *d;
273    IRStmt*     st;
274    IRExpr*     e;
275    IRRegArray* descr;
276    IRType      typeof_SP;
277    Long        delta, con;
278 
279    /* Set up stuff for tracking the guest IP */
280    Bool   curr_IP_known = False;
281    Addr64 curr_IP       = 0;
282 
283    /* Set up BB */
284    IRSB* bb     = emptyIRSB();
285    bb->tyenv    = deepCopyIRTypeEnv(sb_in->tyenv);
286    bb->next     = deepCopyIRExpr(sb_in->next);
287    bb->jumpkind = sb_in->jumpkind;
288    bb->offsIP   = sb_in->offsIP;
289 
290    delta = 0;
291 
292    sizeof_SP = layout->sizeof_SP;
293    offset_SP = layout->offset_SP;
294    typeof_SP = sizeof_SP==4 ? Ity_I32 : Ity_I64;
295    vg_assert(sizeof_SP == 4 || sizeof_SP == 8);
296 
297    /* --- Start of #defines --- */
298 
299 #  define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
300 #  define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
301 
302 #  define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
303 
304 #  define GET_CONST(con)                                                \
305        (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32)                        \
306                      : (Long)(con->Ico.U64))
307 
308 #  define DO_NEW(syze, tmpp)                                            \
309       do {                                                              \
310          Bool vanilla, w_ecu;                                           \
311          vg_assert(curr_IP_known);                                      \
312          vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze;       \
313          w_ecu   = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
314          vg_assert(!(vanilla && w_ecu)); /* can't have both */          \
315          if (!(vanilla || w_ecu))                                       \
316             goto generic;                                               \
317                                                                         \
318          /* I don't know if it's really necessary to say that the */    \
319          /* call reads the stack pointer.  But anyway, we do. */        \
320          if (w_ecu) {                                                   \
321             dcall = unsafeIRDirty_0_N(                                  \
322                        2/*regparms*/,                                   \
323                        "track_new_mem_stack_" #syze "_w_ECU",           \
324                        VG_(fnptr_to_fnentry)(                           \
325                           VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
326                        mkIRExprVec_2(IRExpr_RdTmp(tmpp),                \
327                                      mk_ecu_Expr(curr_IP))              \
328                     );                                                  \
329          } else {                                                       \
330             dcall = unsafeIRDirty_0_N(                                  \
331                        1/*regparms*/,                                   \
332                        "track_new_mem_stack_" #syze ,                   \
333                        VG_(fnptr_to_fnentry)(                           \
334                           VG_(tdict).track_new_mem_stack_##syze ),      \
335                        mkIRExprVec_1(IRExpr_RdTmp(tmpp))                \
336                     );                                                  \
337          }                                                              \
338          dcall->nFxState = 1;                                           \
339          dcall->fxState[0].fx     = Ifx_Read;                           \
340          dcall->fxState[0].offset = layout->offset_SP;                  \
341          dcall->fxState[0].size   = layout->sizeof_SP;                  \
342          dcall->fxState[0].nRepeats  = 0;                               \
343          dcall->fxState[0].repeatLen = 0;                               \
344                                                                         \
345          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
346                                                                         \
347          tl_assert(syze > 0);                                           \
348          update_SP_aliases(syze);                                       \
349                                                                         \
350          n_SP_updates_fast++;                                           \
351                                                                         \
352       } while (0)
353 
354 #  define DO_DIE(syze, tmpp)                                            \
355       do {                                                              \
356          if (!VG_(tdict).track_die_mem_stack_##syze)                    \
357             goto generic;                                               \
358                                                                         \
359          /* I don't know if it's really necessary to say that the */    \
360          /* call reads the stack pointer.  But anyway, we do. */        \
361          dcall = unsafeIRDirty_0_N(                                     \
362                     1/*regparms*/,                                      \
363                     "track_die_mem_stack_" #syze,                       \
364                     VG_(fnptr_to_fnentry)(                              \
365                        VG_(tdict).track_die_mem_stack_##syze ),         \
366                     mkIRExprVec_1(IRExpr_RdTmp(tmpp))                   \
367                  );                                                     \
368          dcall->nFxState = 1;                                           \
369          dcall->fxState[0].fx     = Ifx_Read;                           \
370          dcall->fxState[0].offset = layout->offset_SP;                  \
371          dcall->fxState[0].size   = layout->sizeof_SP;                  \
372          dcall->fxState[0].nRepeats  = 0;                               \
373          dcall->fxState[0].repeatLen = 0;                               \
374                                                                         \
375          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
376                                                                         \
377          tl_assert(syze > 0);                                           \
378          update_SP_aliases(-(syze));                                    \
379                                                                         \
380          n_SP_updates_fast++;                                           \
381                                                                         \
382       } while (0)
383 
384    /* --- End of #defines --- */
385 
386    clear_SP_aliases();
387 
388    for (i = 0; i <  sb_in->stmts_used; i++) {
389 
390       st = sb_in->stmts[i];
391 
392       if (st->tag == Ist_IMark) {
393          curr_IP_known = True;
394          curr_IP       = st->Ist.IMark.addr;
395       }
396 
397       /* t = Get(sp):   curr = t, delta = 0 */
398       if (st->tag != Ist_WrTmp) goto case2;
399       e = st->Ist.WrTmp.data;
400       if (e->tag != Iex_Get)              goto case2;
401       if (e->Iex.Get.offset != offset_SP) goto case2;
402       if (e->Iex.Get.ty != typeof_SP)     goto case2;
403       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
404       add_SP_alias(st->Ist.WrTmp.tmp, 0);
405       addStmtToIRSB( bb, st );
406       continue;
407 
408      case2:
409       /* t' = curr +/- const:   curr = t',  delta +=/-= const */
410       if (st->tag != Ist_WrTmp) goto case3;
411       e = st->Ist.WrTmp.data;
412       if (e->tag != Iex_Binop) goto case3;
413       if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
414       if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
415       if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
416       if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
417       con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
418       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
419       if (IS_ADD(e->Iex.Binop.op)) {
420          add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
421       } else {
422          add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
423       }
424       addStmtToIRSB( bb, st );
425       continue;
426 
427      case3:
428       /* t' = curr:   curr = t' */
429       if (st->tag != Ist_WrTmp) goto case4;
430       e = st->Ist.WrTmp.data;
431       if (e->tag != Iex_RdTmp) goto case4;
432       if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
433       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
434       add_SP_alias(st->Ist.WrTmp.tmp, delta);
435       addStmtToIRSB( bb, st );
436       continue;
437 
438      case4:
439       /* Put(sp) = curr */
440       /* More generally, we must correctly handle a Put which writes
441          any part of SP, not just the case where all of SP is
442          written. */
443       if (st->tag != Ist_Put) goto case5;
444       first_SP  = offset_SP;
445       last_SP   = first_SP + sizeof_SP - 1;
446       first_Put = st->Ist.Put.offset;
447       last_Put  = first_Put
448                   + sizeofIRType( typeOfIRExpr( bb->tyenv, st->Ist.Put.data ))
449                   - 1;
450       vg_assert(first_SP <= last_SP);
451       vg_assert(first_Put <= last_Put);
452 
453       if (last_Put < first_SP || last_SP < first_Put)
454          goto case5; /* no overlap */
455 
456       if (st->Ist.Put.data->tag == Iex_RdTmp
457           && get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
458          IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
459          /* Why should the following assertion hold?  Because any
460            alias added by add_SP_alias must be of a temporary which
461             has the same type as typeof_SP, and whose value is a Get
462             at exactly offset_SP of size typeof_SP.  Each call to
463            add_SP_alias is immediately preceded by an assertion that
464             we are putting in a binding for a correctly-typed
465             temporary. */
466          vg_assert( typeOfIRTemp(bb->tyenv, tttmp) == typeof_SP );
467          /* From the same type-and-offset-correctness argument, if
468            we found a usable alias, it must be for an "exact" write of SP. */
469          vg_assert(first_SP == first_Put);
470          vg_assert(last_SP == last_Put);
471          switch (delta) {
472             case    0:                      addStmtToIRSB(bb,st); continue;
473             case    4: DO_DIE(  4,  tttmp); addStmtToIRSB(bb,st); continue;
474             case   -4: DO_NEW(  4,  tttmp); addStmtToIRSB(bb,st); continue;
475             case    8: DO_DIE(  8,  tttmp); addStmtToIRSB(bb,st); continue;
476             case   -8: DO_NEW(  8,  tttmp); addStmtToIRSB(bb,st); continue;
477             case   12: DO_DIE(  12, tttmp); addStmtToIRSB(bb,st); continue;
478             case  -12: DO_NEW(  12, tttmp); addStmtToIRSB(bb,st); continue;
479             case   16: DO_DIE(  16, tttmp); addStmtToIRSB(bb,st); continue;
480             case  -16: DO_NEW(  16, tttmp); addStmtToIRSB(bb,st); continue;
481             case   32: DO_DIE(  32, tttmp); addStmtToIRSB(bb,st); continue;
482             case  -32: DO_NEW(  32, tttmp); addStmtToIRSB(bb,st); continue;
483             case  112: DO_DIE( 112, tttmp); addStmtToIRSB(bb,st); continue;
484             case -112: DO_NEW( 112, tttmp); addStmtToIRSB(bb,st); continue;
485             case  128: DO_DIE( 128, tttmp); addStmtToIRSB(bb,st); continue;
486             case -128: DO_NEW( 128, tttmp); addStmtToIRSB(bb,st); continue;
487             case  144: DO_DIE( 144, tttmp); addStmtToIRSB(bb,st); continue;
488             case -144: DO_NEW( 144, tttmp); addStmtToIRSB(bb,st); continue;
489             case  160: DO_DIE( 160, tttmp); addStmtToIRSB(bb,st); continue;
490             case -160: DO_NEW( 160, tttmp); addStmtToIRSB(bb,st); continue;
491             default:
492                /* common values for ppc64: 144 128 160 112 176 */
493                n_SP_updates_generic_known++;
494                goto generic;
495          }
496       } else {
497          /* Deal with an unknown update to SP.  We're here because
498             either:
499             (1) the Put does not exactly cover SP; it is a partial update.
500                 Highly unlikely, but has been known to happen for 16-bit
501                 Windows apps running on Wine, doing 16-bit adjustments to
502                 %sp.
503             (2) the Put does exactly cover SP, but we are unable to
504                 determine how the value relates to the old SP.  In any
505                 case, we cannot assume that the Put.data value is a tmp;
506                 we must assume it can be anything allowed in flat IR (tmp
507                 or const).
508          */
509          IRTemp  old_SP;
510          n_SP_updates_generic_unknown++;
511 
512          // Nb: if all is well, this generic case will typically be
513          // called something like every 1000th SP update.  If it's more than
514          // that, the above code may be missing some cases.
515         generic:
516          /* Pass both the old and new SP values to this helper.  Also,
517             pass an origin tag, even if it isn't needed. */
518          old_SP = newIRTemp(bb->tyenv, typeof_SP);
519          addStmtToIRSB(
520             bb,
521             IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
522          );
523 
524          /* Now we know what the old value of SP is.  But knowing the new
525             value is a bit tricky if there is a partial write. */
526          if (first_Put == first_SP && last_Put == last_SP) {
527             /* The common case, an exact write to SP.  So st->Ist.Put.data
528                does hold the new value; simple. */
529             vg_assert(curr_IP_known);
530             if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
531                dcall = unsafeIRDirty_0_N(
532                           3/*regparms*/,
533                           "VG_(unknown_SP_update_w_ECU)",
534                           VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update_w_ECU) ),
535                           mkIRExprVec_3( IRExpr_RdTmp(old_SP), st->Ist.Put.data,
536                                          mk_ecu_Expr(curr_IP) )
537                        );
538             else
539                dcall = unsafeIRDirty_0_N(
540                           2/*regparms*/,
541                           "VG_(unknown_SP_update)",
542                           VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
543                           mkIRExprVec_2( IRExpr_RdTmp(old_SP), st->Ist.Put.data )
544                        );
545 
546             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
547             /* don't forget the original assignment */
548             addStmtToIRSB( bb, st );
549          } else {
550             /* We have a partial update to SP.  We need to know what
551                the new SP will be, and hand that to the helper call,
552                but when the helper call happens, SP must hold the
553                value it had before the update.  Tricky.
554                Therefore use the following kludge:
555                1. do the partial SP update (Put)
556                2. Get the new SP value into a tmp, new_SP
557                3. Put old_SP
558                4. Call the helper
559                5. Put new_SP
560             */
561             IRTemp new_SP;
562             /* 1 */
563             addStmtToIRSB( bb, st );
564             /* 2 */
565             new_SP = newIRTemp(bb->tyenv, typeof_SP);
566             addStmtToIRSB(
567                bb,
568                IRStmt_WrTmp( new_SP, IRExpr_Get(offset_SP, typeof_SP) )
569             );
570             /* 3 */
571             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(old_SP) ));
572             /* 4 */
573             vg_assert(curr_IP_known);
574             if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
575                dcall = unsafeIRDirty_0_N(
576                           3/*regparms*/,
577                           "VG_(unknown_SP_update_w_ECU)",
578                           VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update_w_ECU) ),
579                           mkIRExprVec_3( IRExpr_RdTmp(old_SP),
580                                          IRExpr_RdTmp(new_SP),
581                                          mk_ecu_Expr(curr_IP) )
582                        );
583             else
584                dcall = unsafeIRDirty_0_N(
585                           2/*regparms*/,
586                           "VG_(unknown_SP_update)",
587                           VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
588                           mkIRExprVec_2( IRExpr_RdTmp(old_SP),
589                                          IRExpr_RdTmp(new_SP) )
590                        );
591             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
592             /* 5 */
593             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(new_SP) ));
594          }
595 
596          /* Forget what we already know. */
597          clear_SP_aliases();
598 
599          /* If this is a Put of a tmp that exactly updates SP,
600             start tracking aliases against this tmp. */
601 
602          if (first_Put == first_SP && last_Put == last_SP
603              && st->Ist.Put.data->tag == Iex_RdTmp) {
604             vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.Put.data->Iex.RdTmp.tmp)
605                        == typeof_SP );
606             add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
607          }
608          continue;
609       }
610 
611      case5:
612       /* PutI or Dirty call which overlaps SP: complain.  We can't
613          deal with SP changing in weird ways (well, we can, but not at
614          this time of night).  */
615       if (st->tag == Ist_PutI) {
616          descr = st->Ist.PutI.details->descr;
617          minoff_ST = descr->base;
618          maxoff_ST = descr->base
619                      + descr->nElems * sizeofIRType(descr->elemTy) - 1;
620          if (!(offset_SP > maxoff_ST
621                || (offset_SP + sizeof_SP - 1) < minoff_ST))
622             goto complain;
623       }
624       if (st->tag == Ist_Dirty) {
625          d = st->Ist.Dirty.details;
626          for (j = 0; j < d->nFxState; j++) {
627             if (d->fxState[j].fx == Ifx_Read || d->fxState[j].fx == Ifx_None)
628                continue;
629             /* Enumerate the described state segments */
630             for (k = 0; k < 1 + d->fxState[j].nRepeats; k++) {
631                minoff_ST = d->fxState[j].offset + k * d->fxState[j].repeatLen;
632                maxoff_ST = minoff_ST + d->fxState[j].size - 1;
633                if (!(offset_SP > maxoff_ST
634                      || (offset_SP + sizeof_SP - 1) < minoff_ST))
635                   goto complain;
636             }
637          }
638       }
639 
640       /* well, not interesting.  Just copy and keep going. */
641       addStmtToIRSB( bb, st );
642 
643    } /* for (i = 0; i < sb_in->stmts_used; i++) */
644 
645    return bb;
646 
647   complain:
648    VG_(core_panic)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
649 
650 #undef IS_ADD
651 #undef IS_SUB
652 #undef IS_ADD_OR_SUB
653 #undef GET_CONST
654 #undef DO_NEW
655 #undef DO_DIE
656 }
657 
658 /*------------------------------------------------------------*/
659 /*--- Main entry point for the JITter.                     ---*/
660 /*------------------------------------------------------------*/
661 
662 /* Extra comments re self-checking translations and self-modifying
663    code.  (JRS 14 Oct 05).
664 
665    There are 3 modes:
666    (1) no checking: all code assumed to be not self-modifying
667    (2) partial: known-problematic situations get a self-check
668    (3) full checking: all translations get a self-check
669 
670    As currently implemented, the default is (2).  (3) is always safe,
671    but very slow.  (1) works mostly, but fails for gcc nested-function
672    code which uses trampolines on the stack; this situation is
673    detected and handled by (2).
674 
675    ----------
676 
677    A more robust and transparent solution, which is not currently
678    implemented, is a variant of (2): if a translation is made from an
679    area which aspacem says does not have 'w' permission, then it can
680    be non-self-checking.  Otherwise, it needs a self-check.
681 
682    This is complicated by Vex's basic-block chasing.  If a self-check
683    is requested, then Vex will not chase over basic block boundaries
684    (it's too complex).  However there is still a problem if it chases
685    from a non-'w' area into a 'w' area.
686 
687    I think the right thing to do is:
688 
689    - if a translation request starts in a 'w' area, ask for a
690      self-checking translation, and do not allow any chasing (make
691      chase_into_ok return False).  Note that the latter is redundant
692      in the sense that Vex won't chase anyway in this situation.
693 
694    - if a translation request starts in a non-'w' area, do not ask for
695      a self-checking translation.  However, do not allow chasing (as
696      determined by chase_into_ok) to go into a 'w' area.
697 
698    The result of this is that all code inside 'w' areas is self
699    checking.
700 
701    To complete the trick, there is a caveat: we must watch the
702    client's mprotect calls.  If pages are changed from non-'w' to 'w'
703    then we should throw away all translations which intersect the
704    affected area, so as to force them to be redone with self-checks.
705 
706    ----------
707 
708    The above outlines the conditions under which bb chasing is allowed
709    from a self-modifying-code point of view.  There are other
710    situations pertaining to function redirection in which it is
711    necessary to disallow chasing, but those fall outside the scope of
712    this comment.
713 */
714 
715 
716 /* Vex dumps the final code in here.  Then we can copy it off
717    wherever we like. */
718 /* 60000: should agree with assertion in VG_(add_to_transtab) in
719    m_transtab.c. */
720 #define N_TMPBUF 60000
721 static UChar tmpbuf[N_TMPBUF];
722 
723 
724 /* Function pointers we must supply to LibVEX in order that it
725    can bomb out and emit messages under Valgrind's control. */
726 __attribute__ ((noreturn))
727 static
728 void failure_exit ( void )
729 {
730    LibVEX_ShowAllocStats();
731    VG_(core_panic)("LibVEX called failure_exit().");
732 }
733 
734 static
735 void log_bytes ( HChar* bytes, Int nbytes )
736 {
737   Int i;
738   for (i = 0; i < nbytes-3; i += 4)
739      VG_(printf)("%c%c%c%c", bytes[i], bytes[i+1], bytes[i+2], bytes[i+3]);
740   for (; i < nbytes; i++)
741      VG_(printf)("%c", bytes[i]);
742 }
743 
744 
745 /* --------- Various helper functions for translation --------- */
746 
747 /* Look for reasons to disallow making translations from the given
748    segment/addr. */
749 
750 static Bool translations_allowable_from_seg ( NSegment const* seg, Addr addr )
751 {
752 #  if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32) \
753       || defined(VGA_mips64)
754    Bool allowR = True;
755 #  else
756    Bool allowR = False;
757 #  endif
758    return seg != NULL
759           && (seg->kind == SkAnonC || seg->kind == SkFileC || seg->kind == SkShmC)
760           && (seg->hasX
761               || (seg->hasR && (allowR
762                                 || VG_(has_gdbserver_breakpoint) (addr))));
763    /* If GDB/gdbsrv has inserted a breakpoint at addr, assume this is a valid
764       location to translate if seg is not executable but is readable.
765       This is needed for inferior function calls from GDB: GDB inserts a
766       breakpoint on the stack, and expects to regain control before the
767       breakpoint instruction at the breakpoint address is really
768       executed. For this, the breakpoint instruction must be translated
769       so as to have the call to gdbserver executed. */
770 }
771 
772 
773 /* Produce a bitmask stating which of the supplied extents needs a
774    self-check.  See documentation of
775    VexTranslateArgs::needs_self_check for more details about the
776    return convention. */
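/* For example, if extents 0 and 2 need a check but extent 1 does not,
   the returned bitset is (1 << 0) | (1 << 2) == 5. */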
777 
778 static UInt needs_self_check ( void* closureV,
779                                VexGuestExtents* vge )
780 {
781    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
782    UInt i, bitset;
783 
784    vg_assert(vge->n_used >= 1 && vge->n_used <= 3);
785    bitset = 0;
786 
787    for (i = 0; i < vge->n_used; i++) {
788       Bool  check = False;
789       Addr  addr  = (Addr)vge->base[i];
790       SizeT len   = (SizeT)vge->len[i];
791       NSegment const* segA = NULL;
792 
793 #     if defined(VGO_darwin)
794       // GrP fixme hack - dyld i386 IMPORT gets rewritten.
795       // To really do this correctly, we'd need to flush the
796       // translation cache whenever a segment became +WX.
797       segA = VG_(am_find_nsegment)(addr);
798       if (segA && segA->hasX && segA->hasW)
799          check = True;
800 #     endif
801 
802       if (!check) {
803          switch (VG_(clo_smc_check)) {
804             case Vg_SmcNone:
805                /* never check (except as per Darwin hack above) */
806                break;
807             case Vg_SmcAll:
808                /* always check */
809                check = True;
810                break;
811             case Vg_SmcStack: {
812                /* check if the address is in the same segment as this
813                   thread's stack pointer */
814                Addr sp = VG_(get_SP)(closure->tid);
815                if (!segA) {
816                   segA = VG_(am_find_nsegment)(addr);
817                }
818                NSegment const* segSP = VG_(am_find_nsegment)(sp);
819                if (segA && segSP && segA == segSP)
820                   check = True;
821                break;
822             }
823             case Vg_SmcAllNonFile: {
824                /* check if any part of the extent is not in a
825                   file-mapped segment */
826                if (!segA) {
827                   segA = VG_(am_find_nsegment)(addr);
828                }
829                if (segA && segA->kind == SkFileC && segA->start <= addr
830                    && (len == 0 || addr + len <= segA->end + 1)) {
831                   /* in a file-mapped segment; skip the check */
832                } else {
833                   check = True;
834                }
835                break;
836             }
837             default:
838                vg_assert(0);
839          }
840       }
841 
842       if (check)
843          bitset |= (1 << i);
844    }
845 
846    return bitset;
847 }
848 
849 
850 /* This is a callback passed to LibVEX_Translate.  It stops Vex from
851    chasing into function entry points that we wish to redirect.
852    Chasing across them obviously defeats the redirect mechanism, with
853    bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
854 */
855 static Bool chase_into_ok ( void* closureV, Addr64 addr64 )
856 {
857    Addr               addr    = (Addr)addr64;
858    NSegment const*    seg     = VG_(am_find_nsegment)(addr);
859 
860    /* Work through a list of possibilities why we might not want to
861       allow a chase. */
862 
863    /* Destination not in a plausible segment? */
864    if (!translations_allowable_from_seg(seg, addr))
865       goto dontchase;
866 
867    /* Destination is redirected? */
868    if (addr != VG_(redir_do_lookup)(addr, NULL))
869       goto dontchase;
870 
871 #  if defined(VG_PLAT_USES_PPCTOC)
872    /* This needs to be at the start of its own block.  Don't chase. Re
873       ULong_to_Ptr, be careful to ensure we only compare 32 bits on a
874       32-bit target.*/
875    if (ULong_to_Ptr(addr64)
876        == (void*)&VG_(ppctoc_magic_redirect_return_stub))
877       goto dontchase;
878 #  endif
879 
880    /* overly conservative, but .. don't chase into the distinguished
881       address that m_transtab uses as an empty-slot marker for
882       VG_(tt_fast). */
883    if (addr == TRANSTAB_BOGUS_GUEST_ADDR)
884       goto dontchase;
885 
886 #  if defined(VGA_s390x)
887    /* Never chase into an EX instruction. Generating IR for EX causes
888       a round-trip through the scheduler including VG_(discard_translations).
889       And that's expensive as shown by perf/tinycc.c:
890       Chasing into EX increases the number of EX translations from 21 to
891       102666 causing a 7x runtime increase for "none" and a 3.2x runtime
892       increase for memcheck. */
893    if (((UChar *)ULong_to_Ptr(addr))[0] == 0x44 ||   /* EX */
894        ((UChar *)ULong_to_Ptr(addr))[0] == 0xC6)     /* EXRL */
895      goto dontchase;
896 #  endif
897 
898    /* well, ok then.  go on and chase. */
899    return True;
900 
901    vg_assert(0);
902    /*NOTREACHED*/
903 
904   dontchase:
905    if (0) VG_(printf)("not chasing into 0x%lx\n", addr);
906    return False;
907 }
908 
909 
910 /* --------------- helpers for with-TOC platforms --------------- */
911 
912 /* NOTE: with-TOC platforms are: ppc64-linux. */
913 
914 static IRExpr* mkU64 ( ULong n ) {
915    return IRExpr_Const(IRConst_U64(n));
916 }
917 static IRExpr* mkU32 ( UInt n ) {
918    return IRExpr_Const(IRConst_U32(n));
919 }
920 
921 #if defined(VG_PLAT_USES_PPCTOC)
922 static IRExpr* mkU8 ( UChar n ) {
923    return IRExpr_Const(IRConst_U8(n));
924 }
925 static IRExpr* narrowTo32 ( IRTypeEnv* tyenv, IRExpr* e ) {
926    if (typeOfIRExpr(tyenv, e) == Ity_I32) {
927       return e;
928    } else {
929       vg_assert(typeOfIRExpr(tyenv, e) == Ity_I64);
930       return IRExpr_Unop(Iop_64to32, e);
931    }
932 }
933 
934 /* Generate code to push word-typed expression 'e' onto this thread's
935    redir stack, checking for stack overflow and generating code to
936    bomb out if so. */
937 
938 static void gen_PUSH ( IRSB* bb, IRExpr* e )
939 {
940    IRRegArray* descr;
941    IRTemp      t1;
942    IRExpr*     one;
943 
944 #  if defined(VGP_ppc64_linux)
945    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
946    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
947    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
948    Int    offB_EMNOTE      = offsetof(VexGuestPPC64State,guest_EMNOTE);
949    Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
950    Bool   is64             = True;
951    IRType ty_Word          = Ity_I64;
952    IROp   op_CmpNE         = Iop_CmpNE64;
953    IROp   op_Sar           = Iop_Sar64;
954    IROp   op_Sub           = Iop_Sub64;
955    IROp   op_Add           = Iop_Add64;
956    IRExpr*(*mkU)(ULong)    = mkU64;
957    vg_assert(VG_WORDSIZE == 8);
958 #  else
959    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
960    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
961    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
962    Int    offB_EMNOTE      = offsetof(VexGuestPPC32State,guest_EMNOTE);
963    Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
964    Bool   is64             = False;
965    IRType ty_Word          = Ity_I32;
966    IROp   op_CmpNE         = Iop_CmpNE32;
967    IROp   op_Sar           = Iop_Sar32;
968    IROp   op_Sub           = Iop_Sub32;
969    IROp   op_Add           = Iop_Add32;
970    IRExpr*(*mkU)(UInt)     = mkU32;
971    vg_assert(VG_WORDSIZE == 4);
972 #  endif
973 
974    vg_assert(sizeof(void*) == VG_WORDSIZE);
975    vg_assert(sizeof(Word)  == VG_WORDSIZE);
976    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
977 
978    descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
979    t1    = newIRTemp( bb->tyenv, ty_Word );
980    one   = mkU(1);
981 
982    vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);
983 
984    /* t1 = guest_REDIR_SP + 1 */
985    addStmtToIRSB(
986       bb,
987       IRStmt_WrTmp(
988          t1,
989          IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
990       )
991    );
992 
993    /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
994       The destination (0) is a bit bogus but it doesn't matter since
995       this is an unrecoverable error and will lead to Valgrind
996       shutting down.  _EMNOTE is set regardless - that's harmless
997       since it only has a meaning if the exit is taken. */
998    addStmtToIRSB(
999       bb,
1000       IRStmt_Put(offB_EMNOTE, mkU32(EmWarn_PPC64_redir_overflow))
1001    );
1002    addStmtToIRSB(
1003       bb,
1004       IRStmt_Exit(
1005          IRExpr_Binop(
1006             op_CmpNE,
1007             IRExpr_Binop(
1008                op_Sar,
1009                IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
1010                mkU8(8 * VG_WORDSIZE - 1)
1011             ),
1012             mkU(0)
1013          ),
1014          Ijk_EmFail,
1015          is64 ? IRConst_U64(0) : IRConst_U32(0),
1016          offB_CIA
1017       )
1018    );
1019 
1020    /* guest_REDIR_SP = t1 */
1021    addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));
1022 
1023    /* guest_REDIR_STACK[t1+0] = e */
1024    /* PutI/GetI have I32-typed indexes regardless of guest word size */
1025    addStmtToIRSB(
1026       bb,
1027       IRStmt_PutI(mkIRPutI(descr,
1028                            narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)));
1029 }
1030 
1031 
1032 /* Generate code to pop a word-sized value from this thread's redir
1033    stack, binding it to a new temporary, which is returned.  As with
1034    gen_PUSH, an underflow check is also performed. */
1035 
1036 static IRTemp gen_POP ( IRSB* bb )
1037 {
1038 #  if defined(VGP_ppc64_linux)
1039    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
1040    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
1041    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
1042    Int    offB_EMNOTE      = offsetof(VexGuestPPC64State,guest_EMNOTE);
1043    Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
1044    Bool   is64             = True;
1045    IRType ty_Word          = Ity_I64;
1046    IROp   op_CmpNE         = Iop_CmpNE64;
1047    IROp   op_Sar           = Iop_Sar64;
1048    IROp   op_Sub           = Iop_Sub64;
1049    IRExpr*(*mkU)(ULong)    = mkU64;
1050 #  else
1051    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
1052    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
1053    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
1054    Int    offB_EMNOTE      = offsetof(VexGuestPPC32State,guest_EMNOTE);
1055    Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
1056    Bool   is64             = False;
1057    IRType ty_Word          = Ity_I32;
1058    IROp   op_CmpNE         = Iop_CmpNE32;
1059    IROp   op_Sar           = Iop_Sar32;
1060    IROp   op_Sub           = Iop_Sub32;
1061    IRExpr*(*mkU)(UInt)     = mkU32;
1062 #  endif
1063 
1064    IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
1065    IRTemp      t1    = newIRTemp( bb->tyenv, ty_Word );
1066    IRTemp      res   = newIRTemp( bb->tyenv, ty_Word );
1067    IRExpr*     one   = mkU(1);
1068 
1069    vg_assert(sizeof(void*) == VG_WORDSIZE);
1070    vg_assert(sizeof(Word)  == VG_WORDSIZE);
1071    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
1072 
1073    /* t1 = guest_REDIR_SP */
1074    addStmtToIRSB(
1075       bb,
1076       IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
1077    );
1078 
1079    /* Bomb out if t1 < 0.  Same comments as gen_PUSH apply. */
1080    addStmtToIRSB(
1081       bb,
1082       IRStmt_Put(offB_EMNOTE, mkU32(EmWarn_PPC64_redir_underflow))
1083    );
1084    addStmtToIRSB(
1085       bb,
1086       IRStmt_Exit(
1087          IRExpr_Binop(
1088             op_CmpNE,
1089             IRExpr_Binop(
1090                op_Sar,
1091                IRExpr_RdTmp(t1),
1092                mkU8(8 * VG_WORDSIZE - 1)
1093             ),
1094             mkU(0)
1095          ),
1096          Ijk_EmFail,
1097          is64 ? IRConst_U64(0) : IRConst_U32(0),
1098          offB_CIA
1099       )
1100    );
1101 
1102    /* res = guest_REDIR_STACK[t1+0] */
1103    /* PutI/GetI have I32-typed indexes regardless of guest word size */
1104    addStmtToIRSB(
1105       bb,
1106       IRStmt_WrTmp(
1107          res,
1108          IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
1109       )
1110    );
1111 
1112    /* guest_REDIR_SP = t1-1 */
1113    addStmtToIRSB(
1114       bb,
1115       IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
1116    );
1117 
1118    return res;
1119 }
1120 
1121 /* Generate code to push LR and R2 onto this thread's redir stack,
1122    then set R2 to the new value (which is the TOC pointer to be used
1123    for the duration of the replacement function, as determined by
1124    m_debuginfo), and set LR to the magic return stub, so we get to
1125    intercept the return and restore R2 and LR to the values saved
1126    here. */
1127 
1128 static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr64 new_R2_value )
1129 {
1130 #  if defined(VGP_ppc64_linux)
1131    Addr64 bogus_RA  = (Addr64)&VG_(ppctoc_magic_redirect_return_stub);
1132    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
1133    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
1134    gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
1135    gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
1136    addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU64( bogus_RA )) );
1137    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
1138 
1139 #  else
1140 #    error Platform is not TOC-afflicted, fortunately
1141 #  endif
1142 }
1143 
1144 static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
1145 {
1146 #  if defined(VGP_ppc64_linux)
1147    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
1148    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
1149    Int    offB_CIA  = offsetof(VexGuestPPC64State,guest_CIA);
1150    IRTemp old_R2    = newIRTemp( bb->tyenv, Ity_I64 );
1151    IRTemp old_LR    = newIRTemp( bb->tyenv, Ity_I64 );
1152    /* Restore R2 */
1153    old_R2 = gen_POP( bb );
1154    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
1155    /* Restore LR */
1156    old_LR = gen_POP( bb );
1157    addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
1158    /* Branch to LR */
1159    /* re boring, we arrived here precisely because a wrapped fn did a
1160       blr (hence Ijk_Ret); so we should just mark this jump as Boring,
1161       else one _Call will have resulted in two _Rets. */
1162    bb->jumpkind = Ijk_Boring;
1163    bb->next     = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
1164    bb->offsIP   = offB_CIA;
1165 #  else
1166 #    error Platform is not TOC-afflicted, fortunately
1167 #  endif
1168 }
1169 
1170 static
1171 Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
1172 {
1173    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
1174    /* Since we're creating the entire IRSB right here, give it a
1175       proper IMark, as it won't get one any other way, and cachegrind
1176       will barf if it doesn't have one (fair enough really). */
1177    addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4, 0 ) );
1178    /* Generate the magic sequence:
1179          pop R2 from hidden stack
1180          pop LR from hidden stack
1181          goto LR
1182    */
1183    gen_pop_R2_LR_then_bLR(bb);
1184    return True; /* True == this is the entire BB; don't disassemble any
1185                    real insns into it - just hand it directly to
1186                    optimiser/instrumenter/backend. */
1187 }
1188 #endif
1189 
1190 /* --------------- END helpers for with-TOC platforms --------------- */
1191 
1192 
1193 /* This is the IR preamble generator used for replacement
1194    functions.  It adds code to set the guest_NRADDR{_GPR2} to zero
1195    (technically not necessary, but facilitates detecting mixups in
1196    which a replacement function has been erroneously declared using
1197    VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
1198    using VG_WRAP_FUNCTION_Z{U,Z}).
1199 
1200    On with-TOC platforms the following hacks are also done: LR and R2 are
1201    pushed onto a hidden stack, R2 is set to the correct value for the
1202    replacement function, and LR is set to point at the magic
1203    return-stub address.  Setting LR causes the return of the
1204    wrapped/redirected function to lead to our magic return stub, which
1205    restores LR and R2 from said stack and returns for real.
1206 
1207    VG_(get_StackTrace_wrk) understands that the LR value may point to
1208    the return stub address, and that in that case it can get the real
1209    LR value from the hidden stack instead. */
1210 static
1211 Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
1212 {
1213    Int nraddr_szB
1214       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
1215    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
1216    vg_assert(nraddr_szB == VG_WORDSIZE);
1217    addStmtToIRSB(
1218       bb,
1219       IRStmt_Put(
1220          offsetof(VexGuestArchState,guest_NRADDR),
1221          nraddr_szB == 8 ? mkU64(0) : mkU32(0)
1222       )
1223    );
1224    // t9 needs to be set to point to the start of the redirected function.
1225 #  if defined(VGP_mips32_linux)
1226    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
1227    Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
1228    addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
1229 #  endif
1230 #  if defined(VGP_mips64_linux)
1231    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
1232    Int offB_GPR25 = offsetof(VexGuestMIPS64State, guest_r25);
1233    addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU64(closure->readdr)));
1234 #  endif
1235 #  if defined(VG_PLAT_USES_PPCTOC)
1236    { VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
1237      addStmtToIRSB(
1238         bb,
1239         IRStmt_Put(
1240            offsetof(VexGuestArchState,guest_NRADDR_GPR2),
1241            VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
1242         )
1243      );
1244      gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
1245    }
1246 #  endif
1247    return False;
1248 }
1249 
1250 /* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
1251    address).  This is needed for function wrapping - so the wrapper
1252    can read _NRADDR and find the address of the function being
1253    wrapped.  On toc-afflicted platforms we must also snarf r2. */
1254 static
1255 Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
1256 {
1257    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
1258    Int nraddr_szB
1259       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
1260    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
1261    vg_assert(nraddr_szB == VG_WORDSIZE);
1262    addStmtToIRSB(
1263       bb,
1264       IRStmt_Put(
1265          offsetof(VexGuestArchState,guest_NRADDR),
1266          nraddr_szB == 8
1267             ? IRExpr_Const(IRConst_U64( closure->nraddr ))
1268             : IRExpr_Const(IRConst_U32( (UInt)closure->nraddr ))
1269       )
1270    );
1271    // t9 needs to be set to point to the start of the redirected function.
1272 #  if defined(VGP_mips32_linux)
1273    Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
1274    addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
1275 #  endif
1276 #  if defined(VGP_mips64_linux)
1277    Int offB_GPR25 = offsetof(VexGuestMIPS64State, guest_r25);
1278    addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU64(closure->readdr)));
1279 #  endif
1280 #  if defined(VGP_ppc64_linux)
1281    addStmtToIRSB(
1282       bb,
1283       IRStmt_Put(
1284          offsetof(VexGuestArchState,guest_NRADDR_GPR2),
1285          IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
1286                     VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
1287       )
1288    );
1289    gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
1290 #  endif
1291    return False;
1292 }
1293 
1294 /* --- Helpers to do with PPC related stack redzones. --- */
1295 
1296 __attribute__((unused))
1297 static Bool const_True ( Addr64 guest_addr )
1298 {
1299    return True;
1300 }
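/* const_True is used below, on ppc64-linux only, as the
   guest_ppc_zap_RZ_at_bl predicate: asked "may the stack redzone be
   trashed by a bl to this address?", it answers True unconditionally.
   Hence the unused-attribute, for platforms that never reference it. */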
1301 
1302 /* --------------- main translation function --------------- */
1303 
1304 /* Note: see comments at top of m_redir.c for the Big Picture on how
1305    redirections are managed. */
1306 
1307 typedef
1308    enum {
1309       /* normal translation, redir neither requested nor inhibited */
1310       T_Normal,
1311       /* redir translation, function-wrap (set _NRADDR) style */
1312       T_Redir_Wrap,
1313       /* redir translation, replacement (don't set _NRADDR) style */
1314       T_Redir_Replace,
1315       /* a translation in which redir is specifically disallowed */
1316       T_NoRedir
1317    }
1318    T_Kind;
1319 
1320 /* Translate the basic block beginning at NRADDR, and add it to the
1321    translation cache & translation table.  Unless
1322    DEBUGGING_TRANSLATION is true, in which case the call is being done
1323    for debugging purposes, so (a) throw away the translation once it
1324    is made, and (b) produce a load of debugging output.  If
1325    ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
1326    and also, put the resulting translation into the no-redirect tt/tc
1327    instead of the normal one.
1328 
1329    TID is the identity of the thread requesting this translation.
1330 */
1331 
1332 Bool VG_(translate) ( ThreadId tid,
1333                       Addr64   nraddr,
1334                       Bool     debugging_translation,
1335                       Int      debugging_verbosity,
1336                       ULong    bbs_done,
1337                       Bool     allow_redirection )
1338 {
1339    Addr64             addr;
1340    T_Kind             kind;
1341    Int                tmpbuf_used, verbosity, i;
1342    Bool (*preamble_fn)(void*,IRSB*);
1343    VexArch            vex_arch;
1344    VexArchInfo        vex_archinfo;
1345    VexAbiInfo         vex_abiinfo;
1346    VexGuestExtents    vge;
1347    VexTranslateArgs   vta;
1348    VexTranslateResult tres;
1349    VgCallbackClosure  closure;
1350 
1351    /* Make sure Vex is initialised right. */
1352 
1353    static Bool vex_init_done = False;
1354 
1355    if (!vex_init_done) {
1356       LibVEX_Init ( &failure_exit, &log_bytes,
1357                     1,     /* debug_paranoia */
1358                     False, /* valgrind support */
1359                     &VG_(clo_vex_control) );
1360       vex_init_done = True;
1361    }
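   /* Initialisation is done lazily, on the first translation request.
      failure_exit and log_bytes (defined earlier in this file) route
      Vex's fatal errors and diagnostic output back through Valgrind's
      own machinery. */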
1362 
1363    /* Establish the translation kind and actual guest address to
1364       start from.  Sets (addr,kind). */
1365    if (allow_redirection) {
1366       Bool isWrap;
1367       Addr64 tmp = VG_(redir_do_lookup)( nraddr, &isWrap );
1368       if (tmp == nraddr) {
1369          /* no redirection found */
1370          addr = nraddr;
1371          kind = T_Normal;
1372       } else {
1373          /* found a redirect */
1374          addr = tmp;
1375          kind = isWrap ? T_Redir_Wrap : T_Redir_Replace;
1376       }
1377    } else {
1378       addr = nraddr;
1379       kind = T_NoRedir;
1380    }
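   /* Example: for a function the tool has replaced using the
      VG_REPLACE_FUNCTION_Z{U,Z} macros mentioned above, the lookup
      returns the replacement's entry point with isWrap == False,
      giving kind == T_Redir_Replace; a VG_WRAP_FUNCTION_Z{U,Z}
      wrapper instead yields isWrap == True and kind == T_Redir_Wrap. */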
1381 
1382    /* Established: (nraddr, addr, kind) */
1383 
1384    /* Printing redirection info. */
1385 
1386    if ((kind == T_Redir_Wrap || kind == T_Redir_Replace)
1387        && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
1388       Bool ok;
1389       HChar name1[512] = "";
1390       HChar name2[512] = "";
1391       name1[0] = name2[0] = 0;
1392       ok = VG_(get_fnname_w_offset)(nraddr, name1, 512);
1393       if (!ok) VG_(strcpy)(name1, "???");
1394       ok = VG_(get_fnname_w_offset)(addr, name2, 512);
1395       if (!ok) VG_(strcpy)(name2, "???");
1396       VG_(message)(Vg_DebugMsg,
1397                    "REDIR: 0x%llx (%s) redirected to 0x%llx (%s)\n",
1398                    nraddr, name1,
1399                    addr, name2 );
1400    }
1401 
1402    if (!debugging_translation)
1403       VG_TRACK( pre_mem_read, Vg_CoreTranslate,
1404                               tid, "(translator)", addr, 1 );
1405 
1406    /* If doing any code printing, print a basic block start marker */
1407    if (VG_(clo_trace_flags) || debugging_translation) {
1408       HChar fnname[512] = "UNKNOWN_FUNCTION";
1409       VG_(get_fnname_w_offset)(addr, fnname, 512);
1410       const HChar* objname = "UNKNOWN_OBJECT";
1411       OffT         objoff  = 0;
1412       DebugInfo*   di      = VG_(find_DebugInfo)( addr );
1413       if (di) {
1414          objname = VG_(DebugInfo_get_filename)(di);
1415          objoff  = addr - VG_(DebugInfo_get_text_bias)(di);
1416       }
1417       vg_assert(objname);
1418       VG_(printf)(
1419          "==== SB %d (evchecks %lld) [tid %d] 0x%llx %s %s+0x%llx\n",
1420          VG_(get_bbs_translated)(), bbs_done, (Int)tid, addr,
1421          fnname, objname, (ULong)objoff
1422       );
1423    }
1424 
1425    /* Are we allowed to translate here? */
1426 
1427    { /* BEGIN new scope specially for 'seg' */
1428    NSegment const* seg = VG_(am_find_nsegment)(addr);
1429 
1430    if ( (!translations_allowable_from_seg(seg, addr))
1431         || addr == TRANSTAB_BOGUS_GUEST_ADDR ) {
1432       if (VG_(clo_trace_signals))
1433          VG_(message)(Vg_DebugMsg, "translations not allowed here (0x%llx)"
1434                                    " - throwing SEGV\n", addr);
1435       /* U R busted, sonny.  Place your hands on your head and step
1436          away from the orig_addr. */
1437       /* Code address is bad - deliver a signal instead */
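      /* (A mapped but non-executable location gets a permissions
         fault via VG_(synth_fault_perms); a completely unmapped one
         gets a mapping fault via VG_(synth_fault_mapping).) */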
1438       if (seg != NULL) {
1439          /* There's some kind of segment at the requested place, but we
1440             aren't allowed to execute code here. */
1441          if (debugging_translation)
1442             VG_(printf)("translations not allowed here (segment not executable)"
1443                         " (0x%llx)\n", addr);
1444          else
1445             VG_(synth_fault_perms)(tid, addr);
1446       } else {
1447         /* There is no segment at all; we are attempting to execute in
1448            the middle of nowhere. */
1449          if (debugging_translation)
1450             VG_(printf)("translations not allowed here (no segment)"
1451                         " (0x%llx)\n", addr);
1452          else
1453             VG_(synth_fault_mapping)(tid, addr);
1454       }
1455       return False;
1456    }
1457 
1458    /* Select verbosity: debugging_verbosity for a debug trans., else VG_(clo_trace_flags) if within the notbelow..notabove SB window. */
1459    verbosity = 0;
1460    if (debugging_translation) {
1461       verbosity = debugging_verbosity;
1462    }
1463    else
1464    if ( (VG_(clo_trace_flags) > 0
1465         && VG_(get_bbs_translated)() <= VG_(clo_trace_notabove)
1466         && VG_(get_bbs_translated)() >= VG_(clo_trace_notbelow) )) {
1467       verbosity = VG_(clo_trace_flags);
1468    }
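   /* For example, assuming the usual debug options corresponding to
      the clo_* variables above, --trace-flags=10001000
      --trace-notbelow=100 --trace-notabove=200 would print the
      selected stages only for superblocks 100..200. */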
1469 
1470    /* Figure out which preamble-mangling callback to send. */
1471    preamble_fn = NULL;
1472    if (kind == T_Redir_Replace)
1473       preamble_fn = mk_preamble__set_NRADDR_to_zero;
1474    else
1475    if (kind == T_Redir_Wrap)
1476       preamble_fn = mk_preamble__set_NRADDR_to_nraddr;
1477 
1478 #  if defined(VG_PLAT_USES_PPCTOC)
1479    if (ULong_to_Ptr(nraddr)
1480        == (void*)&VG_(ppctoc_magic_redirect_return_stub)) {
1481       /* If entering the special return stub, this means a wrapped or
1482          redirected function is returning.  Make this translation one
1483          which restores R2 and LR from the thread's hidden redir
1484          stack, and branch to the (restored) link register, thereby
1485          really causing the function to return. */
1486       vg_assert(kind == T_Normal);
1487       vg_assert(nraddr == addr);
1488       preamble_fn = mk_preamble__ppctoc_magic_return_stub;
1489    }
1490 #  endif
1491 
1492    /* ------ Actually do the translation. ------ */
1493    tl_assert2(VG_(tdict).tool_instrument,
1494               "you forgot to set VgToolInterface function 'tool_instrument'");
1495 
1496    /* Get the CPU info established at startup. */
1497    VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
1498 
1499    /* Set up 'abiinfo' structure with stuff Vex needs to know about
1500       the guest and host ABIs. */
1501 
1502    LibVEX_default_VexAbiInfo( &vex_abiinfo );
1503    vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
1504 
1505 #  if defined(VGP_amd64_linux)
1506    vex_abiinfo.guest_amd64_assume_fs_is_zero  = True;
1507 #  endif
1508 #  if defined(VGP_amd64_darwin)
1509    vex_abiinfo.guest_amd64_assume_gs_is_0x60  = True;
1510 #  endif
1511 #  if defined(VGP_ppc32_linux)
1512    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = False;
1513    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = NULL;
1514    vex_abiinfo.host_ppc32_regalign_int64_args = True;
1515 #  endif
1516 #  if defined(VGP_ppc64_linux)
1517    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = True;
1518    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
1519    vex_abiinfo.host_ppc_calls_use_fndescrs    = True;
1520 #  endif
1521 
1522    /* Set up closure args. */
1523    closure.tid    = tid;
1524    closure.nraddr = nraddr;
1525    closure.readdr = addr;
1526 
1527    /* Set up args for LibVEX_Translate. */
1528    vta.arch_guest       = vex_arch;
1529    vta.archinfo_guest   = vex_archinfo;
1530    vta.arch_host        = vex_arch;
1531    vta.archinfo_host    = vex_archinfo;
1532    vta.abiinfo_both     = vex_abiinfo;
1533    vta.callback_opaque  = (void*)&closure;
1534    vta.guest_bytes      = (UChar*)ULong_to_Ptr(addr);
1535    vta.guest_bytes_addr = (Addr64)addr;
1536    vta.chase_into_ok    = chase_into_ok;
1537    vta.guest_extents    = &vge;
1538    vta.host_bytes       = tmpbuf;
1539    vta.host_bytes_size  = N_TMPBUF;
1540    vta.host_bytes_used  = &tmpbuf_used;
1541    { /* At this point we have to reconcile Vex's view of the
1542         instrumentation callback - which takes a void* first argument
1543         - with Valgrind's view, in which the first arg is a
1544         VgCallbackClosure*.  Hence the following longwinded casts.
1545         They are entirely legal but longwinded so as to maximise the
1546         chance of the C typechecker picking up any type snafus. */
1547      IRSB*(*f)(VgCallbackClosure*,
1548                IRSB*,VexGuestLayout*,VexGuestExtents*, VexArchInfo*,
1549                IRType,IRType)
1550         = VG_(clo_vgdb) != Vg_VgdbNo
1551              ? tool_instrument_then_gdbserver_if_needed
1552              : VG_(tdict).tool_instrument;
1553      IRSB*(*g)(void*,
1554                IRSB*,VexGuestLayout*,VexGuestExtents*,VexArchInfo*,
1555                IRType,IRType)
1556        = (IRSB*(*)(void*,IRSB*,VexGuestLayout*,VexGuestExtents*,
1557                    VexArchInfo*,IRType,IRType))f;
1558      vta.instrument1     = g;
1559    }
1560    /* No need for type kludgery here. */
1561    vta.instrument2       = need_to_handle_SP_assignment()
1562                               ? vg_SP_update_pass
1563                               : NULL;
1564    vta.finaltidy         = VG_(needs).final_IR_tidy_pass
1565                               ? VG_(tdict).tool_final_IR_tidy_pass
1566                               : NULL;
1567    vta.needs_self_check  = needs_self_check;
1568    vta.preamble_function = preamble_fn;
1569    vta.traceflags        = verbosity;
1570    vta.sigill_diag       = VG_(clo_sigill_diag);
1571    vta.addProfInc        = VG_(clo_profyle_sbs) && kind != T_NoRedir;
1572 
1573    /* Set up the dispatch continuation-point info.  If this is a
1574       no-redir translation then it cannot be chained, and the chain-me
1575       points are set to NULL to indicate that.  The indir point must
1576       also be NULL, since we can't allow this translation to do an
1577       indir transfer -- that would take it back into the main
1578       translation cache too.
1579 
1580       All this is because no-redir translations live outside the main
1581       translation cache (in a secondary one) and chaining them would
1582       involve more administrative complexity than is worth the
1583       hassle, because we don't expect them to get used often.  So
1584       don't bother. */
1585    if (allow_redirection) {
1586       vta.disp_cp_chain_me_to_slowEP
1587          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_slowEP) );
1588       vta.disp_cp_chain_me_to_fastEP
1589          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_fastEP) );
1590       vta.disp_cp_xindir
1591          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xindir) );
1592    } else {
1593       vta.disp_cp_chain_me_to_slowEP = NULL;
1594       vta.disp_cp_chain_me_to_fastEP = NULL;
1595       vta.disp_cp_xindir             = NULL;
1596    }
1597    /* This doesn't involve chaining and so is always allowable. */
1598    vta.disp_cp_xassisted
1599       = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xassisted) );
1600 
1601    /* Sheesh.  Finally, actually _do_ the translation! */
1602    tres = LibVEX_Translate ( &vta );
1603 
1604    vg_assert(tres.status == VexTransOK);
1605    vg_assert(tres.n_sc_extents >= 0 && tres.n_sc_extents <= 3);
1606    vg_assert(tmpbuf_used <= N_TMPBUF);
1607    vg_assert(tmpbuf_used > 0);
1608 
1609    /* Tell aspacem of all segments that have had translations taken
1610       from them.  Optimisation: don't re-look up vge.base[0] since seg
1611       should already point to it. */
1612 
1613    vg_assert( vge.base[0] == (Addr64)addr );
1614    /* set 'translations taken from this segment' flag */
1615    VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( seg );
1616    } /* END new scope specially for 'seg' */
1617 
1618    for (i = 1; i < vge.n_used; i++) {
1619       NSegment const* seg
1620          = VG_(am_find_nsegment)( vge.base[i] );
1621       /* set 'translations taken from this segment' flag */
1622       VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( seg );
1623    }
1624 
1625    /* Copy the translation (in tmpbuf) into the translation cache. */
1626    vg_assert(tmpbuf_used > 0 && tmpbuf_used < 65536);
1627 
1628    // If debugging, don't do anything with the translated block;  we
1629    // only did this for the debugging output produced along the way.
1630    if (!debugging_translation) {
1631 
1632       if (kind != T_NoRedir) {
1633           // Put it into the normal TT/TC structures.  This is the
1634           // normal case.
1635 
1636           // Note that we use nraddr (the non-redirected address), not
1637           // addr, which might have been changed by the redirection
1638           VG_(add_to_transtab)( &vge,
1639                                 nraddr,
1640                                 (Addr)(&tmpbuf[0]),
1641                                 tmpbuf_used,
1642                                 tres.n_sc_extents > 0,
1643                                 tres.offs_profInc,
1644                                 tres.n_guest_instrs,
1645                                 vex_arch );
1646       } else {
1647           vg_assert(tres.offs_profInc == -1); /* -1 == unset */
1648           VG_(add_to_unredir_transtab)( &vge,
1649                                         nraddr,
1650                                         (Addr)(&tmpbuf[0]),
1651                                         tmpbuf_used );
1652       }
1653    }
1654 
1655    return True;
1656 }
1657 
1658 /*--------------------------------------------------------------------*/
1659 /*--- end                                                          ---*/
1660 /*--------------------------------------------------------------------*/
1661