// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <cmath>

#include "test-runner.h"
#include "test-utils-aarch64.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

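// The "__" shorthand forwards instruction emission to the MacroAssembler
// pointer ("masm") that each helper below receives.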
#define __ masm->

namespace vixl {
namespace aarch64 {


// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
const double kFP64SignallingNaN = RawbitsToDouble(UINT64_C(0x7ff000007f800001));
const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);
const Float16 kFP16SignallingNaN = RawbitsToFloat16(0x7c01);

// A similar value, but as a quiet NaN.
const double kFP64QuietNaN = RawbitsToDouble(UINT64_C(0x7ff800007fc00001));
const float kFP32QuietNaN = RawbitsToFloat(0x7fc00001);
const Float16 kFP16QuietNaN = RawbitsToFloat16(0x7e01);


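// Comparison helpers. On a mismatch they print both the expected and the
// actual value, to make test failures easier to diagnose.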
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
  if (result != expected) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
           expected,
           result);
  }

  return expected == result;
}


bool Equal64(uint64_t reference,
             const RegisterDump*,
             uint64_t result,
             ExpectedResult option) {
  switch (option) {
    case kExpectEqual:
      if (result != reference) {
        printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
               reference,
               result);
      }
      break;
    case kExpectNotEqual:
      if (result == reference) {
        printf("Expected a result not equal to 0x%016" PRIx64 "\n", reference);
      }
      break;
  }

  return reference == result;
}


bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
  if ((result.h != expected.h) || (result.l != expected.l)) {
    printf("Expected 0x%016" PRIx64 "%016" PRIx64
           "\t "
           "Found 0x%016" PRIx64 "%016" PRIx64 "\n",
           expected.h,
           expected.l,
           result.h,
           result.l);
  }

  return ((expected.h == result.h) && (expected.l == result.l));
}


bool EqualFP16(Float16 expected, const RegisterDump*, Float16 result) {
  uint16_t e_rawbits = Float16ToRawbits(expected);
  uint16_t r_rawbits = Float16ToRawbits(result);
  if (e_rawbits == r_rawbits) {
    return true;
  } else {
    if (IsNaN(expected) || IsZero(expected)) {
      printf("Expected 0x%04" PRIx16 "\t Found 0x%04" PRIx16 "\n",
             e_rawbits,
             r_rawbits);
    } else {
      printf("Expected %.6f (16 bit): (0x%04" PRIx16
             ")\t "
             "Found %.6f (0x%04" PRIx16 ")\n",
             FPToFloat(expected, kIgnoreDefaultNaN),
             e_rawbits,
             FPToFloat(result, kIgnoreDefaultNaN),
             r_rawbits);
    }
    return false;
  }
}


bool EqualFP32(float expected, const RegisterDump*, float result) {
  if (FloatToRawbits(expected) == FloatToRawbits(result)) {
    return true;
  } else {
    if (IsNaN(expected) || (expected == 0.0)) {
      printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
             FloatToRawbits(expected),
             FloatToRawbits(result));
    } else {
      printf("Expected %.9f (0x%08" PRIx32
             ")\t "
             "Found %.9f (0x%08" PRIx32 ")\n",
             expected,
             FloatToRawbits(expected),
             result,
             FloatToRawbits(result));
    }
    return false;
  }
}


bool EqualFP64(double expected, const RegisterDump*, double result) {
  if (DoubleToRawbits(expected) == DoubleToRawbits(result)) {
    return true;
  }

  if (IsNaN(expected) || (expected == 0.0)) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           DoubleToRawbits(expected),
           DoubleToRawbits(result));
  } else {
    printf("Expected %.17f (0x%016" PRIx64
           ")\t "
           "Found %.17f (0x%016" PRIx64 ")\n",
           expected,
           DoubleToRawbits(expected),
           result,
           DoubleToRawbits(result));
  }
  return false;
}


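// Overloads that fetch the value to check from a RegisterDump.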
bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is32Bits());
  // Retrieve the corresponding X register so we can check that the upper part
  // was properly cleared.
  int64_t result_x = core->xreg(reg.GetCode());
  if ((result_x & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
           expected,
           result_x);
    return false;
  }
  uint32_t result_w = core->wreg(reg.GetCode());
  return Equal32(expected, core, result_w);
}


bool Equal64(uint64_t reference,
             const RegisterDump* core,
             const Register& reg,
             ExpectedResult option) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.GetCode());
  return Equal64(reference, core, result, option);
}


bool NotEqual64(uint64_t reference,
                const RegisterDump* core,
                const Register& reg) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.GetCode());
  return NotEqual64(reference, core, result);
}


bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is128Bits());
  vec128_t expected = {expected_l, expected_h};
  vec128_t result = core->qreg(vreg.GetCode());
  return Equal128(expected, core, result);
}


bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is16Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
  // The upper 48 bits of the D register must be zero.
  if ((result_64 & 0xffffffffffff0000) != 0) {
    printf("Expected 0x%04" PRIx16 " (%f)\t Found 0x%016" PRIx64 "\n",
           Float16ToRawbits(expected),
           FPToFloat(expected, kIgnoreDefaultNaN),
           result_64);
    return false;
  }
  return EqualFP16(expected, core, core->hreg(fpreg.GetCode()));
}


bool EqualFP32(float expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is32Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
  if ((result_64 & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
           FloatToRawbits(expected),
           expected,
           result_64);
    return false;
  }

  return EqualFP32(expected, core, core->sreg(fpreg.GetCode()));
}


bool EqualFP64(double expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is64Bits());
  return EqualFP64(expected, core, core->dreg(fpreg.GetCode()));
}


bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1,
             ExpectedResult option) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t reference = core->xreg(reg0.GetCode());
  int64_t result = core->xreg(reg1.GetCode());
  return Equal64(reference, core, result, option);
}


bool NotEqual64(const Register& reg0,
                const RegisterDump* core,
                const Register& reg1) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t expected = core->xreg(reg0.GetCode());
  int64_t result = core->xreg(reg1.GetCode());
  return NotEqual64(expected, core, result);
}


bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is64Bits());
  uint64_t result = core->dreg_bits(vreg.GetCode());
  return Equal64(expected, core, result);
}


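// Flag formatting helpers: an upper-case letter indicates that the
// corresponding NZCV flag is set, a lower-case letter that it is clear.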
static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }


static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }


static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }


static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }


bool EqualNzcv(uint32_t expected, uint32_t result) {
  VIXL_ASSERT((expected & ~NZCVFlag) == 0);
  VIXL_ASSERT((result & ~NZCVFlag) == 0);
  if (result != expected) {
    printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
           FlagN(expected),
           FlagZ(expected),
           FlagC(expected),
           FlagV(expected),
           FlagN(result),
           FlagZ(result),
           FlagC(result),
           FlagV(result));
    return false;
  }

  return true;
}


bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (a->xreg(i) != b->xreg(i)) {
      printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a->xreg(i),
             b->xreg(i));
      return false;
    }
  }

  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    uint64_t a_bits = a->dreg_bits(i);
    uint64_t b_bits = b->dreg_bits(i);
    if (a_bits != b_bits) {
      printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a_bits,
             b_bits);
      return false;
    }
  }

  return true;
}


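// Pick 'reg_count' registers from the 'allowed' mask and fill the w, x and r
// arrays (when non-NULL) with the corresponding W, X and 'reg_size'-sized
// views. Returns the RegList of registers that were assigned.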
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (r) {
        r[i] = Register(n, reg_size);
      }
      if (x) {
        x[i] = Register(n, kXRegSize);
      }
      if (w) {
        w[i] = Register(n, kWRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);

  return list;
}


RegList PopulateVRegisterArray(VRegister* s,
                               VRegister* d,
                               VRegister* v,
                               int reg_size,
                               int reg_count,
                               RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (v) {
        v[i] = VRegister(n, reg_size);
      }
      if (d) {
        d[i] = VRegister(n, kDRegSize);
      }
      if (s) {
        s[i] = VRegister(n, kSRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfVRegisters) == reg_count);

  return list;
}


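// Overwrite every X register in 'reg_list' with 'value'. The zero register is
// skipped, and sp must not be in the list.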
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
  Register first = NoReg;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      Register xn(i, kXRegSize);
      // We should never write into sp here.
      VIXL_ASSERT(!xn.Is(sp));
      if (!xn.IsZero()) {
        if (!first.IsValid()) {
          // This is the first register we've hit, so construct the literal.
          __ Mov(xn, value);
          first = xn;
        } else {
          // We've already loaded the literal, so re-use the value already
          // loaded into the first register we hit.
          __ Mov(xn, first);
        }
      }
    }
  }
}


void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
  VRegister first = NoVReg;
  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      VRegister dn(i, kDRegSize);
      if (!first.IsValid()) {
        // This is the first register we've hit, so construct the literal.
        __ Fmov(dn, value);
        first = dn;
      } else {
        // We've already loaded the literal, so re-use the value already loaded
        // into the first register we hit.
        __ Fmov(dn, first);
      }
    }
  }
}


void Clobber(MacroAssembler* masm, CPURegList reg_list) {
  if (reg_list.GetType() == CPURegister::kRegister) {
    // This will always clobber X registers.
    Clobber(masm, reg_list.GetList());
  } else if (reg_list.GetType() == CPURegister::kVRegister) {
    // This will always clobber D registers.
    ClobberFP(masm, reg_list.GetList());
  } else {
    VIXL_UNREACHABLE();
  }
}


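// Emit code that stores the current value of every register, sp/wsp and the
// NZCV flags into this RegisterDump's dump_ structure, so tests can inspect
// the machine state after the generated code has run.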
void RegisterDump::Dump(MacroAssembler* masm) {
  VIXL_ASSERT(__ StackPointer().Is(sp));

  // Ensure that we don't unintentionally clobber any registers.
  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();

  // Preserve some temporary registers.
  Register dump_base = x0;
  Register dump = x1;
  Register tmp = x2;
  Register dump_base_w = dump_base.W();
  Register dump_w = dump.W();
  Register tmp_w = tmp.W();

  // Offsets into the dump_ structure.
  const int x_offset = offsetof(dump_t, x_);
  const int w_offset = offsetof(dump_t, w_);
  const int d_offset = offsetof(dump_t, d_);
  const int s_offset = offsetof(dump_t, s_);
  const int h_offset = offsetof(dump_t, h_);
  const int q_offset = offsetof(dump_t, q_);
  const int sp_offset = offsetof(dump_t, sp_);
  const int wsp_offset = offsetof(dump_t, wsp_);
  const int flags_offset = offsetof(dump_t, flags_);

  __ Push(xzr, dump_base, dump, tmp);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
  // another register first. Also, we pushed four X registers, so we need to
  // compensate here.
  __ Add(tmp, sp, 4 * kXRegSizeInBytes);
  __ Str(tmp, MemOperand(dump_base, sp_offset));
  __ Add(tmp_w, wsp, 4 * kXRegSizeInBytes);
  __ Str(tmp_w, MemOperand(dump_base, wsp_offset));

  // Dump X registers.
  __ Add(dump, dump_base, x_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetXRegFromCode(i),
           Register::GetXRegFromCode(i + 1),
           MemOperand(dump, i * kXRegSizeInBytes));
  }

  // Dump W registers.
  __ Add(dump, dump_base, w_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetWRegFromCode(i),
           Register::GetWRegFromCode(i + 1),
           MemOperand(dump, i * kWRegSizeInBytes));
  }

  // Dump D registers.
  __ Add(dump, dump_base, d_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::GetDRegFromCode(i),
           VRegister::GetDRegFromCode(i + 1),
           MemOperand(dump, i * kDRegSizeInBytes));
  }

  // Dump S registers.
  __ Add(dump, dump_base, s_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::GetSRegFromCode(i),
           VRegister::GetSRegFromCode(i + 1),
           MemOperand(dump, i * kSRegSizeInBytes));
  }

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
  // Dump H registers. Note: Stp does not support 16 bits.
  __ Add(dump, dump_base, h_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    __ Str(VRegister::GetHRegFromCode(i),
           MemOperand(dump, i * kHRegSizeInBytes));
  }
#else
  USE(h_offset);
#endif

  // Dump Q registers.
  __ Add(dump, dump_base, q_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::GetQRegFromCode(i),
           VRegister::GetQRegFromCode(i + 1),
           MemOperand(dump, i * kQRegSizeInBytes));
  }

  // Dump the flags.
  __ Mrs(tmp, NZCV);
  __ Str(tmp, MemOperand(dump_base, flags_offset));

  // To dump the values that were in tmp and dump, we need a new scratch
  // register. We can use any of the already dumped registers since we can
  // easily restore them.
  Register dump2_base = x10;
  Register dump2 = x11;
  VIXL_ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);

  __ Pop(tmp, dump, dump_base, xzr);

  __ Add(dump2, dump2_base, w_offset);
  __ Str(dump_base_w,
         MemOperand(dump2, dump_base.GetCode() * kWRegSizeInBytes));
  __ Str(dump_w, MemOperand(dump2, dump.GetCode() * kWRegSizeInBytes));
  __ Str(tmp_w, MemOperand(dump2, tmp.GetCode() * kWRegSizeInBytes));

  __ Add(dump2, dump2_base, x_offset);
  __ Str(dump_base, MemOperand(dump2, dump_base.GetCode() * kXRegSizeInBytes));
  __ Str(dump, MemOperand(dump2, dump.GetCode() * kXRegSizeInBytes));
  __ Str(tmp, MemOperand(dump2, tmp.GetCode() * kXRegSizeInBytes));

  // Finally, restore dump2_base and dump2.
  __ Ldr(dump2_base,
         MemOperand(dump2, dump2_base.GetCode() * kXRegSizeInBytes));
  __ Ldr(dump2, MemOperand(dump2, dump2.GetCode() * kXRegSizeInBytes));

  completed_ = true;
}

}  // namespace aarch64
}  // namespace vixl