• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2014, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #include <cmath>
28 
29 #include "test-runner.h"
30 #include "test-utils-aarch64.h"
31 
32 #include "aarch64/cpu-aarch64.h"
33 #include "aarch64/disasm-aarch64.h"
34 #include "aarch64/macro-assembler-aarch64.h"
35 #include "aarch64/simulator-aarch64.h"
36 
37 #define __ masm->
38 
39 namespace vixl {
40 namespace aarch64 {
41 
42 
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
const double kFP64SignallingNaN = RawbitsToDouble(UINT64_C(0x7ff000007f800001));
const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);
// Signalling NaN in IEEE 754 half precision: exponent all-ones, top mantissa
// bit clear, non-zero payload.
const Float16 kFP16SignallingNaN = RawbitsToFloat16(0x7c01);

// A similar value, but as a quiet NaN.
const double kFP64QuietNaN = RawbitsToDouble(UINT64_C(0x7ff800007fc00001));
const float kFP32QuietNaN = RawbitsToFloat(0x7fc00001);
// Quiet NaN in half precision: top mantissa bit set.
const Float16 kFP16QuietNaN = RawbitsToFloat16(0x7e01);
53 
54 
// Compare two 32-bit values, printing both (in hex) on mismatch. The
// RegisterDump argument is unused; it keeps this overload's shape consistent
// with the register-based Equal* helpers.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
  const bool equal = (expected == result);
  if (!equal) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
           expected,
           result);
  }
  return equal;
}
64 
65 
// Compare two 64-bit values, printing both (in hex) on mismatch. The
// RegisterDump argument is unused; it exists for overload consistency.
bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
  const bool equal = (expected == result);
  if (!equal) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           expected,
           result);
  }
  return equal;
}
75 
76 
Equal128(vec128_t expected,const RegisterDump *,vec128_t result)77 bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
78   if ((result.h != expected.h) || (result.l != expected.l)) {
79     printf("Expected 0x%016" PRIx64 "%016" PRIx64
80            "\t "
81            "Found 0x%016" PRIx64 "%016" PRIx64 "\n",
82            expected.h,
83            expected.l,
84            result.h,
85            result.l);
86   }
87 
88   return ((expected.h == result.h) && (expected.l == result.l));
89 }
90 
91 
EqualFP16(Float16 expected,const RegisterDump *,Float16 result)92 bool EqualFP16(Float16 expected, const RegisterDump*, Float16 result) {
93   uint16_t e_rawbits = Float16ToRawbits(expected);
94   uint16_t r_rawbits = Float16ToRawbits(result);
95   if (e_rawbits == r_rawbits) {
96     return true;
97   } else {
98     if (IsNaN(expected) || IsZero(expected)) {
99       printf("Expected 0x%04" PRIx16 "\t Found 0x%04" PRIx16 "\n",
100              e_rawbits,
101              r_rawbits);
102     } else {
103       printf("Expected %.6f (16 bit): (0x%04" PRIx16
104              ")\t "
105              "Found %.6f (0x%04" PRIx16 ")\n",
106              FPToFloat(expected, kIgnoreDefaultNaN),
107              e_rawbits,
108              FPToFloat(result, kIgnoreDefaultNaN),
109              r_rawbits);
110     }
111     return false;
112   }
113 }
114 
115 
EqualFP32(float expected,const RegisterDump *,float result)116 bool EqualFP32(float expected, const RegisterDump*, float result) {
117   if (FloatToRawbits(expected) == FloatToRawbits(result)) {
118     return true;
119   } else {
120     if (IsNaN(expected) || (expected == 0.0)) {
121       printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
122              FloatToRawbits(expected),
123              FloatToRawbits(result));
124     } else {
125       printf("Expected %.9f (0x%08" PRIx32
126              ")\t "
127              "Found %.9f (0x%08" PRIx32 ")\n",
128              expected,
129              FloatToRawbits(expected),
130              result,
131              FloatToRawbits(result));
132     }
133     return false;
134   }
135 }
136 
137 
EqualFP64(double expected,const RegisterDump *,double result)138 bool EqualFP64(double expected, const RegisterDump*, double result) {
139   if (DoubleToRawbits(expected) == DoubleToRawbits(result)) {
140     return true;
141   }
142 
143   if (IsNaN(expected) || (expected == 0.0)) {
144     printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
145            DoubleToRawbits(expected),
146            DoubleToRawbits(result));
147   } else {
148     printf("Expected %.17f (0x%016" PRIx64
149            ")\t "
150            "Found %.17f (0x%016" PRIx64 ")\n",
151            expected,
152            DoubleToRawbits(expected),
153            result,
154            DoubleToRawbits(result));
155   }
156   return false;
157 }
158 
159 
Equal32(uint32_t expected,const RegisterDump * core,const Register & reg)160 bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
161   VIXL_ASSERT(reg.Is32Bits());
162   // Retrieve the corresponding X register so we can check that the upper part
163   // was properly cleared.
164   int64_t result_x = core->xreg(reg.GetCode());
165   if ((result_x & 0xffffffff00000000) != 0) {
166     printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
167            expected,
168            result_x);
169     return false;
170   }
171   uint32_t result_w = core->wreg(reg.GetCode());
172   return Equal32(expected, core, result_w);
173 }
174 
175 
Equal64(uint64_t expected,const RegisterDump * core,const Register & reg)176 bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg) {
177   VIXL_ASSERT(reg.Is64Bits());
178   uint64_t result = core->xreg(reg.GetCode());
179   return Equal64(expected, core, result);
180 }
181 
182 
Equal128(uint64_t expected_h,uint64_t expected_l,const RegisterDump * core,const VRegister & vreg)183 bool Equal128(uint64_t expected_h,
184               uint64_t expected_l,
185               const RegisterDump* core,
186               const VRegister& vreg) {
187   VIXL_ASSERT(vreg.Is128Bits());
188   vec128_t expected = {expected_l, expected_h};
189   vec128_t result = core->qreg(vreg.GetCode());
190   return Equal128(expected, core, result);
191 }
192 
193 
EqualFP16(Float16 expected,const RegisterDump * core,const FPRegister & fpreg)194 bool EqualFP16(Float16 expected,
195                const RegisterDump* core,
196                const FPRegister& fpreg) {
197   VIXL_ASSERT(fpreg.Is16Bits());
198   // Retrieve the corresponding D register so we can check that the upper part
199   // was properly cleared.
200   uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
201   if ((result_64 & 0xfffffffffff0000) != 0) {
202     printf("Expected 0x%04" PRIx16 " (%f)\t Found 0x%016" PRIx64 "\n",
203            Float16ToRawbits(expected),
204            FPToFloat(expected, kIgnoreDefaultNaN),
205            result_64);
206     return false;
207   }
208   return EqualFP16(expected, core, core->hreg(fpreg.GetCode()));
209 }
210 
211 
EqualFP32(float expected,const RegisterDump * core,const FPRegister & fpreg)212 bool EqualFP32(float expected,
213                const RegisterDump* core,
214                const FPRegister& fpreg) {
215   VIXL_ASSERT(fpreg.Is32Bits());
216   // Retrieve the corresponding D register so we can check that the upper part
217   // was properly cleared.
218   uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
219   if ((result_64 & 0xffffffff00000000) != 0) {
220     printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
221            FloatToRawbits(expected),
222            expected,
223            result_64);
224     return false;
225   }
226 
227   return EqualFP32(expected, core, core->sreg(fpreg.GetCode()));
228 }
229 
230 
EqualFP64(double expected,const RegisterDump * core,const FPRegister & fpreg)231 bool EqualFP64(double expected,
232                const RegisterDump* core,
233                const FPRegister& fpreg) {
234   VIXL_ASSERT(fpreg.Is64Bits());
235   return EqualFP64(expected, core, core->dreg(fpreg.GetCode()));
236 }
237 
238 
Equal64(const Register & reg0,const RegisterDump * core,const Register & reg1)239 bool Equal64(const Register& reg0,
240              const RegisterDump* core,
241              const Register& reg1) {
242   VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
243   int64_t expected = core->xreg(reg0.GetCode());
244   int64_t result = core->xreg(reg1.GetCode());
245   return Equal64(expected, core, result);
246 }
247 
248 
Equal64(uint64_t expected,const RegisterDump * core,const VRegister & vreg)249 bool Equal64(uint64_t expected,
250              const RegisterDump* core,
251              const VRegister& vreg) {
252   VIXL_ASSERT(vreg.Is64Bits());
253   uint64_t result = core->dreg_bits(vreg.GetCode());
254   return Equal64(expected, core, result);
255 }
256 
257 
FlagN(uint32_t flags)258 static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }
259 
260 
FlagZ(uint32_t flags)261 static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }
262 
263 
FlagC(uint32_t flags)264 static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }
265 
266 
FlagV(uint32_t flags)267 static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }
268 
269 
EqualNzcv(uint32_t expected,uint32_t result)270 bool EqualNzcv(uint32_t expected, uint32_t result) {
271   VIXL_ASSERT((expected & ~NZCVFlag) == 0);
272   VIXL_ASSERT((result & ~NZCVFlag) == 0);
273   if (result != expected) {
274     printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
275            FlagN(expected),
276            FlagZ(expected),
277            FlagC(expected),
278            FlagV(expected),
279            FlagN(result),
280            FlagZ(result),
281            FlagC(result),
282            FlagV(result));
283     return false;
284   }
285 
286   return true;
287 }
288 
289 
EqualRegisters(const RegisterDump * a,const RegisterDump * b)290 bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
291   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
292     if (a->xreg(i) != b->xreg(i)) {
293       printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
294              i,
295              a->xreg(i),
296              b->xreg(i));
297       return false;
298     }
299   }
300 
301   for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
302     uint64_t a_bits = a->dreg_bits(i);
303     uint64_t b_bits = b->dreg_bits(i);
304     if (a_bits != b_bits) {
305       printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
306              i,
307              a_bits,
308              b_bits);
309       return false;
310     }
311   }
312 
313   return true;
314 }
315 
316 
PopulateRegisterArray(Register * w,Register * x,Register * r,int reg_size,int reg_count,RegList allowed)317 RegList PopulateRegisterArray(Register* w,
318                               Register* x,
319                               Register* r,
320                               int reg_size,
321                               int reg_count,
322                               RegList allowed) {
323   RegList list = 0;
324   int i = 0;
325   for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
326     if (((UINT64_C(1) << n) & allowed) != 0) {
327       // Only assign allowed registers.
328       if (r) {
329         r[i] = Register(n, reg_size);
330       }
331       if (x) {
332         x[i] = Register(n, kXRegSize);
333       }
334       if (w) {
335         w[i] = Register(n, kWRegSize);
336       }
337       list |= (UINT64_C(1) << n);
338       i++;
339     }
340   }
341   // Check that we got enough registers.
342   VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
343 
344   return list;
345 }
346 
347 
PopulateFPRegisterArray(FPRegister * s,FPRegister * d,FPRegister * v,int reg_size,int reg_count,RegList allowed)348 RegList PopulateFPRegisterArray(FPRegister* s,
349                                 FPRegister* d,
350                                 FPRegister* v,
351                                 int reg_size,
352                                 int reg_count,
353                                 RegList allowed) {
354   RegList list = 0;
355   int i = 0;
356   for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
357     if (((UINT64_C(1) << n) & allowed) != 0) {
358       // Only assigned allowed registers.
359       if (v) {
360         v[i] = FPRegister(n, reg_size);
361       }
362       if (d) {
363         d[i] = FPRegister(n, kDRegSize);
364       }
365       if (s) {
366         s[i] = FPRegister(n, kSRegSize);
367       }
368       list |= (UINT64_C(1) << n);
369       i++;
370     }
371   }
372   // Check that we got enough registers.
373   VIXL_ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
374 
375   return list;
376 }
377 
378 
Clobber(MacroAssembler * masm,RegList reg_list,uint64_t const value)379 void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
380   Register first = NoReg;
381   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
382     if (reg_list & (UINT64_C(1) << i)) {
383       Register xn(i, kXRegSize);
384       // We should never write into sp here.
385       VIXL_ASSERT(!xn.Is(sp));
386       if (!xn.IsZero()) {
387         if (!first.IsValid()) {
388           // This is the first register we've hit, so construct the literal.
389           __ Mov(xn, value);
390           first = xn;
391         } else {
392           // We've already loaded the literal, so re-use the value already
393           // loaded into the first register we hit.
394           __ Mov(xn, first);
395         }
396       }
397     }
398   }
399 }
400 
401 
ClobberFP(MacroAssembler * masm,RegList reg_list,double const value)402 void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
403   FPRegister first = NoFPReg;
404   for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
405     if (reg_list & (UINT64_C(1) << i)) {
406       FPRegister dn(i, kDRegSize);
407       if (!first.IsValid()) {
408         // This is the first register we've hit, so construct the literal.
409         __ Fmov(dn, value);
410         first = dn;
411       } else {
412         // We've already loaded the literal, so re-use the value already loaded
413         // into the first register we hit.
414         __ Fmov(dn, first);
415       }
416     }
417   }
418 }
419 
420 
Clobber(MacroAssembler * masm,CPURegList reg_list)421 void Clobber(MacroAssembler* masm, CPURegList reg_list) {
422   if (reg_list.GetType() == CPURegister::kRegister) {
423     // This will always clobber X registers.
424     Clobber(masm, reg_list.GetList());
425   } else if (reg_list.GetType() == CPURegister::kVRegister) {
426     // This will always clobber D registers.
427     ClobberFP(masm, reg_list.GetList());
428   } else {
429     VIXL_UNREACHABLE();
430   }
431 }
432 
433 
// Emit code (through `masm`) that captures the complete CPU state — X/W
// registers, D/S/(H)/Q registers, sp/wsp, and the NZCV flags — into this
// RegisterDump's `dump_` structure, leaving every register with its original
// value afterwards. The scratch registers used here (x0-x2, then x10-x11) are
// themselves re-dumped after being restored, so the dump reflects the state
// on entry. Sets `completed_` so accessors know the dump is valid.
void RegisterDump::Dump(MacroAssembler* masm) {
  VIXL_ASSERT(__ StackPointer().Is(sp));

  // Ensure that we don't unintentionally clobber any registers.
  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();

  // Preserve some temporary registers.
  Register dump_base = x0;
  Register dump = x1;
  Register tmp = x2;
  Register dump_base_w = dump_base.W();
  Register dump_w = dump.W();
  Register tmp_w = tmp.W();

  // Offsets into the dump_ structure.
  const int x_offset = offsetof(dump_t, x_);
  const int w_offset = offsetof(dump_t, w_);
  const int d_offset = offsetof(dump_t, d_);
  const int s_offset = offsetof(dump_t, s_);
  const int h_offset = offsetof(dump_t, h_);
  const int q_offset = offsetof(dump_t, q_);
  const int sp_offset = offsetof(dump_t, sp_);
  const int wsp_offset = offsetof(dump_t, wsp_);
  const int flags_offset = offsetof(dump_t, flags_);

  // Save the scratch registers (with xzr padding to keep sp 16-byte aligned).
  __ Push(xzr, dump_base, dump, tmp);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
  // another register first. Also, we pushed four X registers, so we need to
  // compensate here.
  __ Add(tmp, sp, 4 * kXRegSizeInBytes);
  __ Str(tmp, MemOperand(dump_base, sp_offset));
  __ Add(tmp_w, wsp, 4 * kXRegSizeInBytes);
  __ Str(tmp_w, MemOperand(dump_base, wsp_offset));

  // Dump X registers.
  __ Add(dump, dump_base, x_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetXRegFromCode(i),
           Register::GetXRegFromCode(i + 1),
           MemOperand(dump, i * kXRegSizeInBytes));
  }

  // Dump W registers.
  __ Add(dump, dump_base, w_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetWRegFromCode(i),
           Register::GetWRegFromCode(i + 1),
           MemOperand(dump, i * kWRegSizeInBytes));
  }

  // Dump D registers.
  __ Add(dump, dump_base, d_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::GetDRegFromCode(i),
           FPRegister::GetDRegFromCode(i + 1),
           MemOperand(dump, i * kDRegSizeInBytes));
  }

  // Dump S registers.
  __ Add(dump, dump_base, s_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::GetSRegFromCode(i),
           FPRegister::GetSRegFromCode(i + 1),
           MemOperand(dump, i * kSRegSizeInBytes));
  }

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
  // Dump H registers. Note: Stp does not support 16 bits.
  __ Add(dump, dump_base, h_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    __ Str(FPRegister::GetHRegFromCode(i),
           MemOperand(dump, i * kHRegSizeInBytes));
  }
#else
  USE(h_offset);
#endif

  // Dump Q registers.
  __ Add(dump, dump_base, q_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::GetQRegFromCode(i),
           VRegister::GetQRegFromCode(i + 1),
           MemOperand(dump, i * kQRegSizeInBytes));
  }

  // Dump the flags.
  __ Mrs(tmp, NZCV);
  __ Str(tmp, MemOperand(dump_base, flags_offset));

  // To dump the values that were in tmp and dump, we need a new scratch
  // register. We can use any of the already dumped registers since we can
  // easily restore them.
  Register dump2_base = x10;
  Register dump2 = x11;
  VIXL_ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);

  // Restore the scratch registers to their on-entry values...
  __ Pop(tmp, dump, dump_base, xzr);

  // ...then overwrite their stale slots in the dump with those true values,
  // first the W views, then the X views.
  __ Add(dump2, dump2_base, w_offset);
  __ Str(dump_base_w,
         MemOperand(dump2, dump_base.GetCode() * kWRegSizeInBytes));
  __ Str(dump_w, MemOperand(dump2, dump.GetCode() * kWRegSizeInBytes));
  __ Str(tmp_w, MemOperand(dump2, tmp.GetCode() * kWRegSizeInBytes));

  __ Add(dump2, dump2_base, x_offset);
  __ Str(dump_base, MemOperand(dump2, dump_base.GetCode() * kXRegSizeInBytes));
  __ Str(dump, MemOperand(dump2, dump.GetCode() * kXRegSizeInBytes));
  __ Str(tmp, MemOperand(dump2, tmp.GetCode() * kXRegSizeInBytes));

  // Finally, restore dump2_base and dump2 from their already-dumped values.
  __ Ldr(dump2_base,
         MemOperand(dump2, dump2_base.GetCode() * kXRegSizeInBytes));
  __ Ldr(dump2, MemOperand(dump2, dump2.GetCode() * kXRegSizeInBytes));

  completed_ = true;
}
559 
560 }  // namespace aarch64
561 }  // namespace vixl
562