// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include <cmath>
#include <cstring>
#include <queue>

#include "test-runner.h"
#include "test-utils-aarch64.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

#define __ masm->

namespace vixl {
namespace aarch64 {


// This value is a signalling NaN as FP64, and also as FP32 or FP16 (taking the
// least-significant bits).
const double kFP64SignallingNaN = RawbitsToDouble(UINT64_C(0x7ff000007f807c01));
const float kFP32SignallingNaN = RawbitsToFloat(0x7f807c01);
const Float16 kFP16SignallingNaN = RawbitsToFloat16(0x7c01);

// A similar value, but as a quiet NaN.
const double kFP64QuietNaN = RawbitsToDouble(UINT64_C(0x7ff800007fc07e01));
const float kFP32QuietNaN = RawbitsToFloat(0x7fc07e01);
const Float16 kFP16QuietNaN = RawbitsToFloat16(0x7e01);

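// Equality helpers for test assertions.
//
// Each Equal* helper compares an expected value with a result taken from a
// RegisterDump, printing both values and returning false on a mismatch. In
// the raw-value overloads below, the RegisterDump parameter is unused; it
// only keeps the signature uniform with the register-based overloads that
// follow.
//
// A minimal usage sketch (the surrounding SETUP/RUN plumbing and the
// ASSERT_EQUAL_* wrapper macros live in the individual test files):
//
//   RegisterDump core;
//   // ... emit test code, call core.Dump(&masm) at the end of the generated
//   // code, then assemble and run it ...
//   VIXL_CHECK(Equal64(UINT64_C(0x1234), &core, x0));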
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
  if (result != expected) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
           expected,
           result);
  }

  return expected == result;
}


bool Equal64(uint64_t reference,
             const RegisterDump*,
             uint64_t result,
             ExpectedResult option) {
  switch (option) {
    case kExpectEqual:
      if (result != reference) {
        printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
               reference,
               result);
      }
      break;
    case kExpectNotEqual:
      if (result == reference) {
        printf("Expected a result not equal to 0x%016" PRIx64 "\n", reference);
      }
      break;
  }

  return reference == result;
}


bool Equal128(QRegisterValue expected,
              const RegisterDump*,
              QRegisterValue result) {
  if (!expected.Equals(result)) {
    printf("Expected 0x%016" PRIx64 "%016" PRIx64
           "\t "
           "Found 0x%016" PRIx64 "%016" PRIx64 "\n",
           expected.GetLane<uint64_t>(1),
           expected.GetLane<uint64_t>(0),
           result.GetLane<uint64_t>(1),
           result.GetLane<uint64_t>(0));
  }

  return expected.Equals(result);
}


bool EqualFP16(Float16 expected, const RegisterDump*, Float16 result) {
  uint16_t e_rawbits = Float16ToRawbits(expected);
  uint16_t r_rawbits = Float16ToRawbits(result);
  if (e_rawbits == r_rawbits) {
    return true;
  } else {
    if (IsNaN(expected) || IsZero(expected)) {
      printf("Expected 0x%04" PRIx16 "\t Found 0x%04" PRIx16 "\n",
             e_rawbits,
             r_rawbits);
    } else {
      printf("Expected %.6f (16 bit): (0x%04" PRIx16
             ")\t "
             "Found %.6f (0x%04" PRIx16 ")\n",
             FPToFloat(expected, kIgnoreDefaultNaN),
             e_rawbits,
             FPToFloat(result, kIgnoreDefaultNaN),
             r_rawbits);
    }
    return false;
  }
}


bool EqualFP32(float expected, const RegisterDump*, float result) {
  if (FloatToRawbits(expected) == FloatToRawbits(result)) {
    return true;
  } else {
    if (IsNaN(expected) || (expected == 0.0)) {
      printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
             FloatToRawbits(expected),
             FloatToRawbits(result));
    } else {
      printf("Expected %.9f (0x%08" PRIx32
             ")\t "
             "Found %.9f (0x%08" PRIx32 ")\n",
             expected,
             FloatToRawbits(expected),
             result,
             FloatToRawbits(result));
    }
    return false;
  }
}


bool EqualFP64(double expected, const RegisterDump*, double result) {
  if (DoubleToRawbits(expected) == DoubleToRawbits(result)) {
    return true;
  }

  if (IsNaN(expected) || (expected == 0.0)) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           DoubleToRawbits(expected),
           DoubleToRawbits(result));
  } else {
    printf("Expected %.17f (0x%016" PRIx64
           ")\t "
           "Found %.17f (0x%016" PRIx64 ")\n",
           expected,
           DoubleToRawbits(expected),
           result,
           DoubleToRawbits(result));
  }
  return false;
}


bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is32Bits());
  // Retrieve the corresponding X register so we can check that the upper part
  // was properly cleared.
  int64_t result_x = core->xreg(reg.GetCode());
  if ((result_x & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
           expected,
           result_x);
    return false;
  }
  uint32_t result_w = core->wreg(reg.GetCode());
  return Equal32(expected, core, result_w);
}


bool Equal64(uint64_t reference,
             const RegisterDump* core,
             const Register& reg,
             ExpectedResult option) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.GetCode());
  return Equal64(reference, core, result, option);
}


bool NotEqual64(uint64_t reference,
                const RegisterDump* core,
                const Register& reg) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.GetCode());
  return NotEqual64(reference, core, result);
}


bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is128Bits());
  QRegisterValue expected;
  expected.SetLane(0, expected_l);
  expected.SetLane(1, expected_h);
  QRegisterValue result = core->qreg(vreg.GetCode());
  return Equal128(expected, core, result);
}


bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is16Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
  if ((result_64 & 0xffffffffffff0000) != 0) {
    printf("Expected 0x%04" PRIx16 " (%f)\t Found 0x%016" PRIx64 "\n",
           Float16ToRawbits(expected),
           FPToFloat(expected, kIgnoreDefaultNaN),
           result_64);
    return false;
  }
  return EqualFP16(expected, core, core->hreg(fpreg.GetCode()));
}


bool EqualFP32(float expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is32Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
  if ((result_64 & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
           FloatToRawbits(expected),
           expected,
           result_64);
    return false;
  }

  return EqualFP32(expected, core, core->sreg(fpreg.GetCode()));
}


bool EqualFP64(double expected,
               const RegisterDump* core,
               const VRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is64Bits());
  return EqualFP64(expected, core, core->dreg(fpreg.GetCode()));
}


bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1,
             ExpectedResult option) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t reference = core->xreg(reg0.GetCode());
  int64_t result = core->xreg(reg1.GetCode());
  return Equal64(reference, core, result, option);
}


bool NotEqual64(const Register& reg0,
                const RegisterDump* core,
                const Register& reg1) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t expected = core->xreg(reg0.GetCode());
  int64_t result = core->xreg(reg1.GetCode());
  return NotEqual64(expected, core, result);
}


bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is64Bits());
  uint64_t result = core->dreg_bits(vreg.GetCode());
  return Equal64(expected, core, result);
}


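// Helpers for rendering NZCV flags: each returns the flag's letter in upper
// case if the flag is set, and in lower case otherwise.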
static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }


static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }


static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }


static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }


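// Compare two NZCV flag encodings, printing both as "NZCV"-style strings on
// a mismatch.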
bool EqualNzcv(uint32_t expected, uint32_t result) {
  VIXL_ASSERT((expected & ~NZCVFlag) == 0);
  VIXL_ASSERT((result & ~NZCVFlag) == 0);
  if (result != expected) {
    printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
           FlagN(expected),
           FlagZ(expected),
           FlagC(expected),
           FlagV(expected),
           FlagN(result),
           FlagZ(result),
           FlagC(result),
           FlagV(result));
    return false;
  }

  return true;
}


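// Compare two complete register dumps, reporting the first X or D register
// that differs. Only the low 64 bits of each V register are compared here.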
bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (a->xreg(i) != b->xreg(i)) {
      printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a->xreg(i),
             b->xreg(i));
      return false;
    }
  }

  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    uint64_t a_bits = a->dreg_bits(i);
    uint64_t b_bits = b->dreg_bits(i);
    if (a_bits != b_bits) {
      printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a_bits,
             b_bits);
      return false;
    }
  }

  return true;
}

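// Compare one lane of an SVE Z register (or, in the overload that follows,
// a P register) against an expected value.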
bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const ZRegister& reg,
                  int lane) {
  unsigned lane_size = reg.GetLaneSizeInBits();
  // For convenience in the tests, we allow negative values to be passed into
  // `expected`, but truncate them to an appropriately-sized unsigned value for
  // the check. For example, in `EqualSVELane(-1, core, z0.VnB())`, the expected
  // value is truncated from 0xffffffffffffffff to 0xff before the comparison.
  VIXL_ASSERT(IsUintN(lane_size, expected) ||
              IsIntN(lane_size, RawbitsToInt64(expected)));
  expected &= GetUintMask(lane_size);

  uint64_t result = core->zreg_lane(reg.GetCode(), lane_size, lane);
  if (expected != result) {
    unsigned lane_size_in_hex_chars = lane_size / 4;
    std::string reg_name = reg.GetArchitecturalName();
    printf("%s[%d]\t Expected 0x%0*" PRIx64 "\t Found 0x%0*" PRIx64 "\n",
           reg_name.c_str(),
           lane,
           lane_size_in_hex_chars,
           expected,
           lane_size_in_hex_chars,
           result);
    return false;
  }
  return true;
}

bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const PRegister& reg,
                  int lane) {
  VIXL_ASSERT(reg.HasLaneSize());
  VIXL_ASSERT((reg.GetLaneSizeInBits() % kZRegBitsPerPRegBit) == 0);
  unsigned p_bits_per_lane = reg.GetLaneSizeInBits() / kZRegBitsPerPRegBit;
  VIXL_ASSERT(IsUintN(p_bits_per_lane, expected));
  expected &= GetUintMask(p_bits_per_lane);

  uint64_t result = core->preg_lane(reg.GetCode(), p_bits_per_lane, lane);
  if (expected != result) {
    unsigned lane_size_in_hex_chars = (p_bits_per_lane + 3) / 4;
    std::string reg_name = reg.GetArchitecturalName();
    printf("%s[%d]\t Expected 0x%0*" PRIx64 "\t Found 0x%0*" PRIx64 "\n",
           reg_name.c_str(),
           lane,
           lane_size_in_hex_chars,
           expected,
           lane_size_in_hex_chars,
           result);
    return false;
  }
  return true;
}

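// A 64-bit chunk of each of the two buffers compared by EqualMemory(), below.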
struct EqualMemoryChunk {
  typedef uint64_t RawChunk;

  uintptr_t address;
  RawChunk expected;
  RawChunk result;

  bool IsEqual() const { return expected == result; }
};

bool EqualMemory(const void* expected,
                 const void* result,
                 size_t size_in_bytes,
                 size_t zero_offset) {
  if (memcmp(expected, result, size_in_bytes) == 0) return true;

  // Read 64-bit chunks, and print them side-by-side if they don't match.

  // Remember the last few chunks, even if they matched, so we can print some
  // context. We don't want to print the whole buffer, because it could be huge.
  static const size_t kContextLines = 1;
  std::queue<EqualMemoryChunk> context;
  static const size_t kChunkSize = sizeof(EqualMemoryChunk::RawChunk);

  // This assumption keeps the logic simple, and is acceptable for our tests.
  VIXL_ASSERT((size_in_bytes % kChunkSize) == 0);

  const char* expected_it = reinterpret_cast<const char*>(expected);
  const char* result_it = reinterpret_cast<const char*>(result);

  // This is the first error, so print a header row.
  printf("  Address (of result)                  Expected           Result\n");

  // Always print some context at the start of the buffer.
  uintptr_t print_context_to =
      reinterpret_cast<uintptr_t>(result) + (kContextLines + 1) * kChunkSize;
  for (size_t i = 0; i < size_in_bytes; i += kChunkSize) {
    EqualMemoryChunk chunk;
    chunk.address = reinterpret_cast<uintptr_t>(result_it);
    memcpy(&chunk.expected, expected_it, kChunkSize);
    memcpy(&chunk.result, result_it, kChunkSize);

    while (context.size() > kContextLines) context.pop();
    context.push(chunk);

    // Print context after an error, and at the end of the buffer.
    if (!chunk.IsEqual() || ((i + kChunkSize) >= size_in_bytes)) {
      if (chunk.address > print_context_to) {
        // We aren't currently printing context, so separate this context from
        // the previous block.
        printf("...\n");
      }
      print_context_to = chunk.address + (kContextLines + 1) * kChunkSize;
    }

    // Print context (including the current line).
    while (!context.empty() && (context.front().address < print_context_to)) {
      uintptr_t address = context.front().address;
      uint64_t offset = address - reinterpret_cast<uintptr_t>(result);
      bool is_negative = (offset < zero_offset);
      printf("0x%016" PRIxPTR " (result %c %5" PRIu64 "): 0x%016" PRIx64
             " 0x%016" PRIx64 "\n",
             address,
             (is_negative ? '-' : '+'),
             (is_negative ? (zero_offset - offset) : (offset - zero_offset)),
             context.front().expected,
             context.front().result);
      context.pop();
    }

    expected_it += kChunkSize;
    result_it += kChunkSize;
  }

  return false;
}
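
// Fill the w, x and r arrays with registers drawn from the `allowed` mask,
// and return the list of registers that were actually assigned. Any of the
// output arrays may be NULL; `r` receives registers of `reg_size` bits.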
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (r) {
        r[i] = Register(n, reg_size);
      }
      if (x) {
        x[i] = Register(n, kXRegSize);
      }
      if (w) {
        w[i] = Register(n, kWRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);

  return list;
}


// The VRegister equivalent of PopulateRegisterArray(): `v` receives registers
// of `reg_size` bits, while `d` and `s` receive the D and S views.
RegList PopulateVRegisterArray(VRegister* s,
                               VRegister* d,
                               VRegister* v,
                               int reg_size,
                               int reg_count,
                               RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfVRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (v) {
        v[i] = VRegister(n, reg_size);
      }
      if (d) {
        d[i] = VRegister(n, kDRegSize);
      }
      if (s) {
        s[i] = VRegister(n, kSRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfVRegisters) == reg_count);

  return list;
}


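// Write `value` into every X register named in `reg_list`. The literal is
// materialised once, in the first register encountered, and then copied into
// the rest; xzr is skipped, and sp must not appear in the list.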
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
  Register first = NoReg;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      Register xn(i, kXRegSize);
      // We should never write into sp here.
      VIXL_ASSERT(!xn.Is(sp));
      if (!xn.IsZero()) {
        if (!first.IsValid()) {
          // This is the first register we've hit, so construct the literal.
          __ Mov(xn, value);
          first = xn;
        } else {
          // We've already loaded the literal, so re-use the value already
          // loaded into the first register we hit.
          __ Mov(xn, first);
        }
      }
    }
  }
}


void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
  VRegister first = NoVReg;
  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      VRegister dn(i, kDRegSize);
      if (!first.IsValid()) {
        // This is the first register we've hit, so construct the literal.
        __ Fmov(dn, value);
        first = dn;
      } else {
        // We've already loaded the literal, so re-use the value already loaded
        // into the first register we hit.
        __ Fmov(dn, first);
      }
    }
  }
}


void Clobber(MacroAssembler* masm, CPURegList reg_list) {
  if (reg_list.GetType() == CPURegister::kRegister) {
    // This will always clobber X registers.
    Clobber(masm, reg_list.GetList());
  } else if (reg_list.GetType() == CPURegister::kVRegister) {
    // This will always clobber D registers.
    ClobberFP(masm, reg_list.GetList());
  } else {
    VIXL_UNIMPLEMENTED();
  }
}

// TODO: Once registers have sufficiently compatible interfaces, merge the two
// DumpRegisters templates.
template <typename T>
static void DumpRegisters(MacroAssembler* masm,
                          Register dump_base,
                          int offset) {
  UseScratchRegisterScope temps(masm);
  Register dump = temps.AcquireX();
  __ Add(dump, dump_base, offset);
  for (unsigned i = 0; i <= T::GetMaxCode(); i++) {
    T reg(i);
    __ Str(reg, SVEMemOperand(dump));
    __ Add(dump, dump, reg.GetMaxSizeInBytes());
  }
}

template <typename T>
static void DumpRegisters(MacroAssembler* masm,
                          Register dump_base,
                          int offset,
                          int reg_size_in_bytes) {
  UseScratchRegisterScope temps(masm);
  Register dump = temps.AcquireX();
  __ Add(dump, dump_base, offset);
  for (unsigned i = 0; i <= T::GetMaxCode(); i++) {
    T reg(i, reg_size_in_bytes * kBitsPerByte);
    __ Str(reg, MemOperand(dump));
    __ Add(dump, dump, reg_size_in_bytes);
  }
}

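// Emit code to dump the complete CPU state (core registers, NEON/FP
// registers, SVE state where available, plus sp, wsp and NZCV) into the
// dump_ structure. The scratch registers used by the dump code itself are
// pushed first and captured separately at the end.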
void RegisterDump::Dump(MacroAssembler* masm) {
  VIXL_ASSERT(__ StackPointer().Is(sp));

  dump_cpu_features_ = *masm->GetCPUFeatures();

  // We need some scratch registers, but we also need to dump them, so we have
  // to control exactly which registers are used, and dump them separately.
  CPURegList scratch_registers(x0, x1, x2, x3);

  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();
  __ PushCPURegList(scratch_registers);
  temps.Include(scratch_registers);

  Register dump_base = temps.AcquireX();
  Register tmp = temps.AcquireX();

  // Offsets into the dump_ structure.
  const int x_offset = offsetof(dump_t, x_);
  const int w_offset = offsetof(dump_t, w_);
  const int d_offset = offsetof(dump_t, d_);
  const int s_offset = offsetof(dump_t, s_);
  const int h_offset = offsetof(dump_t, h_);
  const int q_offset = offsetof(dump_t, q_);
  const int z_offset = offsetof(dump_t, z_);
  const int p_offset = offsetof(dump_t, p_);
  const int sp_offset = offsetof(dump_t, sp_);
  const int wsp_offset = offsetof(dump_t, wsp_);
  const int flags_offset = offsetof(dump_t, flags_);
  const int vl_offset = offsetof(dump_t, vl_);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
  // another register first. Also, we pushed four X registers, so we need to
  // compensate here.
  __ Add(tmp, sp, 4 * kXRegSizeInBytes);
  __ Str(tmp, MemOperand(dump_base, sp_offset));
  __ Add(tmp.W(), wsp, 4 * kXRegSizeInBytes);
  __ Str(tmp.W(), MemOperand(dump_base, wsp_offset));

  // Dump core registers.
  DumpRegisters<Register>(masm, dump_base, x_offset, kXRegSizeInBytes);
  DumpRegisters<Register>(masm, dump_base, w_offset, kWRegSizeInBytes);

  // Dump NEON and FP registers.
  DumpRegisters<VRegister>(masm, dump_base, q_offset, kQRegSizeInBytes);
  DumpRegisters<VRegister>(masm, dump_base, d_offset, kDRegSizeInBytes);
  DumpRegisters<VRegister>(masm, dump_base, s_offset, kSRegSizeInBytes);
  DumpRegisters<VRegister>(masm, dump_base, h_offset, kHRegSizeInBytes);

  // Dump SVE registers.
  if (CPUHas(CPUFeatures::kSVE)) {
    DumpRegisters<ZRegister>(masm, dump_base, z_offset);
    DumpRegisters<PRegister>(masm, dump_base, p_offset);

    // Record the vector length.
    __ Rdvl(tmp, kBitsPerByte);
    __ Str(tmp, MemOperand(dump_base, vl_offset));
  }

  // Dump the flags.
  __ Mrs(tmp, NZCV);
  __ Str(tmp, MemOperand(dump_base, flags_offset));

  // To dump the values we used as scratch registers, we need a new scratch
  // register. We can use any of the already dumped registers since we can
  // easily restore them.
  Register dump2_base = x10;
  VIXL_ASSERT(!scratch_registers.IncludesAliasOf(dump2_base));

  VIXL_ASSERT(scratch_registers.IncludesAliasOf(dump_base));

  // Ensure that we don't try to use the scratch registers again.
  temps.ExcludeAll();

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);

  __ PopCPURegList(scratch_registers);

  while (!scratch_registers.IsEmpty()) {
    CPURegister reg = scratch_registers.PopLowestIndex();
    Register x = reg.X();
    Register w = reg.W();
    unsigned code = reg.GetCode();
    __ Str(x, MemOperand(dump2_base, x_offset + (code * kXRegSizeInBytes)));
    __ Str(w, MemOperand(dump2_base, w_offset + (code * kWRegSizeInBytes)));
  }

  // Finally, restore dump2_base.
  __ Ldr(dump2_base,
         MemOperand(dump2_base,
                    x_offset + (dump2_base.GetCode() * kXRegSizeInBytes)));

  completed_ = true;
}

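// Return the raw bit pattern of the signalling NaN defined at the top of this
// file, for the given FP register size.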
uint64_t GetSignallingNan(int size_in_bits) {
  switch (size_in_bits) {
    case kHRegSize:
      return Float16ToRawbits(kFP16SignallingNaN);
    case kSRegSize:
      return FloatToRawbits(kFP32SignallingNaN);
    case kDRegSize:
      return DoubleToRawbits(kFP64SignallingNaN);
    default:
      VIXL_UNIMPLEMENTED();
      return 0;
  }
}

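// Return true if the test's generated code can run in this environment: the
// simulator can run anything VIXL can assemble, while on real hardware the
// required CPU features must be present. Missing features are logged unless
// `queried_can_run` indicates that this call site has already been reported.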
bool CanRun(const CPUFeatures& required, bool* queried_can_run) {
  bool log_if_missing = true;
  if (queried_can_run != NULL) {
    log_if_missing = !*queried_can_run;
    *queried_can_run = true;
  }

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
  // The Simulator can run any test that VIXL can assemble.
  USE(required);
  USE(log_if_missing);
  return true;
#else
  CPUFeatures cpu = CPUFeatures::InferFromOS();
  // If InferFromOS fails, assume that basic features are present.
  if (cpu.HasNoFeatures()) cpu = CPUFeatures::AArch64LegacyBaseline();
  VIXL_ASSERT(cpu.Has(kInfrastructureCPUFeatures));

  if (cpu.Has(required)) return true;

  if (log_if_missing) {
    CPUFeatures missing = required.Without(cpu);
    // Note: This message needs to match REGEXP_MISSING_FEATURES from
    // tools/threaded_test.py.
    std::cout << "SKIPPED: Missing features: { " << missing << " }\n";
    std::cout << "This test requires the following features to run its "
                 "generated code on this CPU: "
              << required << "\n";
  }
  return false;
#endif
}

}  // namespace aarch64
}  // namespace vixl