// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_TEST_UTILS_AARCH64_H_
#define VIXL_AARCH64_TEST_UTILS_AARCH64_H_

#include "test-runner.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

namespace vixl {
namespace aarch64 {

// Signalling and quiet NaNs in double format, constructed such that the bottom
// 32 bits look like a signalling or quiet NaN (as appropriate) when interpreted
// as a float. These values are not architecturally significant, but they're
// useful in tests for initialising registers.
extern const double kFP64SignallingNaN;
extern const double kFP64QuietNaN;

// Signalling and quiet NaNs in float format.
extern const float kFP32SignallingNaN;
extern const float kFP32QuietNaN;

// Signalling and quiet NaNs in half-precision float format.
extern const Float16 kFP16SignallingNaN;
extern const Float16 kFP16QuietNaN;
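
// For example (an illustrative sketch, not taken from any particular test),
// these constants are handy for seeding FP registers with recognisable NaN
// patterns; `masm` is assumed to be a MacroAssembler set up by the test:
//
//   masm.Fmov(d0, kFP64QuietNaN);
//   masm.Fmov(s1, kFP32SignallingNaN);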

// Vector registers don't naturally fit any C++ native type, so define a class
// with convenient accessors.
// Note that this has to be a POD type so that we can use 'offsetof' with it.
template <int kSizeInBytes>
struct VectorValue {
  template <typename T>
  T GetLane(int lane) const {
    size_t lane_size = sizeof(T);
    VIXL_CHECK(lane >= 0);
    VIXL_CHECK(kSizeInBytes >= ((lane + 1) * lane_size));
    T result;
    memcpy(&result, bytes + (lane * lane_size), lane_size);
    return result;
  }

  template <typename T>
  void SetLane(int lane, T value) {
    size_t lane_size = sizeof(value);
    VIXL_CHECK(kSizeInBytes >= ((lane + 1) * lane_size));
    memcpy(bytes + (lane * lane_size), &value, lane_size);
  }

  bool Equals(const VectorValue<kSizeInBytes>& other) const {
    return memcmp(bytes, other.bytes, kSizeInBytes) == 0;
  }

  uint8_t bytes[kSizeInBytes];
};
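
// A brief usage sketch (illustrative only; the value and lane index are
// arbitrary). VectorValue<kQRegSizeInBytes> is aliased as QRegisterValue just
// below:
//
//   VectorValue<kQRegSizeInBytes> v;
//   v.SetLane<uint32_t>(0, 0x11223344);
//   VIXL_CHECK(v.GetLane<uint32_t>(0) == 0x11223344);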

// It would be convenient to make these subclasses, so we can provide convenient
// constructors and utility methods specific to each register type, but we can't
// do that because it makes the result a non-POD type, and then we can't use
// 'offsetof' in RegisterDump::Dump.
typedef VectorValue<kQRegSizeInBytes> QRegisterValue;
typedef VectorValue<kZRegMaxSizeInBytes> ZRegisterValue;
typedef VectorValue<kPRegMaxSizeInBytes> PRegisterValue;

// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.h_[0]) == kHRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.q_[0]) == kQRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
  // It needs to be able to use the stack temporarily, and requires that the
  // current stack pointer is sp, and is properly aligned.
  //
  // The dumping code is generated through the given MacroAssembler. No
  // registers are corrupted in the process, but the stack is used briefly. The
  // flags will be corrupted during this call.
  void Dump(MacroAssembler* assm);
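  //
  // A typical test uses Dump roughly like this (an illustrative sketch; the
  // macros that build and run the generated code live in the test .cc files):
  //
  //   RegisterDump core;
  //   masm.Mov(x0, 42);
  //   core.Dump(&masm);
  //   // ... finalise and run the generated code ...
  //   VIXL_CHECK(Equal64(42, &core, x0));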

  // Register accessors.
  inline int32_t wreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

  inline int64_t xreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // VRegister accessors.
  inline uint16_t hreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.h_[code];
  }

  inline uint32_t sreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.s_[code];
  }

  inline Float16 hreg(unsigned code) const {
    return RawbitsToFloat16(hreg_bits(code));
  }

  inline float sreg(unsigned code) const {
    return RawbitsToFloat(sreg_bits(code));
  }

  inline uint64_t dreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.d_[code];
  }

  inline double dreg(unsigned code) const {
    return RawbitsToDouble(dreg_bits(code));
  }

  inline QRegisterValue qreg(unsigned code) const { return dump_.q_[code]; }

  template <typename T>
  inline T zreg_lane(unsigned code, int lane) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    VIXL_ASSERT(CPUHas(CPUFeatures::kSVE));
    VIXL_ASSERT(lane < GetSVELaneCount(sizeof(T) * kBitsPerByte));
    return dump_.z_[code].GetLane<T>(lane);
  }

  inline uint64_t zreg_lane(unsigned code,
                            unsigned size_in_bits,
                            int lane) const {
    switch (size_in_bits) {
      case kBRegSize:
        return zreg_lane<uint8_t>(code, lane);
      case kHRegSize:
        return zreg_lane<uint16_t>(code, lane);
      case kSRegSize:
        return zreg_lane<uint32_t>(code, lane);
      case kDRegSize:
        return zreg_lane<uint64_t>(code, lane);
    }
    VIXL_UNREACHABLE();
    return 0;
  }

  inline uint64_t preg_lane(unsigned code,
                            unsigned p_bits_per_lane,
                            int lane) const {
    VIXL_ASSERT(CPUHas(CPUFeatures::kSVE));
    VIXL_ASSERT(lane < GetSVELaneCount(p_bits_per_lane * kZRegBitsPerPRegBit));
    // Load a chunk and extract the necessary bits. The chunk size is arbitrary.
    typedef uint64_t Chunk;
    const size_t kChunkSizeInBits = sizeof(Chunk) * kBitsPerByte;
    VIXL_ASSERT(IsPowerOf2(p_bits_per_lane));
    VIXL_ASSERT(p_bits_per_lane <= kChunkSizeInBits);

    int chunk_index = (lane * p_bits_per_lane) / kChunkSizeInBits;
    int bit_index = (lane * p_bits_per_lane) % kChunkSizeInBits;
    Chunk chunk = dump_.p_[code].GetLane<Chunk>(chunk_index);
    return (chunk >> bit_index) & GetUintMask(p_bits_per_lane);
  }
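  // A worked example of the index arithmetic above (added for clarity; the
  // numbers are arbitrary): with p_bits_per_lane == 2 and lane == 37, the
  // lane's bits start at bit 74 of the P register, so chunk_index == 1 and
  // bit_index == 10, and the low two bits of (chunk >> 10) are returned.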

  inline int GetSVELaneCount(int lane_size_in_bits) const {
    VIXL_ASSERT(lane_size_in_bits > 0);
    VIXL_ASSERT((dump_.vl_ % lane_size_in_bits) == 0);
    uint64_t count = dump_.vl_ / lane_size_in_bits;
    VIXL_ASSERT(count <= INT_MAX);
    return static_cast<int>(count);
  }

  template <typename T>
  inline bool HasSVELane(T reg, int lane) const {
    VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
    return lane < GetSVELaneCount(reg.GetLaneSizeInBits());
  }

  template <typename T>
  inline uint64_t GetSVELane(T reg, int lane) const {
    VIXL_ASSERT(HasSVELane(reg, lane));
    if (reg.IsZRegister()) {
      return zreg_lane(reg.GetCode(), reg.GetLaneSizeInBits(), lane);
    } else if (reg.IsPRegister()) {
      VIXL_ASSERT((reg.GetLaneSizeInBits() % kZRegBitsPerPRegBit) == 0);
      return preg_lane(reg.GetCode(),
                       reg.GetLaneSizeInBits() / kZRegBitsPerPRegBit,
                       lane);
    } else {
      VIXL_ABORT();
    }
  }

  // Stack pointer accessors.
  inline int64_t spreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int32_t wspreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return static_cast<int32_t>(dump_.wsp_);
  }

  // Flags accessors.
  inline uint32_t flags_nzcv() const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

  inline bool IsComplete() const { return completed_; }

 private:
  // Indicate whether the dump operation has been completed.
  bool completed_;

  // Check that the lower 32 bits of x<code> exactly match the 32 bits of
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for Z and V registers.
  bool VRegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfVRegisters);
    bool match = ((dump_.q_[code].GetLane<uint64_t>(0) == dump_.d_[code]) &&
                  ((dump_.d_[code] & kSRegMask) == dump_.s_[code]) &&
                  ((dump_.s_[code] & kHRegMask) == dump_.h_[code]));
    if (CPUHas(CPUFeatures::kSVE)) {
      bool z_match =
          memcmp(&dump_.q_[code], &dump_.z_[code], kQRegSizeInBytes) == 0;
      match = match && z_match;
    }
    return match;
  }

  // Record the CPUFeatures enabled when Dump was called.
  CPUFeatures dump_cpu_features_;

  // Convenience pass-through for CPU feature checks.
  bool CPUHas(CPUFeatures::Feature feature0,
              CPUFeatures::Feature feature1 = CPUFeatures::kNone,
              CPUFeatures::Feature feature2 = CPUFeatures::kNone,
              CPUFeatures::Feature feature3 = CPUFeatures::kNone) const {
    return dump_cpu_features_.Has(feature0, feature1, feature2, feature3);
  }

  // Store all the dumped elements in a simple struct so the implementation can
  // use offsetof to quickly find the correct field.
  struct dump_t {
    // Core registers.
    uint64_t x_[kNumberOfRegisters];
    uint32_t w_[kNumberOfRegisters];

    // Floating-point registers, as raw bits.
    uint64_t d_[kNumberOfVRegisters];
    uint32_t s_[kNumberOfVRegisters];
    uint16_t h_[kNumberOfVRegisters];

    // Vector registers.
    QRegisterValue q_[kNumberOfVRegisters];
    ZRegisterValue z_[kNumberOfZRegisters];

    PRegisterValue p_[kNumberOfPRegisters];

    // The stack pointer.
    uint64_t sp_;
    uint64_t wsp_;

    // NZCV flags, stored in bits 28 to 31.
    //  bit[31] : Negative
    //  bit[30] : Zero
    //  bit[29] : Carry
    //  bit[28] : oVerflow
    uint64_t flags_;

    // The SVE "VL" (vector length) in bits.
    uint64_t vl_;
  } dump_;
};

// Some tests want to check that a value is _not_ equal to a reference value.
// These enum values can be used to control the error reporting behaviour.
enum ExpectedResult { kExpectEqual, kExpectNotEqual };

// The Equal* methods return true if the result matches the reference value.
// They all print an error message to the console if the result is incorrect
// (according to the ExpectedResult argument, or kExpectEqual if it is absent).
//
// Some of these methods don't use the RegisterDump argument, but they have to
// accept it so that they can overload those that take register arguments.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
bool Equal64(uint64_t reference,
             const RegisterDump*,
             uint64_t result,
             ExpectedResult option = kExpectEqual);
bool Equal128(QRegisterValue expected,
              const RegisterDump*,
              QRegisterValue result);

bool EqualFP16(Float16 expected, const RegisterDump*, uint16_t result);
bool EqualFP32(float expected, const RegisterDump*, float result);
bool EqualFP64(double expected, const RegisterDump*, double result);
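
// A quick usage sketch (illustrative; `core` is assumed to be a RegisterDump
// that has already been populated by generated Dump code):
//
//   VIXL_CHECK(Equal64(0x123456789abcdef0, &core, x0));
//   VIXL_CHECK(EqualFP64(1.5, &core, d0));
//   VIXL_CHECK(NotEqual64(0, &core, x1));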

bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t reference,
             const RegisterDump* core,
             const Register& reg,
             ExpectedResult option = kExpectEqual);
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg);

bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP32(float expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP64(double expected,
               const RegisterDump* core,
               const VRegister& fpreg);

bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1,
             ExpectedResult option = kExpectEqual);
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& reg);

bool EqualNzcv(uint32_t expected, uint32_t result);

bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);

template <typename T0, typename T1>
bool NotEqual64(T0 reference, const RegisterDump* core, T1 result) {
  return !Equal64(reference, core, result, kExpectNotEqual);
}

bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const ZRegister& reg,
                  int lane);

bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const PRegister& reg,
                  int lane);

// Check that each SVE lane matches the corresponding expected[] value. The
// highest-indexed array element maps to the lowest-numbered lane.
template <typename T, int N, typename R>
bool EqualSVE(const T (&expected)[N],
              const RegisterDump* core,
              const R& reg,
              bool* printed_warning) {
  VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
  VIXL_ASSERT(reg.HasLaneSize());
  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  for (int lane = 0; lane < N; ++lane) {
    if (!core->HasSVELane(reg, lane)) {
      if (*printed_warning == false) {
        *printed_warning = true;
        printf(
            "Warning: Ignoring SVE lanes beyond VL (%d bytes) "
            "because the CPU does not implement them.\n",
            core->GetSVELaneCount(kBRegSize));
      }
      break;
    }
    // Map the highest-indexed array element to the lowest-numbered lane.
    equal = EqualSVELane(expected[N - lane - 1], core, reg, lane) && equal;
  }
  return equal;
}
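
// For example (an illustrative sketch; `core` and `printed_warning` come from
// the surrounding test):
//
//   // The highest-indexed element (0x12345678) maps to lane 0 of z0.S.
//   uint32_t expected[] = {0, 0, 0x12345678};
//   VIXL_CHECK(EqualSVE(expected, &core, z0.VnS(), &printed_warning));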

// Check that each SVE lane matches the `expected` value.
template <typename R>
bool EqualSVE(uint64_t expected,
              const RegisterDump* core,
              const R& reg,
              bool* printed_warning) {
  VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
  VIXL_ASSERT(reg.HasLaneSize());
  USE(printed_warning);
  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  for (int lane = 0; lane < core->GetSVELaneCount(reg.GetLaneSizeInBits());
       ++lane) {
    equal = EqualSVELane(expected, core, reg, lane) && equal;
  }
  return equal;
}

// Check that two Z or P registers are equal.
template <typename R>
bool EqualSVE(const R& expected,
              const RegisterDump* core,
              const R& result,
              bool* printed_warning) {
  VIXL_ASSERT(result.IsZRegister() || result.IsPRegister());
  VIXL_ASSERT(AreSameFormat(expected, result));
  USE(printed_warning);

  // If the lane size is omitted, pick a default.
  if (!result.HasLaneSize()) {
    return EqualSVE(expected.VnB(), core, result.VnB(), printed_warning);
  }

  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  int lane_size = result.GetLaneSizeInBits();
  for (int lane = 0; lane < core->GetSVELaneCount(lane_size); ++lane) {
    uint64_t expected_lane = core->GetSVELane(expected, lane);
    equal = equal && EqualSVELane(expected_lane, core, result, lane);
  }
  return equal;
}

bool EqualMemory(const void* expected,
                 const void* result,
                 size_t size_in_bytes,
                 size_t zero_offset = 0);

// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers.
//
// This allows for tests which use large, parameterized blocks of registers
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed);
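
// For example (a sketch only; the register count and the 'allowed' mask are
// made up for illustration):
//
//   Register w[4];
//   Register x[4];
//   RegList allowed = 0xffff;  // Allow any of x0-x15 (arbitrary choice).
//   RegList used = PopulateRegisterArray(w, x, NULL, kXRegSize, 4, allowed);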

// As PopulateRegisterArray, but for floating-point registers.
RegList PopulateVRegisterArray(VRegister* s,
                               VRegister* d,
                               VRegister* v,
                               int reg_size,
                               int reg_count,
                               RegList allowed);

// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
// correct outcome could already be stored in the register.
//
// This always overwrites X-sized registers. If tests are operating on W
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm,
             RegList reg_list,
             uint64_t const value = 0xfedcba9876543210);
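
// For example (illustrative; the register list is arbitrary), this emits code
// that fills x0 and x1 with the default clobber value before the test writes
// its real results into them:
//
//   Clobber(&masm, x0.GetBit() | x1.GetBit());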

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm,
               RegList reg_list,
               double const value = kFP64SignallingNaN);

// As Clobber, but for a CPURegList with either FP or integer registers. When
// using this method, the clobber value is always the default for the basic
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);

uint64_t GetSignallingNan(int size_in_bits);

// This class acts as a drop-in replacement for VIXL's MacroAssembler, giving
// CalculateSVEAddress public visibility.
//
// CalculateSVEAddress normally has protected visibility, but it's useful to
// test it in isolation because it is the basis of all SVE non-scatter-gather
// load and store fall-backs.
class CalculateSVEAddressMacroAssembler : public vixl::aarch64::MacroAssembler {
 public:
  void CalculateSVEAddress(const Register& xd,
                           const SVEMemOperand& addr,
                           int vl_divisor_log2) {
    MacroAssembler::CalculateSVEAddress(xd, addr, vl_divisor_log2);
  }

  void CalculateSVEAddress(const Register& xd, const SVEMemOperand& addr) {
    MacroAssembler::CalculateSVEAddress(xd, addr);
  }
};

// This class acts as a drop-in replacement for VIXL's MacroAssembler, with
// fast NaN propagation mode switched on.
class FastNaNPropagationMacroAssembler : public MacroAssembler {
 public:
  FastNaNPropagationMacroAssembler() {
    SetFPNaNPropagationOption(FastNaNPropagation);
  }
};

// This class acts as a drop-in replacement for VIXL's MacroAssembler, with
// strict NaN propagation mode switched on.
class StrictNaNPropagationMacroAssembler : public MacroAssembler {
 public:
  StrictNaNPropagationMacroAssembler() {
    SetFPNaNPropagationOption(StrictNaNPropagation);
  }
};


// If the required features are available, return true.
// Otherwise:
//  - Print a warning message, unless *queried_can_run indicates that we've
//    already done so.
//  - Return false.
//
// If *queried_can_run is NULL, it is treated as false. Otherwise, it is set to
// true, regardless of the return value.
//
// The warning message printed on failure is used by tools/threaded_tests.py to
// count skipped tests. A test must not print more than one such warning
// message. It is safe to call CanRun multiple times per test, as long as
// queried_can_run is propagated correctly between calls, and the first call to
// CanRun requires every feature that is required by subsequent calls. If
// queried_can_run is NULL, CanRun must not be called more than once per test.
bool CanRun(const CPUFeatures& required, bool* queried_can_run = NULL);
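
// Typical use at the top of a test body (an illustrative sketch):
//
//   bool queried_can_run = false;
//   if (!CanRun(CPUFeatures(CPUFeatures::kSVE), &queried_can_run)) return;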

// PushCalleeSavedRegisters(), PopCalleeSavedRegisters() and Dump() use NEON, so
// we need to enable it in the infrastructure code for each test.
static const CPUFeatures kInfrastructureCPUFeatures(CPUFeatures::kNEON);

enum InputSet {
  kIntInputSet = 0,
  kFpInputSet,
};

// Initialise CPU registers to a predictable, non-zero set of values. This sets
// the core, vector, predicate and flag registers, but leaves the stack pointer
// at its original value.
void SetInitialMachineState(MacroAssembler* masm,
                            InputSet input_set = kIntInputSet);

// Compute a CRC32 hash of the machine state, and store it to dst. The hash
// covers core (not sp), vector (lower 128 bits), predicate (lower 16 bits)
// and flag registers.
void ComputeMachineStateHash(MacroAssembler* masm, uint32_t* dst);
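
// These two helpers are typically used as a pair (an illustrative sketch;
// `hash` must outlive the generated code because the emitted code stores to
// it):
//
//   uint32_t hash = 0;
//   SetInitialMachineState(&masm);
//   // ... emit the instruction under test ...
//   ComputeMachineStateHash(&masm, &hash);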

// The TEST_SVE macro works just like the usual TEST macro, but the resulting
// function receives a `Test* config` argument, to allow it to query the
// vector length.
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64

#define TEST_SVE_INNER(type, name)                            \
  void Test##name(Test* config);                              \
  Test* test_##name##_list[] =                                \
      {Test::MakeSVETest(128,                                 \
                         "AARCH64_" type "_" #name "_vl128",  \
                         &Test##name),                        \
       Test::MakeSVETest(384,                                 \
                         "AARCH64_" type "_" #name "_vl384",  \
                         &Test##name),                        \
       Test::MakeSVETest(2048,                                \
                         "AARCH64_" type "_" #name "_vl2048", \
                         &Test##name)};                       \
  void Test##name(Test* config)

#define SVE_SETUP_WITH_FEATURES(...) \
  SETUP_WITH_FEATURES(__VA_ARGS__);  \
  simulator.SetVectorLengthInBits(config->sve_vl_in_bits())

#else
// Otherwise, just use whatever the hardware provides.
static const int kSVEVectorLengthInBits =
    CPUFeatures::InferFromOS().Has(CPUFeatures::kSVE)
        ? CPU::ReadSVEVectorLengthInBits()
        : kZRegMinSize;

#define TEST_SVE_INNER(type, name)                           \
  void Test##name(Test* config);                             \
  Test* test_##name##_vlauto =                               \
      Test::MakeSVETest(kSVEVectorLengthInBits,              \
                        "AARCH64_" type "_" #name "_vlauto", \
                        &Test##name);                        \
  void Test##name(Test* config)

#define SVE_SETUP_WITH_FEATURES(...) \
  SETUP_WITH_FEATURES(__VA_ARGS__);  \
  USE(config)

#endif
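
// A sketch of how these macros are normally combined in the SVE test files
// (illustrative only; TEST_SVE is a thin wrapper that the test files define
// around TEST_SVE_INNER, and the SETUP/END/RUN plumbing also lives there):
//
//   TEST_SVE_INNER("ASM", example_test) {
//     SVE_SETUP_WITH_FEATURES(CPUFeatures::kSVE);
//     int vl = config->sve_vl_in_bits();
//     // ... emit and run code that depends on vl ...
//   }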

// Call masm->Insr repeatedly to allow test inputs to be set up concisely. This
// is optimised for call-site clarity, not generated code quality, so it doesn't
// exist in the MacroAssembler itself.
//
// Usage:
//
//     int values[] = { 42, 43, 44 };
//     InsrHelper(&masm, z0.VnS(), values);  // Sets z0.S = { ..., 42, 43, 44 }
//
// The rightmost (highest-indexed) array element maps to the lowest-numbered
// lane.
template <typename T, size_t N>
void InsrHelper(MacroAssembler* masm,
                const ZRegister& zdn,
                const T (&values)[N]) {
  for (size_t i = 0; i < N; i++) {
    masm->Insr(zdn, values[i]);
  }
}

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_TEST_UTILS_AARCH64_H_