// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_TEST_UTILS_AARCH64_H_
#define VIXL_AARCH64_TEST_UTILS_AARCH64_H_

#include "test-runner.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

namespace vixl {
namespace aarch64 {

// Signalling and quiet NaNs in double format, constructed such that the bottom
// 32 bits look like a signalling or quiet NaN (as appropriate) when
// interpreted as a float. These values are not architecturally significant,
// but they're useful in tests for initialising registers.
extern const double kFP64SignallingNaN;
extern const double kFP64QuietNaN;

// Signalling and quiet NaNs in float format.
extern const float kFP32SignallingNaN;
extern const float kFP32QuietNaN;

// Signalling and quiet NaNs in half-precision float format.
extern const Float16 kFP16SignallingNaN;
extern const Float16 kFP16QuietNaN;
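
// For illustration, one plausible way to build such values (a sketch; the
// actual constants are defined in the corresponding .cc file) is from raw
// IEEE-754 bit patterns, where a signalling NaN has the top fraction bit clear
// and a non-zero payload, and a quiet NaN has the top fraction bit set:
//
//   const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);
//   const float kFP32QuietNaN = RawbitsToFloat(0x7fc00001);
//   // A double whose bottom 32 bits are the float signalling NaN above:
//   const double kFP64SignallingNaN =
//       RawbitsToDouble(UINT64_C(0x7ff000007f800001));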

// Structure representing Q registers in a RegisterDump.
struct vec128_t {
  uint64_t l;
  uint64_t h;
};

// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.h_[0]) == kHRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.q_[0]) == kQRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
  // It needs to be able to use the stack temporarily, and requires that the
  // current stack pointer is sp and is properly aligned.
  //
  // The dumping code is generated through the given MacroAssembler. No
  // registers are corrupted in the process, but the stack is used briefly. The
  // flags will be corrupted during this call.
  void Dump(MacroAssembler* assm);

  // Register accessors.
  inline int32_t wreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

  inline int64_t xreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // VRegister accessors.
  inline uint16_t hreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.h_[code];
  }

  inline uint32_t sreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.s_[code];
  }

  inline Float16 hreg(unsigned code) const {
    return RawbitsToFloat16(hreg_bits(code));
  }

  inline float sreg(unsigned code) const {
    return RawbitsToFloat(sreg_bits(code));
  }

  inline uint64_t dreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.d_[code];
  }

  inline double dreg(unsigned code) const {
    return RawbitsToDouble(dreg_bits(code));
  }

  inline vec128_t qreg(unsigned code) const { return dump_.q_[code]; }

  // Stack pointer accessors.
  inline int64_t spreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int32_t wspreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return static_cast<int32_t>(dump_.wsp_);
  }

  // Flags accessors.
  inline uint32_t flags_nzcv() const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

  inline bool IsComplete() const { return completed_; }

 private:
  // Indicate whether the dump operation has been completed.
  bool completed_;

  // Check that the lower 32 bits of x<code> exactly match the 32 bits of
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for floating-point registers.
  bool FPRegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfVRegisters);
    return (((dump_.d_[code] & kSRegMask) == dump_.s_[code]) ||
            ((dump_.s_[code] & kHRegMask) == dump_.h_[code]));
  }

  // Store all the dumped elements in a simple struct so the implementation can
  // use offsetof to quickly find the correct field.
  struct dump_t {
    // Core registers.
    uint64_t x_[kNumberOfRegisters];
    uint32_t w_[kNumberOfRegisters];

    // Floating-point registers, as raw bits.
    uint64_t d_[kNumberOfVRegisters];
    uint32_t s_[kNumberOfVRegisters];
    uint16_t h_[kNumberOfVRegisters];

    // Vector registers.
    vec128_t q_[kNumberOfVRegisters];

    // The stack pointer.
    uint64_t sp_;
    uint64_t wsp_;

    // NZCV flags, stored in bits 28 to 31.
    //  bit[31] : Negative
    //  bit[30] : Zero
    //  bit[29] : Carry
    //  bit[28] : oVerflow
    uint64_t flags_;
  } dump_;
};
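
// A minimal usage sketch (hypothetical test fragment; the assembly and
// execution harness comes from test-runner.h and the surrounding tests):
//
//   MacroAssembler masm;
//   RegisterDump core;
//
//   masm.Mov(x0, 0x123);  // Code under test.
//   core.Dump(&masm);     // Emit code that snapshots every register.
//   // ... finalize the buffer and run the generated code ...
//
//   // Once the code has executed, the snapshot can be queried.
//   VIXL_CHECK(core.xreg(0) == 0x123);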

// Some tests want to check that a value is _not_ equal to a reference value.
// These enum values can be used to control the error reporting behaviour.
enum ExpectedResult { kExpectEqual, kExpectNotEqual };

// The Equal* methods return true if the result matches the reference value.
// They all print an error message to the console if the result is incorrect
// (according to the ExpectedResult argument, or kExpectEqual if it is absent).
//
// Some of these methods don't use the RegisterDump argument, but they have to
// accept it so that they can overload the variants that take register
// arguments.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
bool Equal64(uint64_t reference,
             const RegisterDump*,
             uint64_t result,
             ExpectedResult option = kExpectEqual);

bool EqualFP16(Float16 expected, const RegisterDump*, uint16_t result);
bool EqualFP32(float expected, const RegisterDump*, float result);
bool EqualFP64(double expected, const RegisterDump*, double result);

bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t reference,
             const RegisterDump* core,
             const Register& reg,
             ExpectedResult option = kExpectEqual);
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg);

bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP32(float expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP64(double expected,
               const RegisterDump* core,
               const VRegister& fpreg);

bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1,
             ExpectedResult option = kExpectEqual);
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& reg);

bool EqualNzcv(uint32_t expected, uint32_t result);

bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);

template <typename T0, typename T1>
bool NotEqual64(T0 reference, const RegisterDump* core, T1 result) {
  return !Equal64(reference, core, result, kExpectNotEqual);
}
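
// For example (a sketch, assuming a completed RegisterDump named 'core'):
//
//   bool eq = Equal64(0x1, &core, x0);     // True if x0 held 0x1.
//   bool ne = NotEqual64(0x1, &core, x0);  // True if x0 did not hold 0x1.
//
// A failing check prints a diagnostic to the console, so tests typically wrap
// these calls in an assertion macro.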

// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers.
//
// This allows for tests which use large, parameterized blocks of registers
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed);
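
// For example, to pick four X-sized registers while keeping x0 and lr free (a
// sketch; the exclusion mask is illustrative):
//
//   Register x[4];
//   RegList allowed = ~(x0.GetBit() | lr.GetBit());
//   RegList used = PopulateRegisterArray(NULL, x, NULL, kXRegSize, 4, allowed);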

// As PopulateRegisterArray, but for floating-point registers.
RegList PopulateVRegisterArray(VRegister* s,
                               VRegister* d,
                               VRegister* v,
                               int reg_size,
                               int reg_count,
                               RegList allowed);

// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
// correct outcome could already be stored in the register.
//
// This always overwrites X-sized registers. If tests are operating on W
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm,
             RegList reg_list,
             uint64_t const value = 0xfedcba9876543210);

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm,
               RegList reg_list,
               double const value = kFP64SignallingNaN);

// As Clobber, but for a CPURegList with either FP or integer registers. When
// using this method, the clobber value is always the default for the basic
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
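
// For example, a load test might clobber its destination registers up front so
// that a stale value cannot masquerade as a correct result (a sketch; GetBit
// composes the RegList masks):
//
//   Clobber(&masm, x0.GetBit() | x1.GetBit());  // x0, x1 <- default value.
//   ClobberFP(&masm, d0.GetBit());              // d0 <- default NaN.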

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_TEST_UTILS_AARCH64_H_