// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_TEST_UTILS_AARCH64_H_
#define VIXL_AARCH64_TEST_UTILS_AARCH64_H_

#include "test-runner.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

namespace vixl {
namespace aarch64 {

// Signalling and quiet NaNs in double format, constructed such that the bottom
// 32 bits look like a signalling or quiet NaN (as appropriate) when interpreted
// as a float. These values are not architecturally significant, but they're
// useful in tests for initialising registers.
extern const double kFP64SignallingNaN;
extern const double kFP64QuietNaN;

// Signalling and quiet NaNs in float format.
extern const float kFP32SignallingNaN;
extern const float kFP32QuietNaN;

// Signalling and quiet NaNs in half-precision float format.
extern const Float16 kFP16SignallingNaN;
extern const Float16 kFP16QuietNaN;

// Structure representing Q registers in a RegisterDump.
struct vec128_t {
  uint64_t l;
  uint64_t h;
};

// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.h_[0]) == kHRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.q_[0]) == kQRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
  // It needs to be able to use the stack temporarily, and requires that the
  // current stack pointer is sp, and is properly aligned.
  //
  // The dumping code is generated through the given MacroAssembler. No registers
  // are corrupted in the process, but the stack is used briefly. The flags will
  // be corrupted during this call.
  void Dump(MacroAssembler* assm);
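
  // A minimal usage sketch (illustrative only; `masm` and the "run" step are
  // assumptions about the surrounding test harness, not part of this header):
  //
  //   RegisterDump dump;
  //   // ... emit the code under test through `masm` ...
  //   dump.Dump(&masm);            // Append code that snapshots the registers.
  //   // ... assemble and run the generated code (simulator or hardware) ...
  //   VIXL_CHECK(dump.IsComplete());
  //   int64_t x0_value = dump.xreg(0);       // Recorded value of x0.
  //   uint32_t nzcv = dump.flags_nzcv();     // Recorded NZCV flags.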

  // Register accessors.
  inline int32_t wreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

  inline int64_t xreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // FPRegister accessors.
  inline uint16_t hreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.h_[code];
  }

  inline uint32_t sreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.s_[code];
  }

  inline Float16 hreg(unsigned code) const {
    return RawbitsToFloat16(hreg_bits(code));
  }

  inline float sreg(unsigned code) const {
    return RawbitsToFloat(sreg_bits(code));
  }

  inline uint64_t dreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.d_[code];
  }

  inline double dreg(unsigned code) const {
    return RawbitsToDouble(dreg_bits(code));
  }

  inline vec128_t qreg(unsigned code) const { return dump_.q_[code]; }

  // Stack pointer accessors.
  inline int64_t spreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int32_t wspreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return static_cast<int32_t>(dump_.wsp_);
  }

  // Flags accessors.
  inline uint32_t flags_nzcv() const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

  inline bool IsComplete() const { return completed_; }

 private:
  // Indicate whether the dump operation has been completed.
  bool completed_;

  // Check that the lower 32 bits of x<code> exactly match the 32 bits of
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for floating-point registers.
  bool FPRegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfFPRegisters);
    return (((dump_.d_[code] & kSRegMask) == dump_.s_[code]) ||
            ((dump_.s_[code] & kHRegMask) == dump_.h_[code]));
  }

  // Store all the dumped elements in a simple struct so the implementation can
  // use offsetof to quickly find the correct field.
  struct dump_t {
    // Core registers.
    uint64_t x_[kNumberOfRegisters];
    uint32_t w_[kNumberOfRegisters];

    // Floating-point registers, as raw bits.
    uint64_t d_[kNumberOfFPRegisters];
    uint32_t s_[kNumberOfFPRegisters];
    uint16_t h_[kNumberOfFPRegisters];

    // Vector registers.
    vec128_t q_[kNumberOfVRegisters];

    // The stack pointer.
    uint64_t sp_;
    uint64_t wsp_;

    // NZCV flags, stored in bits 28 to 31.
    // bit[31] : Negative
    // bit[30] : Zero
    // bit[29] : Carry
    // bit[28] : oVerflow
    uint64_t flags_;
  } dump_;
};

// Some of these methods don't use the RegisterDump argument, but they have to
// accept it so that they can overload those that take register arguments.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);

bool EqualFP16(Float16 expected, const RegisterDump*, uint16_t result);
bool EqualFP32(float expected, const RegisterDump*, float result);
bool EqualFP64(double expected, const RegisterDump*, double result);

bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg);

bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const FPRegister& fpreg);
bool EqualFP32(float expected,
               const RegisterDump* core,
               const FPRegister& fpreg);
bool EqualFP64(double expected,
               const RegisterDump* core,
               const FPRegister& fpreg);

bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1);
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& reg);

bool EqualNzcv(uint32_t expected, uint32_t result);

bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
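
// A sketch of how these predicates are typically combined with a RegisterDump.
// (Illustrative only; the surrounding setup and the flag constant are
// assumptions about the calling test, not requirements of this header.)
//
//   RegisterDump dump;
//   // ... generate code, call dump.Dump(&masm), run the generated code ...
//   VIXL_CHECK(Equal64(0x0123456789abcdef, &dump, x1));  // Check x1.
//   VIXL_CHECK(Equal32(0x89abcdef, &dump, w1));          // Check its W alias.
//   VIXL_CHECK(EqualNzcv(ZCFlag, dump.flags_nzcv()));    // Check the flags.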

// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers.
//
// This allows for tests which use large, parameterized blocks of registers
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed);
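
// For example, a push/pop test might request ten register triples while
// staying away from a few reserved register codes. (A sketch; the mask and the
// requested size are purely illustrative.)
//
//   Register w[10], x[10], r[10];
//   RegList allowed = 0x0000000003ffffff;  // Register codes 0-25 only.
//   RegList used = PopulateRegisterArray(w, x, r, kXRegSize, 10, allowed);
//   // w[i]/x[i] are the W/X views of the i-th allocated register, r[i] is the
//   // <reg_size>-sized view, and `used` records which codes were handed out.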

// As PopulateRegisterArray, but for floating-point registers.
RegList PopulateFPRegisterArray(FPRegister* s,
                                FPRegister* d,
                                FPRegister* v,
                                int reg_size,
                                int reg_count,
                                RegList allowed);

// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
// correct outcome could already be stored in the register.
//
// This always overwrites X-sized registers. If tests are operating on W
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm,
             RegList reg_list,
             uint64_t const value = 0xfedcba9876543210);

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm,
               RegList reg_list,
               double const value = kFP64SignallingNaN);

// As Clobber, but for a CPURegList with either FP or integer registers. When
// using this method, the clobber value is always the default for the basic
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
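
// A typical test clobbers its working registers before generating the code
// under test, so that a stale value cannot masquerade as a correct result.
// (A sketch; the register choices, and the use of CPURegister::GetBit() to
// build the RegList, are assumptions made for illustration.)
//
//   Clobber(&masm, x0.GetBit() | x1.GetBit());    // x0/x1 get the default value.
//   ClobberFP(&masm, d0.GetBit() | d1.GetBit());  // d0/d1 get kFP64SignallingNaN.
//   Clobber(&masm, CPURegList(x2, x3));           // Same, via a CPURegList.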

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_TEST_UTILS_AARCH64_H_