// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_TEST_UTILS_AARCH64_H_
#define VIXL_AARCH64_TEST_UTILS_AARCH64_H_

#include "test-runner.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

namespace vixl {
namespace aarch64 {

// Signalling and quiet NaNs in double format, constructed such that the bottom
// 32 bits look like a signalling or quiet NaN (as appropriate) when
// interpreted as a float. These values are not architecturally significant,
// but they're useful in tests for initialising registers.
extern const double kFP64SignallingNaN;
extern const double kFP64QuietNaN;

// Signalling and quiet NaNs in float format.
extern const float kFP32SignallingNaN;
extern const float kFP32QuietNaN;
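// For illustration, a sketch of definitions consistent with the comment above
// (the bit patterns here are an assumption chosen to match that description;
// the actual values are defined in test-utils-aarch64.cc). Each double has an
// all-ones exponent, uses bit 51 (the quiet bit) to select signalling (0) or
// quiet (1), and places a float NaN of the same kind in its bottom 32 bits:
//
//   const double kFP64SignallingNaN =
//       RawbitsToDouble(UINT64_C(0x7ff000007f800001));  // Low word: float sNaN.
//   const double kFP64QuietNaN =
//       RawbitsToDouble(UINT64_C(0x7ff800007fc00001));  // Low word: float qNaN.
//   const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);
//   const float kFP32QuietNaN = RawbitsToFloat(0x7fc00001);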
// Structure representing Q registers in a RegisterDump.
struct vec128_t {
  uint64_t l;
  uint64_t h;
};

// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.q_[0]) == kQRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
  // It needs to be able to use the stack temporarily, and requires that the
  // current stack pointer is sp, and is properly aligned.
  //
  // The dumping code is generated through the given MacroAssembler. No
  // registers are corrupted in the process, but the stack is used briefly. The
  // flags will be corrupted during this call.
  void Dump(MacroAssembler* assm);

  // Register accessors.
  inline int32_t wreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

  inline int64_t xreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // FPRegister accessors.
  inline uint32_t sreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.s_[code];
  }

  inline float sreg(unsigned code) const {
    return RawbitsToFloat(sreg_bits(code));
  }

  inline uint64_t dreg_bits(unsigned code) const {
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.d_[code];
  }

  inline double dreg(unsigned code) const {
    return RawbitsToDouble(dreg_bits(code));
  }

  inline vec128_t qreg(unsigned code) const { return dump_.q_[code]; }

  // Stack pointer accessors.
  inline int64_t spreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int32_t wspreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return static_cast<int32_t>(dump_.wsp_);
  }

  // Flags accessors.
  inline uint32_t flags_nzcv() const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

  inline bool IsComplete() const { return completed_; }

 private:
  // Indicate whether the dump operation has been completed.
  bool completed_;

  // Check that the lower 32 bits of x<code> exactly match the 32 bits of
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for floating-point registers.
  bool FPRegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfFPRegisters);
    return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
  }

  // Store all the dumped elements in a simple struct so the implementation can
  // use offsetof to quickly find the correct field.
  struct dump_t {
    // Core registers.
    uint64_t x_[kNumberOfRegisters];
    uint32_t w_[kNumberOfRegisters];

    // Floating-point registers, as raw bits.
    uint64_t d_[kNumberOfFPRegisters];
    uint32_t s_[kNumberOfFPRegisters];

    // Vector registers.
    vec128_t q_[kNumberOfVRegisters];

    // The stack pointer.
    uint64_t sp_;
    uint64_t wsp_;

    // NZCV flags, stored in bits 28 to 31.
    // bit[31] : Negative
    // bit[30] : Zero
    // bit[29] : Carry
    // bit[28] : oVerflow
    uint64_t flags_;
  } dump_;
};
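// A hedged usage sketch (illustration only, not part of this header). It
// assumes the usual VIXL test convention `#define __ masm.` and a harness
// that finalizes `masm` and executes the generated code (for example, the
// simulator):
//
//   RegisterDump core;
//   __ Mov(x0, 0x123);   // Code under test.
//   core.Dump(&masm);    // Emit the snapshot code; NZCV will be corrupted.
//   // ... finalize `masm` and run the generated code ...
//   VIXL_CHECK(core.IsComplete());
//   VIXL_CHECK(Equal64(0x123, &core, x0));   // Equal64 is declared below.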
// Some of these methods don't use the RegisterDump argument, but they have to
// accept it so that they can overload those that take register arguments.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);

bool EqualFP32(float expected, const RegisterDump*, float result);
bool EqualFP64(double expected, const RegisterDump*, double result);

bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg);

bool EqualFP32(float expected,
               const RegisterDump* core,
               const FPRegister& fpreg);
bool EqualFP64(double expected,
               const RegisterDump* core,
               const FPRegister& fpreg);

bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1);
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& reg);

bool EqualNzcv(uint32_t expected, uint32_t result);

bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);

// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers.
//
// This allows for tests which use large, parameterized blocks of registers
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed);

// As PopulateRegisterArray, but for floating-point registers.
RegList PopulateFPRegisterArray(FPRegister* s,
                                FPRegister* d,
                                FPRegister* v,
                                int reg_size,
                                int reg_count,
                                RegList allowed);

// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
// correct outcome could already be stored in the register.
//
// This always overwrites X-sized registers. If tests are operating on W
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm,
             RegList reg_list,
             uint64_t const value = 0xfedcba9876543210);

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm,
               RegList reg_list,
               double const value = kFP64SignallingNaN);

// As Clobber, but for a CPURegList with either FP or integer registers. When
// using this method, the clobber value is always the default for the basic
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_TEST_UTILS_AARCH64_H_