1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
28 
29 #include <errno.h>
30 #include <unistd.h>
31 
32 #include <cmath>
33 #include <cstring>
34 #include <limits>
35 
36 #include "simulator-aarch64.h"
37 
38 namespace vixl {
39 namespace aarch64 {
40 
41 using vixl::internal::SimFloat16;
42 
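// The PC is compared against this address in Run(); reaching it ends the simulation.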
43 const Instruction* Simulator::kEndOfSimAddress = NULL;
44 
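// Overwrite bits [msb:lsb] of the register with `bits`, asserting that none of
// the affected bits are covered by the write-ignore mask.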
45 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
46   int width = msb - lsb + 1;
47   VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
48 
49   bits <<= lsb;
50   uint32_t mask = ((1 << width) - 1) << lsb;
51   VIXL_ASSERT((mask & write_ignore_mask_) == 0);
52 
53   value_ = (value_ & ~mask) | (bits & mask);
54 }
55 
56 
57 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
58   switch (id) {
59     case NZCV:
60       return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
61     case FPCR:
62       return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
63     default:
64       VIXL_UNREACHABLE();
65       return SimSystemRegister();
66   }
67 }
68 
69 
70 Simulator::Simulator(Decoder* decoder, FILE* stream, SimStack::Allocated stack)
71     : memory_(std::move(stack)),
72       movprfx_(NULL),
73       cpu_features_auditor_(decoder, CPUFeatures::All()) {
74   // Ensure that shift operations act as the simulator expects.
75   VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
76   VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7fffffff);
77 
78   // Set up a placeholder pipe for CanReadMemory.
79   VIXL_CHECK(pipe(placeholder_pipe_fd_) == 0);
80 
81   // Set up the decoder.
82   decoder_ = decoder;
83   decoder_->AppendVisitor(this);
84 
85   stream_ = stream;
86 
87   print_disasm_ = new PrintDisassembler(stream_);
88   // The Simulator and Disassembler share the same available list, held by the
89   // auditor. The Disassembler only annotates instructions with features that
90   // are _not_ available, so registering the auditor should have no effect
91   // unless the simulator is about to abort (due to missing features). In
92   // practice, this means that with trace enabled, the simulator will crash just
93   // after the disassembler prints the instruction, with the missing features
94   // enumerated.
95   print_disasm_->RegisterCPUFeaturesAuditor(&cpu_features_auditor_);
96 
97   SetColouredTrace(false);
98   trace_parameters_ = LOG_NONE;
99 
100   // We have to configure the SVE vector register length before calling
101   // ResetState().
102   SetVectorLengthInBits(kZRegMinSize);
103 
104   ResetState();
105 
106   // Print a warning about exclusive-access instructions, but only the first
107   // time they are encountered. This warning can be silenced using
108   // SilenceExclusiveAccessWarning().
109   print_exclusive_access_warning_ = true;
110 
111   guard_pages_ = false;
112 
113   // Initialize the common state of RNDR and RNDRRS.
114   uint16_t seed[3] = {11, 22, 33};
115   VIXL_STATIC_ASSERT(sizeof(seed) == sizeof(rand_state_));
116   memcpy(rand_state_, seed, sizeof(rand_state_));
117 
118   // Initialize all bits of pseudo predicate register to true.
119   LogicPRegister ones(pregister_all_true_);
120   ones.SetAllBits();
121 }
122 
123 void Simulator::ResetSystemRegisters() {
124   // Reset the system registers.
125   nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
126   fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
127   ResetFFR();
128 }
129 
130 void Simulator::ResetRegisters() {
131   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
132     WriteXRegister(i, 0xbadbeef);
133   }
134   // Returning to address 0 exits the Simulator.
135   WriteLr(kEndOfSimAddress);
136 }
137 
138 void Simulator::ResetVRegisters() {
139   // Set SVE/FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
140   VIXL_ASSERT((GetVectorLengthInBytes() % kDRegSizeInBytes) == 0);
141   int lane_count = GetVectorLengthInBytes() / kDRegSizeInBytes;
142   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
143     VIXL_ASSERT(vregisters_[i].GetSizeInBytes() == GetVectorLengthInBytes());
144     vregisters_[i].NotifyAccessAsZ();
145     for (int lane = 0; lane < lane_count; lane++) {
146       // Encode the register number and (D-sized) lane into each NaN, to
147       // make them easier to trace.
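      // For example, register 5, lane 2 is initialised to 0x7ff0f0057f80f002.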
148       uint64_t nan_bits = 0x7ff0f0007f80f000 | (0x0000000100000000 * i) |
149                           (0x0000000000000001 * lane);
150       VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits & kDRegMask)));
151       VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & kSRegMask)));
152       vregisters_[i].Insert(lane, nan_bits);
153     }
154   }
155 }
156 
157 void Simulator::ResetPRegisters() {
158   VIXL_ASSERT((GetPredicateLengthInBytes() % kHRegSizeInBytes) == 0);
159   int lane_count = GetPredicateLengthInBytes() / kHRegSizeInBytes;
160   // Ensure the register configuration fits in this bit encoding.
161   VIXL_STATIC_ASSERT(kNumberOfPRegisters <= UINT8_MAX);
162   VIXL_ASSERT(lane_count <= UINT8_MAX);
163   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
164     VIXL_ASSERT(pregisters_[i].GetSizeInBytes() == GetPredicateLengthInBytes());
165     for (int lane = 0; lane < lane_count; lane++) {
166       // Encode the register number and (H-sized) lane into each lane slot.
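      // For example, lane 2 of p3 is set to 0x0203.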
167       uint16_t bits = (0x0100 * lane) | i;
168       pregisters_[i].Insert(lane, bits);
169     }
170   }
171 }
172 
173 void Simulator::ResetFFR() {
174   VIXL_ASSERT((GetPredicateLengthInBytes() % kHRegSizeInBytes) == 0);
175   int default_active_lanes = GetPredicateLengthInBytes() / kHRegSizeInBytes;
176   ffr_register_.Write(static_cast<uint16_t>(GetUintMask(default_active_lanes)));
177 }
178 
179 void Simulator::ResetState() {
180   ResetSystemRegisters();
181   ResetRegisters();
182   ResetVRegisters();
183   ResetPRegisters();
184 
185   WriteSp(memory_.GetStack().GetBase());
186 
187   pc_ = NULL;
188   pc_modified_ = false;
189 
190   // BTI state.
191   btype_ = DefaultBType;
192   next_btype_ = DefaultBType;
193 }
194 
195 void Simulator::SetVectorLengthInBits(unsigned vector_length) {
196   VIXL_ASSERT((vector_length >= kZRegMinSize) &&
197               (vector_length <= kZRegMaxSize));
198   VIXL_ASSERT((vector_length % kZRegMinSize) == 0);
199   vector_length_ = vector_length;
200 
201   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
202     vregisters_[i].SetSizeInBytes(GetVectorLengthInBytes());
203   }
204   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
205     pregisters_[i].SetSizeInBytes(GetPredicateLengthInBytes());
206   }
207 
208   ffr_register_.SetSizeInBytes(GetPredicateLengthInBytes());
209 
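  // The registers have been resized, so reinitialise their contents.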
210   ResetVRegisters();
211   ResetPRegisters();
212   ResetFFR();
213 }
214 
215 Simulator::~Simulator() {
216   // The decoder may outlive the simulator.
217   decoder_->RemoveVisitor(print_disasm_);
218   delete print_disasm_;
219   close(placeholder_pipe_fd_[0]);
220   close(placeholder_pipe_fd_[1]);
221 }
222 
223 
224 void Simulator::Run() {
225   // Flush any written registers before executing anything, so that
226   // manually-set registers are logged _before_ the first instruction.
227   LogAllWrittenRegisters();
228 
229   while (pc_ != kEndOfSimAddress) {
230     ExecuteInstruction();
231   }
232 }
233 
234 
235 void Simulator::RunFrom(const Instruction* first) {
236   WritePc(first, NoBranchLog);
237   Run();
238 }
239 
240 
241 // clang-format off
242 const char* Simulator::xreg_names[] = {"x0",  "x1",  "x2",  "x3",  "x4",  "x5",
243                                        "x6",  "x7",  "x8",  "x9",  "x10", "x11",
244                                        "x12", "x13", "x14", "x15", "x16", "x17",
245                                        "x18", "x19", "x20", "x21", "x22", "x23",
246                                        "x24", "x25", "x26", "x27", "x28", "x29",
247                                        "lr",  "xzr", "sp"};
248 
249 const char* Simulator::wreg_names[] = {"w0",  "w1",  "w2",  "w3",  "w4",  "w5",
250                                        "w6",  "w7",  "w8",  "w9",  "w10", "w11",
251                                        "w12", "w13", "w14", "w15", "w16", "w17",
252                                        "w18", "w19", "w20", "w21", "w22", "w23",
253                                        "w24", "w25", "w26", "w27", "w28", "w29",
254                                        "w30", "wzr", "wsp"};
255 
256 const char* Simulator::breg_names[] = {"b0",  "b1",  "b2",  "b3",  "b4",  "b5",
257                                        "b6",  "b7",  "b8",  "b9",  "b10", "b11",
258                                        "b12", "b13", "b14", "b15", "b16", "b17",
259                                        "b18", "b19", "b20", "b21", "b22", "b23",
260                                        "b24", "b25", "b26", "b27", "b28", "b29",
261                                        "b30", "b31"};
262 
263 const char* Simulator::hreg_names[] = {"h0",  "h1",  "h2",  "h3",  "h4",  "h5",
264                                        "h6",  "h7",  "h8",  "h9",  "h10", "h11",
265                                        "h12", "h13", "h14", "h15", "h16", "h17",
266                                        "h18", "h19", "h20", "h21", "h22", "h23",
267                                        "h24", "h25", "h26", "h27", "h28", "h29",
268                                        "h30", "h31"};
269 
270 const char* Simulator::sreg_names[] = {"s0",  "s1",  "s2",  "s3",  "s4",  "s5",
271                                        "s6",  "s7",  "s8",  "s9",  "s10", "s11",
272                                        "s12", "s13", "s14", "s15", "s16", "s17",
273                                        "s18", "s19", "s20", "s21", "s22", "s23",
274                                        "s24", "s25", "s26", "s27", "s28", "s29",
275                                        "s30", "s31"};
276 
277 const char* Simulator::dreg_names[] = {"d0",  "d1",  "d2",  "d3",  "d4",  "d5",
278                                        "d6",  "d7",  "d8",  "d9",  "d10", "d11",
279                                        "d12", "d13", "d14", "d15", "d16", "d17",
280                                        "d18", "d19", "d20", "d21", "d22", "d23",
281                                        "d24", "d25", "d26", "d27", "d28", "d29",
282                                        "d30", "d31"};
283 
284 const char* Simulator::vreg_names[] = {"v0",  "v1",  "v2",  "v3",  "v4",  "v5",
285                                        "v6",  "v7",  "v8",  "v9",  "v10", "v11",
286                                        "v12", "v13", "v14", "v15", "v16", "v17",
287                                        "v18", "v19", "v20", "v21", "v22", "v23",
288                                        "v24", "v25", "v26", "v27", "v28", "v29",
289                                        "v30", "v31"};
290 
291 const char* Simulator::zreg_names[] = {"z0",  "z1",  "z2",  "z3",  "z4",  "z5",
292                                        "z6",  "z7",  "z8",  "z9",  "z10", "z11",
293                                        "z12", "z13", "z14", "z15", "z16", "z17",
294                                        "z18", "z19", "z20", "z21", "z22", "z23",
295                                        "z24", "z25", "z26", "z27", "z28", "z29",
296                                        "z30", "z31"};
297 
298 const char* Simulator::preg_names[] = {"p0",  "p1",  "p2",  "p3",  "p4",  "p5",
299                                        "p6",  "p7",  "p8",  "p9",  "p10", "p11",
300                                        "p12", "p13", "p14", "p15"};
301 // clang-format on
302 
303 
304 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
305   // If the code represents the stack pointer, index the name after zr.
306   if ((code == kSPRegInternalCode) ||
307       ((code == kZeroRegCode) && (mode == Reg31IsStackPointer))) {
308     code = kZeroRegCode + 1;
309   }
310   VIXL_ASSERT(code < ArrayLength(wreg_names));
311   return wreg_names[code];
312 }
313 
314 
315 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
316   // If the code represents the stack pointer, index the name after zr.
317   if ((code == kSPRegInternalCode) ||
318       ((code == kZeroRegCode) && (mode == Reg31IsStackPointer))) {
319     code = kZeroRegCode + 1;
320   }
321   VIXL_ASSERT(code < ArrayLength(xreg_names));
322   return xreg_names[code];
323 }
324 
325 
326 const char* Simulator::BRegNameForCode(unsigned code) {
327   VIXL_ASSERT(code < kNumberOfVRegisters);
328   return breg_names[code];
329 }
330 
331 
332 const char* Simulator::HRegNameForCode(unsigned code) {
333   VIXL_ASSERT(code < kNumberOfVRegisters);
334   return hreg_names[code];
335 }
336 
337 
338 const char* Simulator::SRegNameForCode(unsigned code) {
339   VIXL_ASSERT(code < kNumberOfVRegisters);
340   return sreg_names[code];
341 }
342 
343 
344 const char* Simulator::DRegNameForCode(unsigned code) {
345   VIXL_ASSERT(code < kNumberOfVRegisters);
346   return dreg_names[code];
347 }
348 
349 
350 const char* Simulator::VRegNameForCode(unsigned code) {
351   VIXL_ASSERT(code < kNumberOfVRegisters);
352   return vreg_names[code];
353 }
354 
355 
356 const char* Simulator::ZRegNameForCode(unsigned code) {
357   VIXL_ASSERT(code < kNumberOfZRegisters);
358   return zreg_names[code];
359 }
360 
361 
362 const char* Simulator::PRegNameForCode(unsigned code) {
363   VIXL_ASSERT(code < kNumberOfPRegisters);
364   return preg_names[code];
365 }
366 
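// Expand a predicate into a vector register: each active byte lane of pg
// becomes 0xff in the result, and each inactive lane becomes 0x00.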
367 SimVRegister Simulator::ExpandToSimVRegister(const SimPRegister& pg) {
368   SimVRegister ones, result;
369   dup_immediate(kFormatVnB, ones, 0xff);
370   mov_zeroing(kFormatVnB, result, pg, ones);
371   return result;
372 }
373 
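// Convert a vector back into a predicate: a lane of pd is set iff the
// corresponding (vform-sized) lane of vreg is non-zero.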
374 void Simulator::ExtractFromSimVRegister(VectorFormat vform,
375                                         SimPRegister& pd,
376                                         SimVRegister vreg) {
377   SimVRegister zero;
378   dup_immediate(kFormatVnB, zero, 0);
379   SVEIntCompareVectorsHelper(ne,
380                              vform,
381                              pd,
382                              GetPTrue(),
383                              vreg,
384                              zero,
385                              false,
386                              LeaveFlags);
387 }
388 
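// ANSI terminal escape sequences used to colour the trace output.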
389 #define COLOUR(colour_code) "\033[0;" colour_code "m"
390 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
391 #define COLOUR_HIGHLIGHT "\033[43m"
392 #define NORMAL ""
393 #define GREY "30"
394 #define RED "31"
395 #define GREEN "32"
396 #define YELLOW "33"
397 #define BLUE "34"
398 #define MAGENTA "35"
399 #define CYAN "36"
400 #define WHITE "37"
401 void Simulator::SetColouredTrace(bool value) {
402   coloured_trace_ = value;
403 
404   clr_normal = value ? COLOUR(NORMAL) : "";
405   clr_flag_name = value ? COLOUR_BOLD(WHITE) : "";
406   clr_flag_value = value ? COLOUR(NORMAL) : "";
407   clr_reg_name = value ? COLOUR_BOLD(CYAN) : "";
408   clr_reg_value = value ? COLOUR(CYAN) : "";
409   clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : "";
410   clr_vreg_value = value ? COLOUR(MAGENTA) : "";
411   clr_preg_name = value ? COLOUR_BOLD(GREEN) : "";
412   clr_preg_value = value ? COLOUR(GREEN) : "";
413   clr_memory_address = value ? COLOUR_BOLD(BLUE) : "";
414   clr_warning = value ? COLOUR_BOLD(YELLOW) : "";
415   clr_warning_message = value ? COLOUR(YELLOW) : "";
416   clr_printf = value ? COLOUR(GREEN) : "";
417   clr_branch_marker = value ? COLOUR(GREY) COLOUR_HIGHLIGHT : "";
418 
419   if (value) {
420     print_disasm_->SetCPUFeaturesPrefix("// Needs: " COLOUR_BOLD(RED));
421     print_disasm_->SetCPUFeaturesSuffix(COLOUR(NORMAL));
422   } else {
423     print_disasm_->SetCPUFeaturesPrefix("// Needs: ");
424     print_disasm_->SetCPUFeaturesSuffix("");
425   }
426 }
427 
428 
429 void Simulator::SetTraceParameters(int parameters) {
430   bool disasm_before = trace_parameters_ & LOG_DISASM;
431   trace_parameters_ = parameters;
432   bool disasm_after = trace_parameters_ & LOG_DISASM;
433 
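  // Register or remove the disassembler visitor when disassembly tracing is
  // toggled.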
434   if (disasm_before != disasm_after) {
435     if (disasm_after) {
436       decoder_->InsertVisitorBefore(print_disasm_, this);
437     } else {
438       decoder_->RemoveVisitor(print_disasm_);
439     }
440   }
441 }
442 
443 
444 // Helpers ---------------------------------------------------------------------
445 uint64_t Simulator::AddWithCarry(unsigned reg_size,
446                                  bool set_flags,
447                                  uint64_t left,
448                                  uint64_t right,
449                                  int carry_in) {
450   VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
451   VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
452 
453   uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
454   uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
455   uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;
456 
457   left &= reg_mask;
458   right &= reg_mask;
459   uint64_t result = (left + right + carry_in) & reg_mask;
460 
461   if (set_flags) {
462     ReadNzcv().SetN(CalcNFlag(result, reg_size));
463     ReadNzcv().SetZ(CalcZFlag(result));
464 
465     // Compute the C flag by comparing the result to the max unsigned integer.
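    // A carry occurs iff (left + right + carry_in) > max_uint; the comparison
    // below is rearranged so that the intermediate values cannot overflow.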
466     uint64_t max_uint_2op = max_uint - carry_in;
467     bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
468     ReadNzcv().SetC(C ? 1 : 0);
469 
470     // Overflow iff the sign bit is the same for the two inputs and different
471     // for the result.
472     uint64_t left_sign = left & sign_mask;
473     uint64_t right_sign = right & sign_mask;
474     uint64_t result_sign = result & sign_mask;
475     bool V = (left_sign == right_sign) && (left_sign != result_sign);
476     ReadNzcv().SetV(V ? 1 : 0);
477 
478     LogSystemRegister(NZCV);
479   }
480   return result;
481 }
482 
483 
484 int64_t Simulator::ShiftOperand(unsigned reg_size,
485                                 uint64_t uvalue,
486                                 Shift shift_type,
487                                 unsigned amount) const {
488   VIXL_ASSERT((reg_size == kBRegSize) || (reg_size == kHRegSize) ||
489               (reg_size == kSRegSize) || (reg_size == kDRegSize));
490   if (amount > 0) {
491     uint64_t mask = GetUintMask(reg_size);
492     bool is_negative = (uvalue & GetSignMask(reg_size)) != 0;
493     // The behavior is undefined in C++ if the shift amount is greater than
494     // or equal to the register lane size. Work out the shifted result based
495     // on architectural behavior before performing the C++ shift operations.
496     switch (shift_type) {
497       case LSL:
498         if (amount >= reg_size) {
499           return UINT64_C(0);
500         }
501         uvalue <<= amount;
502         break;
503       case LSR:
504         if (amount >= reg_size) {
505           return UINT64_C(0);
506         }
507         uvalue >>= amount;
508         break;
509       case ASR:
510         if (amount >= reg_size) {
511           return is_negative ? ~UINT64_C(0) : UINT64_C(0);
512         }
513         uvalue >>= amount;
514         if (is_negative) {
515           // Simulate sign-extension to 64 bits.
516           uvalue |= ~UINT64_C(0) << (reg_size - amount);
517         }
518         break;
519       case ROR: {
520         uvalue = RotateRight(uvalue, amount, reg_size);
521         break;
522       }
523       default:
524         VIXL_UNIMPLEMENTED();
525         return 0;
526     }
527     uvalue &= mask;
528   }
529 
530   int64_t result;
531   memcpy(&result, &uvalue, sizeof(result));
532   return result;
533 }
534 
535 
536 int64_t Simulator::ExtendValue(unsigned reg_size,
537                                int64_t value,
538                                Extend extend_type,
539                                unsigned left_shift) const {
540   switch (extend_type) {
541     case UXTB:
542       value &= kByteMask;
543       break;
544     case UXTH:
545       value &= kHalfWordMask;
546       break;
547     case UXTW:
548       value &= kWordMask;
549       break;
550     case SXTB:
551       value &= kByteMask;
552       if ((value & 0x80) != 0) {
553         value |= ~UINT64_C(0) << 8;
554       }
555       break;
556     case SXTH:
557       value &= kHalfWordMask;
558       if ((value & 0x8000) != 0) {
559         value |= ~UINT64_C(0) << 16;
560       }
561       break;
562     case SXTW:
563       value &= kWordMask;
564       if ((value & 0x80000000) != 0) {
565         value |= ~UINT64_C(0) << 32;
566       }
567       break;
568     case UXTX:
569     case SXTX:
570       break;
571     default:
572       VIXL_UNREACHABLE();
573   }
574   return ShiftOperand(reg_size, value, LSL, left_shift);
575 }
576 
577 
578 void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
579   AssertSupportedFPCR();
580 
581   // TODO: This assumes that the C++ implementation handles comparisons in the
582   // way that we expect (as per AssertSupportedFPCR()).
583   bool process_exception = false;
584   if ((IsNaN(val0) != 0) || (IsNaN(val1) != 0)) {
585     ReadNzcv().SetRawValue(FPUnorderedFlag);
586     if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
587         (trap == EnableTrap)) {
588       process_exception = true;
589     }
590   } else if (val0 < val1) {
591     ReadNzcv().SetRawValue(FPLessThanFlag);
592   } else if (val0 > val1) {
593     ReadNzcv().SetRawValue(FPGreaterThanFlag);
594   } else if (val0 == val1) {
595     ReadNzcv().SetRawValue(FPEqualFlag);
596   } else {
597     VIXL_UNREACHABLE();
598   }
599   LogSystemRegister(NZCV);
600   if (process_exception) FPProcessException();
601 }
602 
603 
604 uint64_t Simulator::ComputeMemOperandAddress(const MemOperand& mem_op) const {
605   VIXL_ASSERT(mem_op.IsValid());
606   int64_t base = ReadRegister<int64_t>(mem_op.GetBaseRegister());
607   if (mem_op.IsImmediateOffset()) {
608     return base + mem_op.GetOffset();
609   } else {
610     VIXL_ASSERT(mem_op.GetRegisterOffset().IsValid());
611     int64_t offset = ReadRegister<int64_t>(mem_op.GetRegisterOffset());
612     unsigned shift_amount = mem_op.GetShiftAmount();
613     if (mem_op.GetShift() != NO_SHIFT) {
614       offset = ShiftOperand(kXRegSize, offset, mem_op.GetShift(), shift_amount);
615     }
616     if (mem_op.GetExtend() != NO_EXTEND) {
617       offset = ExtendValue(kXRegSize, offset, mem_op.GetExtend(), shift_amount);
618     }
619     return static_cast<uint64_t>(base + offset);
620   }
621 }
622 
623 
624 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
625     unsigned reg_size, unsigned lane_size) {
626   VIXL_ASSERT(reg_size >= lane_size);
627 
628   uint32_t format = 0;
629   if (reg_size != lane_size) {
630     switch (reg_size) {
631       default:
632         VIXL_UNREACHABLE();
633         break;
634       case kQRegSizeInBytes:
635         format = kPrintRegAsQVector;
636         break;
637       case kDRegSizeInBytes:
638         format = kPrintRegAsDVector;
639         break;
640     }
641   }
642 
643   switch (lane_size) {
644     default:
645       VIXL_UNREACHABLE();
646       break;
647     case kQRegSizeInBytes:
648       format |= kPrintReg1Q;
649       break;
650     case kDRegSizeInBytes:
651       format |= kPrintReg1D;
652       break;
653     case kSRegSizeInBytes:
654       format |= kPrintReg1S;
655       break;
656     case kHRegSizeInBytes:
657       format |= kPrintReg1H;
658       break;
659     case kBRegSizeInBytes:
660       format |= kPrintReg1B;
661       break;
662   }
663   // These sizes would be duplicate case labels.
664   VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
665   VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
666   VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
667   VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
668 
669   return static_cast<PrintRegisterFormat>(format);
670 }
671 
672 
673 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
674     VectorFormat vform) {
675   switch (vform) {
676     default:
677       VIXL_UNREACHABLE();
678       return kPrintReg16B;
679     case kFormat16B:
680       return kPrintReg16B;
681     case kFormat8B:
682       return kPrintReg8B;
683     case kFormat8H:
684       return kPrintReg8H;
685     case kFormat4H:
686       return kPrintReg4H;
687     case kFormat4S:
688       return kPrintReg4S;
689     case kFormat2S:
690       return kPrintReg2S;
691     case kFormat2D:
692       return kPrintReg2D;
693     case kFormat1D:
694       return kPrintReg1D;
695 
696     case kFormatB:
697       return kPrintReg1B;
698     case kFormatH:
699       return kPrintReg1H;
700     case kFormatS:
701       return kPrintReg1S;
702     case kFormatD:
703       return kPrintReg1D;
704 
705     case kFormatVnB:
706       return kPrintRegVnB;
707     case kFormatVnH:
708       return kPrintRegVnH;
709     case kFormatVnS:
710       return kPrintRegVnS;
711     case kFormatVnD:
712       return kPrintRegVnD;
713   }
714 }
715 
716 
717 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
718     VectorFormat vform) {
719   switch (vform) {
720     default:
721       VIXL_UNREACHABLE();
722       return kPrintReg16B;
723     case kFormat8H:
724       return kPrintReg8HFP;
725     case kFormat4H:
726       return kPrintReg4HFP;
727     case kFormat4S:
728       return kPrintReg4SFP;
729     case kFormat2S:
730       return kPrintReg2SFP;
731     case kFormat2D:
732       return kPrintReg2DFP;
733     case kFormat1D:
734       return kPrintReg1DFP;
735     case kFormatH:
736       return kPrintReg1HFP;
737     case kFormatS:
738       return kPrintReg1SFP;
739     case kFormatD:
740       return kPrintReg1DFP;
741   }
742 }
743 
744 void Simulator::PrintRegisters() {
745   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
746     if (i == kSpRegCode) i = kSPRegInternalCode;
747     PrintRegister(i);
748   }
749 }
750 
751 void Simulator::PrintVRegisters() {
752   for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
753     PrintVRegister(i);
754   }
755 }
756 
757 void Simulator::PrintZRegisters() {
758   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
759     PrintZRegister(i);
760   }
761 }
762 
763 void Simulator::PrintWrittenRegisters() {
764   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
765     if (registers_[i].WrittenSinceLastLog()) {
766       if (i == kSpRegCode) i = kSPRegInternalCode;
767       PrintRegister(i);
768     }
769   }
770 }
771 
772 void Simulator::PrintWrittenVRegisters() {
773   bool has_sve = GetCPUFeatures()->Has(CPUFeatures::kSVE);
774   for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
775     if (vregisters_[i].WrittenSinceLastLog()) {
776       // Z registers are initialised in the constructor before the user can
777       // configure the CPU features, so we must also check for SVE here.
778       if (vregisters_[i].AccessedAsZSinceLastLog() && has_sve) {
779         PrintZRegister(i);
780       } else {
781         PrintVRegister(i);
782       }
783     }
784   }
785 }
786 
787 void Simulator::PrintWrittenPRegisters() {
788   // P registers are initialised in the constructor before the user can
789   // configure the CPU features, so we must check for SVE here.
790   if (!GetCPUFeatures()->Has(CPUFeatures::kSVE)) return;
791   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
792     if (pregisters_[i].WrittenSinceLastLog()) {
793       PrintPRegister(i);
794     }
795   }
796   if (ReadFFR().WrittenSinceLastLog()) PrintFFR();
797 }
798 
799 void Simulator::PrintSystemRegisters() {
800   PrintSystemRegister(NZCV);
801   PrintSystemRegister(FPCR);
802 }
803 
804 void Simulator::PrintRegisterValue(const uint8_t* value,
805                                    int value_size,
806                                    PrintRegisterFormat format) {
807   int print_width = GetPrintRegSizeInBytes(format);
808   VIXL_ASSERT(print_width <= value_size);
809   for (int i = value_size - 1; i >= print_width; i--) {
810     // Pad with spaces so that values align vertically.
811     fprintf(stream_, "  ");
812     // If we aren't explicitly printing a partial value, ensure that the
813     // unprinted bits are zero.
814     VIXL_ASSERT(((format & kPrintRegPartial) != 0) || (value[i] == 0));
815   }
816   fprintf(stream_, "0x");
817   for (int i = print_width - 1; i >= 0; i--) {
818     fprintf(stream_, "%02x", value[i]);
819   }
820 }
821 
822 void Simulator::PrintRegisterValueFPAnnotations(const uint8_t* value,
823                                                 uint16_t lane_mask,
824                                                 PrintRegisterFormat format) {
825   VIXL_ASSERT((format & kPrintRegAsFP) != 0);
826   int lane_size = GetPrintRegLaneSizeInBytes(format);
827   fprintf(stream_, " (");
828   bool last_inactive = false;
829   const char* sep = "";
830   for (int i = GetPrintRegLaneCount(format) - 1; i >= 0; i--, sep = ", ") {
831     bool access = (lane_mask & (1 << (i * lane_size))) != 0;
832     if (access) {
833       // Read the lane as a double, so we can format all FP types in the same
834       // way. We squash NaNs, and a double can exactly represent any other value
835       // that the smaller types can represent, so this is lossless.
836       double element;
837       switch (lane_size) {
838         case kHRegSizeInBytes: {
839           Float16 element_fp16;
840           VIXL_STATIC_ASSERT(sizeof(element_fp16) == kHRegSizeInBytes);
841           memcpy(&element_fp16, &value[i * lane_size], sizeof(element_fp16));
842           element = FPToDouble(element_fp16, kUseDefaultNaN);
843           break;
844         }
845         case kSRegSizeInBytes: {
846           float element_fp32;
847           memcpy(&element_fp32, &value[i * lane_size], sizeof(element_fp32));
848           element = static_cast<double>(element_fp32);
849           break;
850         }
851         case kDRegSizeInBytes: {
852           memcpy(&element, &value[i * lane_size], sizeof(element));
853           break;
854         }
855         default:
856           VIXL_UNREACHABLE();
857           fprintf(stream_, "{UnknownFPValue}");
858           continue;
859       }
860       if (IsNaN(element)) {
861         // The fprintf behaviour for NaNs is implementation-defined. Always
862         // print "nan", so that traces are consistent.
863         fprintf(stream_, "%s%snan%s", sep, clr_vreg_value, clr_normal);
864       } else {
865         fprintf(stream_,
866                 "%s%s%#.4g%s",
867                 sep,
868                 clr_vreg_value,
869                 element,
870                 clr_normal);
871       }
872       last_inactive = false;
873     } else if (!last_inactive) {
874       // Replace each contiguous sequence of inactive lanes with "...".
875       fprintf(stream_, "%s...", sep);
876       last_inactive = true;
877     }
878   }
879   fprintf(stream_, ")");
880 }
881 
882 void Simulator::PrintRegister(int code,
883                               PrintRegisterFormat format,
884                               const char* suffix) {
885   VIXL_ASSERT((static_cast<unsigned>(code) < kNumberOfRegisters) ||
886               (static_cast<unsigned>(code) == kSPRegInternalCode));
887   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsScalar);
888   VIXL_ASSERT((format & kPrintRegAsFP) == 0);
889 
890   SimRegister* reg;
891   SimRegister zero;
892   if (code == kZeroRegCode) {
893     reg = &zero;
894   } else {
895     // registers_[31] holds the SP.
896     VIXL_STATIC_ASSERT((kSPRegInternalCode % kNumberOfRegisters) == 31);
897     reg = &registers_[code % kNumberOfRegisters];
898   }
899 
900   // We trace register writes as whole register values, implying that any
901   // unprinted bits are all zero:
902   //   "#       x{code}: 0x{-----value----}"
903   //   "#       w{code}:         0x{-value}"
904   // Stores trace partial register values, implying nothing about the unprinted
905   // bits:
906   //   "# x{code}<63:0>: 0x{-----value----}"
907   //   "# x{code}<31:0>:         0x{-value}"
908   //   "# x{code}<15:0>:             0x{--}"
909   //   "#  x{code}<7:0>:               0x{}"
910 
911   bool is_partial = (format & kPrintRegPartial) != 0;
912   unsigned print_reg_size = GetPrintRegSizeInBits(format);
913   std::stringstream name;
914   if (is_partial) {
915     name << XRegNameForCode(code) << GetPartialRegSuffix(format);
916   } else {
917     // Notify the register that it has been logged, but only if we're printing
918     // all of it.
919     reg->NotifyRegisterLogged();
920     switch (print_reg_size) {
921       case kWRegSize:
922         name << WRegNameForCode(code);
923         break;
924       case kXRegSize:
925         name << XRegNameForCode(code);
926         break;
927       default:
928         VIXL_UNREACHABLE();
929         return;
930     }
931   }
932 
933   fprintf(stream_,
934           "# %s%*s: %s",
935           clr_reg_name,
936           kPrintRegisterNameFieldWidth,
937           name.str().c_str(),
938           clr_reg_value);
939   PrintRegisterValue(*reg, format);
940   fprintf(stream_, "%s%s", clr_normal, suffix);
941 }
942 
943 void Simulator::PrintVRegister(int code,
944                                PrintRegisterFormat format,
945                                const char* suffix) {
946   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfVRegisters);
947   VIXL_ASSERT(((format & kPrintRegAsVectorMask) == kPrintRegAsScalar) ||
948               ((format & kPrintRegAsVectorMask) == kPrintRegAsDVector) ||
949               ((format & kPrintRegAsVectorMask) == kPrintRegAsQVector));
950 
951   // We trace register writes as whole register values, implying that any
952   // unprinted bits are all zero:
953   //   "#        v{code}: 0x{-------------value------------}"
954   //   "#        d{code}:                 0x{-----value----}"
955   //   "#        s{code}:                         0x{-value}"
956   //   "#        h{code}:                             0x{--}"
957   //   "#        b{code}:                               0x{}"
958   // Stores trace partial register values, implying nothing about the unprinted
959   // bits:
960   //   "# v{code}<127:0>: 0x{-------------value------------}"
961   //   "#  v{code}<63:0>:                 0x{-----value----}"
962   //   "#  v{code}<31:0>:                         0x{-value}"
963   //   "#  v{code}<15:0>:                             0x{--}"
964   //   "#   v{code}<7:0>:                               0x{}"
965 
966   bool is_partial = ((format & kPrintRegPartial) != 0);
967   std::stringstream name;
968   unsigned print_reg_size = GetPrintRegSizeInBits(format);
969   if (is_partial) {
970     name << VRegNameForCode(code) << GetPartialRegSuffix(format);
971   } else {
972     // Notify the register that it has been logged, but only if we're printing
973     // all of it.
974     vregisters_[code].NotifyRegisterLogged();
975     switch (print_reg_size) {
976       case kBRegSize:
977         name << BRegNameForCode(code);
978         break;
979       case kHRegSize:
980         name << HRegNameForCode(code);
981         break;
982       case kSRegSize:
983         name << SRegNameForCode(code);
984         break;
985       case kDRegSize:
986         name << DRegNameForCode(code);
987         break;
988       case kQRegSize:
989         name << VRegNameForCode(code);
990         break;
991       default:
992         VIXL_UNREACHABLE();
993         return;
994     }
995   }
996 
997   fprintf(stream_,
998           "# %s%*s: %s",
999           clr_vreg_name,
1000           kPrintRegisterNameFieldWidth,
1001           name.str().c_str(),
1002           clr_vreg_value);
1003   PrintRegisterValue(vregisters_[code], format);
1004   fprintf(stream_, "%s", clr_normal);
1005   if ((format & kPrintRegAsFP) != 0) {
1006     PrintRegisterValueFPAnnotations(vregisters_[code], format);
1007   }
1008   fprintf(stream_, "%s", suffix);
1009 }
1010 
1011 void Simulator::PrintVRegistersForStructuredAccess(int rt_code,
1012                                                    int reg_count,
1013                                                    uint16_t focus_mask,
1014                                                    PrintRegisterFormat format) {
1015   bool print_fp = (format & kPrintRegAsFP) != 0;
1016   // Suppress FP formatting, so we can specify the lanes we're interested in.
1017   PrintRegisterFormat format_no_fp =
1018       static_cast<PrintRegisterFormat>(format & ~kPrintRegAsFP);
1019 
1020   for (int r = 0; r < reg_count; r++) {
1021     int code = (rt_code + r) % kNumberOfVRegisters;
1022     PrintVRegister(code, format_no_fp, "");
1023     if (print_fp) {
1024       PrintRegisterValueFPAnnotations(vregisters_[code], focus_mask, format);
1025     }
1026     fprintf(stream_, "\n");
1027   }
1028 }
1029 
1030 void Simulator::PrintZRegistersForStructuredAccess(int rt_code,
1031                                                    int q_index,
1032                                                    int reg_count,
1033                                                    uint16_t focus_mask,
1034                                                    PrintRegisterFormat format) {
1035   bool print_fp = (format & kPrintRegAsFP) != 0;
1036   // Suppress FP formatting, so we can specify the lanes we're interested in.
1037   PrintRegisterFormat format_no_fp =
1038       static_cast<PrintRegisterFormat>(format & ~kPrintRegAsFP);
1039 
1040   PrintRegisterFormat format_q = GetPrintRegAsQChunkOfSVE(format);
1041 
1042   const unsigned size = kQRegSizeInBytes;
1043   unsigned byte_index = q_index * size;
1044   const uint8_t* value = vregisters_[rt_code].GetBytes() + byte_index;
1045   VIXL_ASSERT((byte_index + size) <= vregisters_[rt_code].GetSizeInBytes());
1046 
1047   for (int r = 0; r < reg_count; r++) {
1048     int code = (rt_code + r) % kNumberOfZRegisters;
1049     PrintPartialZRegister(code, q_index, format_no_fp, "");
1050     if (print_fp) {
1051       PrintRegisterValueFPAnnotations(value, focus_mask, format_q);
1052     }
1053     fprintf(stream_, "\n");
1054   }
1055 }
1056 
1057 void Simulator::PrintZRegister(int code, PrintRegisterFormat format) {
1058   // We're going to print the register in parts, so force a partial format.
1059   format = GetPrintRegPartial(format);
1060   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1061   int vl = GetVectorLengthInBits();
1062   VIXL_ASSERT((vl % kQRegSize) == 0);
1063   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1064     PrintPartialZRegister(code, i, format);
1065   }
1066   vregisters_[code].NotifyRegisterLogged();
1067 }
1068 
1069 void Simulator::PrintPRegister(int code, PrintRegisterFormat format) {
1070   // We're going to print the register in parts, so force a partial format.
1071   format = GetPrintRegPartial(format);
1072   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1073   int vl = GetVectorLengthInBits();
1074   VIXL_ASSERT((vl % kQRegSize) == 0);
1075   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1076     PrintPartialPRegister(code, i, format);
1077   }
1078   pregisters_[code].NotifyRegisterLogged();
1079 }
1080 
1081 void Simulator::PrintFFR(PrintRegisterFormat format) {
1082   // We're going to print the register in parts, so force a partial format.
1083   format = GetPrintRegPartial(format);
1084   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1085   int vl = GetVectorLengthInBits();
1086   VIXL_ASSERT((vl % kQRegSize) == 0);
1087   SimPRegister& ffr = ReadFFR();
1088   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1089     PrintPartialPRegister("FFR", ffr, i, format);
1090   }
1091   ffr.NotifyRegisterLogged();
1092 }
1093 
1094 void Simulator::PrintPartialZRegister(int code,
1095                                       int q_index,
1096                                       PrintRegisterFormat format,
1097                                       const char* suffix) {
1098   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfZRegisters);
1099   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1100   VIXL_ASSERT((format & kPrintRegPartial) != 0);
1101   VIXL_ASSERT((q_index * kQRegSize) < GetVectorLengthInBits());
1102 
1103   // We _only_ trace partial Z register values in Q-sized chunks, because
1104   // they're often too large to reasonably fit on a single line. Each line
1105   // implies nothing about the unprinted bits.
1106   //   "# z{code}<127:0>: 0x{-------------value------------}"
1107 
1108   format = GetPrintRegAsQChunkOfSVE(format);
1109 
1110   const unsigned size = kQRegSizeInBytes;
1111   unsigned byte_index = q_index * size;
1112   const uint8_t* value = vregisters_[code].GetBytes() + byte_index;
1113   VIXL_ASSERT((byte_index + size) <= vregisters_[code].GetSizeInBytes());
1114 
1115   int lsb = q_index * kQRegSize;
1116   int msb = lsb + kQRegSize - 1;
1117   std::stringstream name;
1118   name << ZRegNameForCode(code) << '<' << msb << ':' << lsb << '>';
1119 
1120   fprintf(stream_,
1121           "# %s%*s: %s",
1122           clr_vreg_name,
1123           kPrintRegisterNameFieldWidth,
1124           name.str().c_str(),
1125           clr_vreg_value);
1126   PrintRegisterValue(value, size, format);
1127   fprintf(stream_, "%s", clr_normal);
1128   if ((format & kPrintRegAsFP) != 0) {
1129     PrintRegisterValueFPAnnotations(value, GetPrintRegLaneMask(format), format);
1130   }
1131   fprintf(stream_, "%s", suffix);
1132 }
1133 
1134 void Simulator::PrintPartialPRegister(const char* name,
1135                                       const SimPRegister& reg,
1136                                       int q_index,
1137                                       PrintRegisterFormat format,
1138                                       const char* suffix) {
1139   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1140   VIXL_ASSERT((format & kPrintRegPartial) != 0);
1141   VIXL_ASSERT((q_index * kQRegSize) < GetVectorLengthInBits());
1142 
1143   // We don't currently use the format for anything here.
1144   USE(format);
1145 
1146   // We _only_ trace partial P register values, because they're often too large
1147   // to reasonably fit on a single line. Each line implies nothing about the
1148   // unprinted bits.
1149   //
1150   // We print values in binary, with spaces between each bit, in order for the
1151   // bits to align with the Z register bytes that they predicate.
1152   //   "# {name}<15:0>: 0b{-------------value------------}"
1153 
1154   int print_size_in_bits = kQRegSize / kZRegBitsPerPRegBit;
1155   int lsb = q_index * print_size_in_bits;
1156   int msb = lsb + print_size_in_bits - 1;
1157   std::stringstream prefix;
1158   prefix << name << '<' << msb << ':' << lsb << '>';
1159 
1160   fprintf(stream_,
1161           "# %s%*s: %s0b",
1162           clr_preg_name,
1163           kPrintRegisterNameFieldWidth,
1164           prefix.str().c_str(),
1165           clr_preg_value);
1166   for (int i = msb; i >= lsb; i--) {
1167     fprintf(stream_, " %c", reg.GetBit(i) ? '1' : '0');
1168   }
1169   fprintf(stream_, "%s%s", clr_normal, suffix);
1170 }
1171 
1172 void Simulator::PrintPartialPRegister(int code,
1173                                       int q_index,
1174                                       PrintRegisterFormat format,
1175                                       const char* suffix) {
1176   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfPRegisters);
1177   PrintPartialPRegister(PRegNameForCode(code),
1178                         pregisters_[code],
1179                         q_index,
1180                         format,
1181                         suffix);
1182 }
1183 
1184 void Simulator::PrintSystemRegister(SystemRegister id) {
1185   switch (id) {
1186     case NZCV:
1187       fprintf(stream_,
1188               "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
1189               clr_flag_name,
1190               clr_flag_value,
1191               ReadNzcv().GetN(),
1192               ReadNzcv().GetZ(),
1193               ReadNzcv().GetC(),
1194               ReadNzcv().GetV(),
1195               clr_normal);
1196       break;
1197     case FPCR: {
1198       static const char* rmode[] = {"0b00 (Round to Nearest)",
1199                                     "0b01 (Round towards Plus Infinity)",
1200                                     "0b10 (Round towards Minus Infinity)",
1201                                     "0b11 (Round towards Zero)"};
1202       VIXL_ASSERT(ReadFpcr().GetRMode() < ArrayLength(rmode));
1203       fprintf(stream_,
1204               "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1205               clr_flag_name,
1206               clr_flag_value,
1207               ReadFpcr().GetAHP(),
1208               ReadFpcr().GetDN(),
1209               ReadFpcr().GetFZ(),
1210               rmode[ReadFpcr().GetRMode()],
1211               clr_normal);
1212       break;
1213     }
1214     default:
1215       VIXL_UNREACHABLE();
1216   }
1217 }
1218 
1219 uint16_t Simulator::PrintPartialAccess(uint16_t access_mask,
1220                                        uint16_t future_access_mask,
1221                                        int struct_element_count,
1222                                        int lane_size_in_bytes,
1223                                        const char* op,
1224                                        uintptr_t address,
1225                                        int reg_size_in_bytes) {
1226   // We want to assume that we'll access at least one lane.
1227   VIXL_ASSERT(access_mask != 0);
1228   VIXL_ASSERT((reg_size_in_bytes == kXRegSizeInBytes) ||
1229               (reg_size_in_bytes == kQRegSizeInBytes));
1230   bool started_annotation = false;
1231   // Indent to match the register field, the fixed formatting, and the value
1232   // prefix ("0x"): "# {name}: 0x"
1233   fprintf(stream_, "# %*s    ", kPrintRegisterNameFieldWidth, "");
1234   // First, annotate the lanes (byte by byte).
1235   for (int lane = reg_size_in_bytes - 1; lane >= 0; lane--) {
1236     bool access = (access_mask & (1 << lane)) != 0;
1237     bool future = (future_access_mask & (1 << lane)) != 0;
1238     if (started_annotation) {
1239       // If we've started an annotation, draw a horizontal line in addition to
1240       // any other symbols.
1241       if (access) {
1242         fprintf(stream_, "─╨");
1243       } else if (future) {
1244         fprintf(stream_, "─║");
1245       } else {
1246         fprintf(stream_, "──");
1247       }
1248     } else {
1249       if (access) {
1250         started_annotation = true;
1251         fprintf(stream_, " ╙");
1252       } else if (future) {
1253         fprintf(stream_, " ║");
1254       } else {
1255         fprintf(stream_, "  ");
1256       }
1257     }
1258   }
1259   VIXL_ASSERT(started_annotation);
1260   fprintf(stream_, "─ 0x");
1261   int lane_size_in_nibbles = lane_size_in_bytes * 2;
1262   // Print the most-significant struct element first.
1263   const char* sep = "";
1264   for (int i = struct_element_count - 1; i >= 0; i--) {
1265     int offset = lane_size_in_bytes * i;
1266     uint64_t nibble = MemReadUint(lane_size_in_bytes, address + offset);
1267     fprintf(stream_, "%s%0*" PRIx64, sep, lane_size_in_nibbles, nibble);
1268     sep = "'";
1269   }
1270   fprintf(stream_,
1271           " %s %s0x%016" PRIxPTR "%s\n",
1272           op,
1273           clr_memory_address,
1274           address,
1275           clr_normal);
1276   return future_access_mask & ~access_mask;
1277 }
1278 
1279 void Simulator::PrintAccess(int code,
1280                             PrintRegisterFormat format,
1281                             const char* op,
1282                             uintptr_t address) {
1283   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1284   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1285   if ((format & kPrintRegPartial) == 0) {
1286     registers_[code].NotifyRegisterLogged();
1287   }
1288   // Scalar-format accesses use a simple format:
1289   //   "# {reg}: 0x{value} -> {address}"
1290 
1291   // Suppress the newline, so the access annotation goes on the same line.
1292   PrintRegister(code, format, "");
1293   fprintf(stream_,
1294           " %s %s0x%016" PRIxPTR "%s\n",
1295           op,
1296           clr_memory_address,
1297           address,
1298           clr_normal);
1299 }
1300 
1301 void Simulator::PrintVAccess(int code,
1302                              PrintRegisterFormat format,
1303                              const char* op,
1304                              uintptr_t address) {
1305   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1306 
1307   // Scalar-format accesses use a simple format:
1308   //   "# v{code}: 0x{value} -> {address}"
1309 
1310   // Suppress the newline, so the access annotation goes on the same line.
1311   PrintVRegister(code, format, "");
1312   fprintf(stream_,
1313           " %s %s0x%016" PRIxPTR "%s\n",
1314           op,
1315           clr_memory_address,
1316           address,
1317           clr_normal);
1318 }
1319 
1320 void Simulator::PrintVStructAccess(int rt_code,
1321                                    int reg_count,
1322                                    PrintRegisterFormat format,
1323                                    const char* op,
1324                                    uintptr_t address) {
1325   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1326 
1327   // For example:
1328   //   "# v{code}: 0x{value}"
1329   //   "#     ...: 0x{value}"
1330   //   "#              ║   ╙─ {struct_value} -> {lowest_address}"
1331   //   "#              ╙───── {struct_value} -> {highest_address}"
1332 
1333   uint16_t lane_mask = GetPrintRegLaneMask(format);
1334   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
1335 
1336   int reg_size_in_bytes = GetPrintRegSizeInBytes(format);
1337   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
1338   for (int i = 0; i < reg_size_in_bytes; i += lane_size_in_bytes) {
1339     uint16_t access_mask = 1 << i;
1340     VIXL_ASSERT((lane_mask & access_mask) != 0);
1341     lane_mask = PrintPartialAccess(access_mask,
1342                                    lane_mask,
1343                                    reg_count,
1344                                    lane_size_in_bytes,
1345                                    op,
1346                                    address + (i * reg_count));
1347   }
1348 }
1349 
1350 void Simulator::PrintVSingleStructAccess(int rt_code,
1351                                          int reg_count,
1352                                          int lane,
1353                                          PrintRegisterFormat format,
1354                                          const char* op,
1355                                          uintptr_t address) {
1356   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1357 
1358   // For example:
1359   //   "# v{code}: 0x{value}"
1360   //   "#     ...: 0x{value}"
1361   //   "#              ╙───── {struct_value} -> {address}"
1362 
1363   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
1364   uint16_t lane_mask = 1 << (lane * lane_size_in_bytes);
1365   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
1366   PrintPartialAccess(lane_mask, 0, reg_count, lane_size_in_bytes, op, address);
1367 }
1368 
1369 void Simulator::PrintVReplicatingStructAccess(int rt_code,
1370                                               int reg_count,
1371                                               PrintRegisterFormat format,
1372                                               const char* op,
1373                                               uintptr_t address) {
1374   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1375 
1376   // For example:
1377   //   "# v{code}: 0x{value}"
1378   //   "#     ...: 0x{value}"
1379   //   "#            ╙─╨─╨─╨─ {struct_value} -> {address}"
1380 
1381   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
1382   uint16_t lane_mask = GetPrintRegLaneMask(format);
1383   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
1384   PrintPartialAccess(lane_mask, 0, reg_count, lane_size_in_bytes, op, address);
1385 }
1386 
1387 void Simulator::PrintZAccess(int rt_code, const char* op, uintptr_t address) {
1388   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1389 
1390   // Scalar-format accesses are split into separate chunks, each of which uses a
1391   // simple format:
1392   //   "#   z{code}<127:0>: 0x{value} -> {address}"
1393   //   "# z{code}<255:128>: 0x{value} -> {address + 16}"
1394   //   "# z{code}<383:256>: 0x{value} -> {address + 32}"
1395   // etc
1396 
1397   int vl = GetVectorLengthInBits();
1398   VIXL_ASSERT((vl % kQRegSize) == 0);
1399   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
1400     // Suppress the newline, so the access annotation goes on the same line.
1401     PrintPartialZRegister(rt_code, q_index, kPrintRegVnQPartial, "");
1402     fprintf(stream_,
1403             " %s %s0x%016" PRIxPTR "%s\n",
1404             op,
1405             clr_memory_address,
1406             address,
1407             clr_normal);
1408     address += kQRegSizeInBytes;
1409   }
1410 }
1411 
1412 void Simulator::PrintZStructAccess(int rt_code,
1413                                    int reg_count,
1414                                    const LogicPRegister& pg,
1415                                    PrintRegisterFormat format,
1416                                    int msize_in_bytes,
1417                                    const char* op,
1418                                    const LogicSVEAddressVector& addr) {
1419   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1420 
1421   // For example:
1422   //   "# z{code}<255:128>: 0x{value}"
1423   //   "#     ...<255:128>: 0x{value}"
1424   //   "#                       ║   ╙─ {struct_value} -> {first_address}"
1425   //   "#                       ╙───── {struct_value} -> {last_address}"
1426 
1427   // We're going to print the register in parts, so force a partial format.
1428   bool skip_inactive_chunks = (format & kPrintRegPartial) != 0;
1429   format = GetPrintRegPartial(format);
1430 
1431   int esize_in_bytes = GetPrintRegLaneSizeInBytes(format);
1432   int vl = GetVectorLengthInBits();
1433   VIXL_ASSERT((vl % kQRegSize) == 0);
1434   int lanes_per_q = kQRegSizeInBytes / esize_in_bytes;
1435   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
1436     uint16_t pred =
1437         pg.GetActiveMask<uint16_t>(q_index) & GetPrintRegLaneMask(format);
1438     if ((pred == 0) && skip_inactive_chunks) continue;
1439 
1440     PrintZRegistersForStructuredAccess(rt_code,
1441                                        q_index,
1442                                        reg_count,
1443                                        pred,
1444                                        format);
1445     if (pred == 0) {
1446       // This register chunk has no active lanes. The loop below would print
1447       // nothing, so leave a blank line to keep structures grouped together.
1448       fprintf(stream_, "#\n");
1449       continue;
1450     }
1451     for (int i = 0; i < lanes_per_q; i++) {
1452       uint16_t access = 1 << (i * esize_in_bytes);
1453       int lane = (q_index * lanes_per_q) + i;
1454       // Skip inactive lanes.
1455       if ((pred & access) == 0) continue;
1456       pred = PrintPartialAccess(access,
1457                                 pred,
1458                                 reg_count,
1459                                 msize_in_bytes,
1460                                 op,
1461                                 addr.GetStructAddress(lane));
1462     }
1463   }
1464 
1465   // We print the whole register, even for stores.
1466   for (int i = 0; i < reg_count; i++) {
1467     vregisters_[(rt_code + i) % kNumberOfZRegisters].NotifyRegisterLogged();
1468   }
1469 }
1470 
1471 void Simulator::PrintPAccess(int code, const char* op, uintptr_t address) {
1472   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1473 
1474   // Scalar-format accesses are split into separate chunks, each of which uses a
1475   // simple format:
1476   //   "#  p{code}<15:0>: 0b{value} -> {address}"
1477   //   "# p{code}<31:16>: 0b{value} -> {address + 2}"
1478   //   "# p{code}<47:32>: 0b{value} -> {address + 4}"
1479   // etc
1480 
1481   int vl = GetVectorLengthInBits();
1482   VIXL_ASSERT((vl % kQRegSize) == 0);
1483   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
1484     // Suppress the newline, so the access annotation goes on the same line.
1485     PrintPartialPRegister(code, q_index, kPrintRegVnQPartial, "");
1486     fprintf(stream_,
1487             " %s %s0x%016" PRIxPTR "%s\n",
1488             op,
1489             clr_memory_address,
1490             address,
1491             clr_normal);
1492     address += kQRegSizeInBytes;
1493   }
1494 }
1495 
1496 void Simulator::PrintRead(int rt_code,
1497                           PrintRegisterFormat format,
1498                           uintptr_t address) {
1499   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1500   registers_[rt_code].NotifyRegisterLogged();
1501   PrintAccess(rt_code, format, "<-", address);
1502 }
1503 
1504 void Simulator::PrintExtendingRead(int rt_code,
1505                                    PrintRegisterFormat format,
1506                                    int access_size_in_bytes,
1507                                    uintptr_t address) {
1508   int reg_size_in_bytes = GetPrintRegSizeInBytes(format);
1509   if (access_size_in_bytes == reg_size_in_bytes) {
1510     // There is no extension here, so print a simple load.
1511     PrintRead(rt_code, format, address);
1512     return;
1513   }
1514   VIXL_ASSERT(access_size_in_bytes < reg_size_in_bytes);
1515 
1516   // For sign- and zero-extension, make it clear that the resulting register
1517   // value is different from what is loaded from memory.
1518   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1519   registers_[rt_code].NotifyRegisterLogged();
1520   PrintRegister(rt_code, format);
1521   PrintPartialAccess(1,
1522                      0,
1523                      1,
1524                      access_size_in_bytes,
1525                      "<-",
1526                      address,
1527                      kXRegSizeInBytes);
1528 }
1529 
1530 void Simulator::PrintVRead(int rt_code,
1531                            PrintRegisterFormat format,
1532                            uintptr_t address) {
1533   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1534   vregisters_[rt_code].NotifyRegisterLogged();
1535   PrintVAccess(rt_code, format, "<-", address);
1536 }
1537 
1538 void Simulator::PrintWrite(int rt_code,
1539                            PrintRegisterFormat format,
1540                            uintptr_t address) {
1541   // Because this trace doesn't represent a change to the source register's
1542   // value, only print the relevant part of the value.
1543   format = GetPrintRegPartial(format);
1544   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1545   registers_[rt_code].NotifyRegisterLogged();
1546   PrintAccess(rt_code, format, "->", address);
1547 }
1548 
1549 void Simulator::PrintVWrite(int rt_code,
1550                             PrintRegisterFormat format,
1551                             uintptr_t address) {
1552   // Because this trace doesn't represent a change to the source register's
1553   // value, only print the relevant part of the value.
1554   format = GetPrintRegPartial(format);
1555   // It only makes sense to write scalar values here. Vectors are handled by
1556   // PrintVStructAccess.
1557   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1558   PrintVAccess(rt_code, format, "->", address);
1559 }
1560 
1561 void Simulator::PrintTakenBranch(const Instruction* target) {
1562   fprintf(stream_,
1563           "# %sBranch%s to 0x%016" PRIx64 ".\n",
1564           clr_branch_marker,
1565           clr_normal,
1566           reinterpret_cast<uint64_t>(target));
1567 }
1568 
1569 // Visitors---------------------------------------------------------------------
1570 
1571 
1572 void Simulator::VisitReserved(const Instruction* instr) {
1573   // UDF is the only instruction in this group, and the Decoder is precise here.
1574   VIXL_ASSERT(instr->Mask(ReservedMask) == UDF);
1575 
1576   printf("UDF (permanently undefined) instruction at %p: 0x%08" PRIx32 "\n",
1577          reinterpret_cast<const void*>(instr),
1578          instr->GetInstructionBits());
1579   VIXL_ABORT_WITH_MSG("UNDEFINED (UDF)\n");
1580 }
1581 
1582 
1583 void Simulator::VisitUnimplemented(const Instruction* instr) {
1584   printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
1585          reinterpret_cast<const void*>(instr),
1586          instr->GetInstructionBits());
1587   VIXL_UNIMPLEMENTED();
1588 }
1589 
1590 
1591 void Simulator::VisitUnallocated(const Instruction* instr) {
1592   printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
1593          reinterpret_cast<const void*>(instr),
1594          instr->GetInstructionBits());
1595   VIXL_UNIMPLEMENTED();
1596 }
1597 
1598 
1599 void Simulator::VisitPCRelAddressing(const Instruction* instr) {
1600   VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
1601               (instr->Mask(PCRelAddressingMask) == ADRP));
1602 
1603   WriteRegister(instr->GetRd(), instr->GetImmPCOffsetTarget());
1604 }
1605 
1606 
1607 void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
1608   switch (instr->Mask(UnconditionalBranchMask)) {
1609     case BL:
1610       WriteLr(instr->GetNextInstruction());
1611       VIXL_FALLTHROUGH();
1612     case B:
1613       WritePc(instr->GetImmPCOffsetTarget());
1614       break;
1615     default:
1616       VIXL_UNREACHABLE();
1617   }
1618 }
1619 
1620 
1621 void Simulator::VisitConditionalBranch(const Instruction* instr) {
1622   VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
1623   if (ConditionPassed(instr->GetConditionBranch())) {
1624     WritePc(instr->GetImmPCOffsetTarget());
1625   }
1626 }
1627 
1628 BType Simulator::GetBTypeFromInstruction(const Instruction* instr) const {
1629   switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
1630     case BLR:
1631     case BLRAA:
1632     case BLRAB:
1633     case BLRAAZ:
1634     case BLRABZ:
1635       return BranchAndLink;
1636     case BR:
1637     case BRAA:
1638     case BRAB:
1639     case BRAAZ:
1640     case BRABZ:
1641       if ((instr->GetRn() == 16) || (instr->GetRn() == 17) ||
1642           !PcIsInGuardedPage()) {
1643         return BranchFromUnguardedOrToIP;
1644       }
1645       return BranchFromGuardedNotToIP;
1646   }
1647   return DefaultBType;
1648 }
1649 
1650 void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
1651   bool authenticate = false;
1652   bool link = false;
1653   uint64_t addr = ReadXRegister(instr->GetRn());
1654   uint64_t context = 0;
1655 
1656   switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
1657     case BLR:
1658       link = true;
1659       VIXL_FALLTHROUGH();
1660     case BR:
1661     case RET:
1662       break;
1663 
1664     case BLRAAZ:
1665     case BLRABZ:
1666       link = true;
1667       VIXL_FALLTHROUGH();
1668     case BRAAZ:
1669     case BRABZ:
1670       authenticate = true;
1671       break;
1672 
1673     case BLRAA:
1674     case BLRAB:
1675       link = true;
1676       VIXL_FALLTHROUGH();
1677     case BRAA:
1678     case BRAB:
1679       authenticate = true;
1680       context = ReadXRegister(instr->GetRd());
1681       break;
1682 
1683     case RETAA:
1684     case RETAB:
1685       authenticate = true;
1686       addr = ReadXRegister(kLinkRegCode);
1687       context = ReadXRegister(31, Reg31IsStackPointer);
1688       break;
1689     default:
1690       VIXL_UNREACHABLE();
1691   }
1692 
1693   if (link) {
1694     WriteLr(instr->GetNextInstruction());
1695   }
1696 
1697   if (authenticate) {
1698     PACKey key = (instr->ExtractBit(10) == 0) ? kPACKeyIA : kPACKeyIB;
1699     addr = AuthPAC(addr, context, key, kInstructionPointer);
1700 
1701     int error_lsb = GetTopPACBit(addr, kInstructionPointer) - 2;
1702     if (((addr >> error_lsb) & 0x3) != 0x0) {
1703       VIXL_ABORT_WITH_MSG("Failed to authenticate pointer.");
1704     }
1705   }
1706 
1707   WritePc(Instruction::Cast(addr));
1708   WriteNextBType(GetBTypeFromInstruction(instr));
1709 }
1710 
1711 
1712 void Simulator::VisitTestBranch(const Instruction* instr) {
1713   unsigned bit_pos =
1714       (instr->GetImmTestBranchBit5() << 5) | instr->GetImmTestBranchBit40();
1715   bool bit_zero = ((ReadXRegister(instr->GetRt()) >> bit_pos) & 1) == 0;
1716   bool take_branch = false;
1717   switch (instr->Mask(TestBranchMask)) {
1718     case TBZ:
1719       take_branch = bit_zero;
1720       break;
1721     case TBNZ:
1722       take_branch = !bit_zero;
1723       break;
1724     default:
1725       VIXL_UNIMPLEMENTED();
1726   }
1727   if (take_branch) {
1728     WritePc(instr->GetImmPCOffsetTarget());
1729   }
1730 }
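// The test bit position above is a 6-bit value assembled from the b5 and b40
// instruction fields, so, for example, `tbz x3, #33, target` tests bit 33 of
// x3 and branches when it is zero, while TBNZ branches when it is one.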
1731 
1732 
1733 void Simulator::VisitCompareBranch(const Instruction* instr) {
1734   unsigned rt = instr->GetRt();
1735   bool take_branch = false;
1736   switch (instr->Mask(CompareBranchMask)) {
1737     case CBZ_w:
1738       take_branch = (ReadWRegister(rt) == 0);
1739       break;
1740     case CBZ_x:
1741       take_branch = (ReadXRegister(rt) == 0);
1742       break;
1743     case CBNZ_w:
1744       take_branch = (ReadWRegister(rt) != 0);
1745       break;
1746     case CBNZ_x:
1747       take_branch = (ReadXRegister(rt) != 0);
1748       break;
1749     default:
1750       VIXL_UNIMPLEMENTED();
1751   }
1752   if (take_branch) {
1753     WritePc(instr->GetImmPCOffsetTarget());
1754   }
1755 }
1756 
1757 
1758 void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) {
1759   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1760   bool set_flags = instr->GetFlagsUpdate();
1761   int64_t new_val = 0;
1762   Instr operation = instr->Mask(AddSubOpMask);
1763 
1764   switch (operation) {
1765     case ADD:
1766     case ADDS: {
1767       new_val = AddWithCarry(reg_size,
1768                              set_flags,
1769                              ReadRegister(reg_size,
1770                                           instr->GetRn(),
1771                                           instr->GetRnMode()),
1772                              op2);
1773       break;
1774     }
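    // SUB and SUBS reuse the same AddWithCarry() path via the identity
    // a - b == a + ~b + 1, so the NZCV flags come out as the architecture
    // requires (in particular, C is set when the subtraction does not borrow).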
1775     case SUB:
1776     case SUBS: {
1777       new_val = AddWithCarry(reg_size,
1778                              set_flags,
1779                              ReadRegister(reg_size,
1780                                           instr->GetRn(),
1781                                           instr->GetRnMode()),
1782                              ~op2,
1783                              1);
1784       break;
1785     }
1786     default:
1787       VIXL_UNREACHABLE();
1788   }
1789 
1790   WriteRegister(reg_size,
1791                 instr->GetRd(),
1792                 new_val,
1793                 LogRegWrites,
1794                 instr->GetRdMode());
1795 }
1796 
1797 
1798 void Simulator::VisitAddSubShifted(const Instruction* instr) {
1799   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1800   int64_t op2 = ShiftOperand(reg_size,
1801                              ReadRegister(reg_size, instr->GetRm()),
1802                              static_cast<Shift>(instr->GetShiftDP()),
1803                              instr->GetImmDPShift());
1804   AddSubHelper(instr, op2);
1805 }
1806 
1807 
1808 void Simulator::VisitAddSubImmediate(const Instruction* instr) {
1809   int64_t op2 = instr->GetImmAddSub()
1810                 << ((instr->GetImmAddSubShift() == 1) ? 12 : 0);
1811   AddSubHelper(instr, op2);
1812 }
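// The 12-bit immediate is optionally shifted left by 12 (and by nothing else),
// so, for example, `add x0, x1, #3, lsl #12` adds 0x3000 to x1 and
// `sub w0, w1, #0xfff` subtracts 4095.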
1813 
1814 
1815 void Simulator::VisitAddSubExtended(const Instruction* instr) {
1816   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1817   int64_t op2 = ExtendValue(reg_size,
1818                             ReadRegister(reg_size, instr->GetRm()),
1819                             static_cast<Extend>(instr->GetExtendMode()),
1820                             instr->GetImmExtendShift());
1821   AddSubHelper(instr, op2);
1822 }
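// For example, `add x0, x1, w2, uxtw #2` computes x1 + (ZeroExtend(w2) << 2);
// the left shift applied after the extension is limited to the range 0-4.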
1823 
1824 
1825 void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
1826   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1827   int64_t op2 = ReadRegister(reg_size, instr->GetRm());
1828   int64_t new_val;
1829 
1830   if ((instr->Mask(AddSubOpMask) == SUB) ||
1831       (instr->Mask(AddSubOpMask) == SUBS)) {
1832     op2 = ~op2;
1833   }
1834 
1835   new_val = AddWithCarry(reg_size,
1836                          instr->GetFlagsUpdate(),
1837                          ReadRegister(reg_size, instr->GetRn()),
1838                          op2,
1839                          ReadC());
1840 
1841   WriteRegister(reg_size, instr->GetRd(), new_val);
1842 }
1843 
1844 
1845 void Simulator::VisitRotateRightIntoFlags(const Instruction* instr) {
1846   switch (instr->Mask(RotateRightIntoFlagsMask)) {
1847     case RMIF: {
1848       uint64_t value = ReadRegister<uint64_t>(instr->GetRn());
1849       unsigned shift = instr->GetImmRMIFRotation();
1850       unsigned mask = instr->GetNzcv();
1851       uint64_t rotated = RotateRight(value, shift, kXRegSize);
1852 
1853       ReadNzcv().SetFlags((rotated & mask) | (ReadNzcv().GetFlags() & ~mask));
1854       break;
1855     }
1856   }
1857 }
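// RMIF rotates Xn right by the 6-bit immediate and copies bits 3:0 of the
// rotated value into N, Z, C and V respectively, but only for the flags
// selected by the 4-bit mask. For example, `rmif x0, #60, #0b1000` sets N to
// bit 63 of x0 and leaves Z, C and V unchanged.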
1858 
1859 
1860 void Simulator::VisitEvaluateIntoFlags(const Instruction* instr) {
1861   uint32_t value = ReadRegister<uint32_t>(instr->GetRn());
1862   unsigned msb = (instr->Mask(EvaluateIntoFlagsMask) == SETF16) ? 15 : 7;
1863 
1864   unsigned sign_bit = (value >> msb) & 1;
1865   unsigned overflow_bit = (value >> (msb + 1)) & 1;
1866   ReadNzcv().SetN(sign_bit);
1867   ReadNzcv().SetZ((value << (31 - msb)) == 0);
1868   ReadNzcv().SetV(sign_bit ^ overflow_bit);
1869 }
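// Worked example for SETF8 (msb == 7): with wn == 0x80, N = 1 (bit 7), Z = 0
// (the low byte is non-zero), V = 1 (bit 7 differs from bit 8), and C is left
// unchanged. SETF16 does the same using bits 15 and 16.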
1870 
1871 
1872 void Simulator::VisitLogicalShifted(const Instruction* instr) {
1873   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1874   Shift shift_type = static_cast<Shift>(instr->GetShiftDP());
1875   unsigned shift_amount = instr->GetImmDPShift();
1876   int64_t op2 = ShiftOperand(reg_size,
1877                              ReadRegister(reg_size, instr->GetRm()),
1878                              shift_type,
1879                              shift_amount);
1880   if (instr->Mask(NOT) == NOT) {
1881     op2 = ~op2;
1882   }
1883   LogicalHelper(instr, op2);
1884 }
1885 
1886 
1887 void Simulator::VisitLogicalImmediate(const Instruction* instr) {
1888   if (instr->GetImmLogical() == 0) {
1889     VIXL_UNIMPLEMENTED();
1890   } else {
1891     LogicalHelper(instr, instr->GetImmLogical());
1892   }
1893 }
1894 
1895 
1896 void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
1897   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1898   int64_t op1 = ReadRegister(reg_size, instr->GetRn());
1899   int64_t result = 0;
1900   bool update_flags = false;
1901 
1902   // Switch on the logical operation, stripping out the NOT bit, as it has a
1903   // different meaning for logical immediate instructions.
1904   switch (instr->Mask(LogicalOpMask & ~NOT)) {
1905     case ANDS:
1906       update_flags = true;
1907       VIXL_FALLTHROUGH();
1908     case AND:
1909       result = op1 & op2;
1910       break;
1911     case ORR:
1912       result = op1 | op2;
1913       break;
1914     case EOR:
1915       result = op1 ^ op2;
1916       break;
1917     default:
1918       VIXL_UNIMPLEMENTED();
1919   }
1920 
1921   if (update_flags) {
1922     ReadNzcv().SetN(CalcNFlag(result, reg_size));
1923     ReadNzcv().SetZ(CalcZFlag(result));
1924     ReadNzcv().SetC(0);
1925     ReadNzcv().SetV(0);
1926     LogSystemRegister(NZCV);
1927   }
1928 
1929   WriteRegister(reg_size,
1930                 instr->GetRd(),
1931                 result,
1932                 LogRegWrites,
1933                 instr->GetRdMode());
1934 }
1935 
1936 
1937 void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
1938   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1939   ConditionalCompareHelper(instr, ReadRegister(reg_size, instr->GetRm()));
1940 }
1941 
1942 
1943 void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
1944   ConditionalCompareHelper(instr, instr->GetImmCondCmp());
1945 }
1946 
1947 
1948 void Simulator::ConditionalCompareHelper(const Instruction* instr,
1949                                          int64_t op2) {
1950   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
1951   int64_t op1 = ReadRegister(reg_size, instr->GetRn());
1952 
1953   if (ConditionPassed(instr->GetCondition())) {
1954     // If the condition passes, set the status flags to the result of comparing
1955     // the operands.
1956     if (instr->Mask(ConditionalCompareMask) == CCMP) {
1957       AddWithCarry(reg_size, true, op1, ~op2, 1);
1958     } else {
1959       VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
1960       AddWithCarry(reg_size, true, op1, op2, 0);
1961     }
1962   } else {
1963     // If the condition fails, set the status flags to the nzcv immediate.
1964     ReadNzcv().SetFlags(instr->GetNzcv());
1965     LogSystemRegister(NZCV);
1966   }
1967 }
1968 
1969 
1970 void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
1971   int offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
1972   LoadStoreHelper(instr, offset, Offset);
1973 }
1974 
1975 
1976 void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
1977   LoadStoreHelper(instr, instr->GetImmLS(), Offset);
1978 }
1979 
1980 
1981 void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
1982   LoadStoreHelper(instr, instr->GetImmLS(), PreIndex);
1983 }
1984 
1985 
1986 void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
1987   LoadStoreHelper(instr, instr->GetImmLS(), PostIndex);
1988 }
1989 
1990 
1991 template <typename T1, typename T2>
1992 void Simulator::LoadAcquireRCpcUnscaledOffsetHelper(const Instruction* instr) {
1993   unsigned rt = instr->GetRt();
1994   unsigned rn = instr->GetRn();
1995 
1996   unsigned element_size = sizeof(T2);
1997   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
1998   int offset = instr->GetImmLS();
1999   address += offset;
2000 
2001   // Verify that the address is available to the host.
2002   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
2003 
2004   // Check the alignment of `address`.
2005   if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) {
2006     VIXL_ALIGNMENT_EXCEPTION();
2007   }
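  // The check above only rejects accesses that cross a 16-byte boundary; for
  // example, a 4-byte access at an address ending in 0x0e spans two 16-byte
  // windows, so AlignDown() of its first and last bytes differ and an
  // alignment exception is raised. Unaligned accesses contained within a
  // single 16-byte window are allowed through.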
2008 
2009   WriteRegister<T1>(rt, static_cast<T1>(MemRead<T2>(address)));
2010 
2011   // Approximate load-acquire by issuing a full barrier after the load.
2012   __sync_synchronize();
2013 
2014   LogRead(rt, GetPrintRegisterFormat(element_size), address);
2015 }
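// In the helper above, T2 is the type read from memory and T1 the type
// written to the register. For example, the <int64_t, int32_t> instantiation
// used for LDAPURSW below sign-extends the loaded word to 64 bits, whereas
// <uint32_t, uint32_t> is a plain 32-bit load.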
2016 
2017 
2018 template <typename T>
2019 void Simulator::StoreReleaseUnscaledOffsetHelper(const Instruction* instr) {
2020   unsigned rt = instr->GetRt();
2021   unsigned rn = instr->GetRn();
2022 
2023   unsigned element_size = sizeof(T);
2024   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2025   int offset = instr->GetImmLS();
2026   address += offset;
2027 
2028   // Verify that the address is available to the host.
2029   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
2030 
2031   // Check the alignment of `address`.
2032   if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) {
2033     VIXL_ALIGNMENT_EXCEPTION();
2034   }
2035 
2036   // Approximate store-release by issuing a full barrier before the store.
2037   __sync_synchronize();
2038 
2039   MemWrite<T>(address, ReadRegister<T>(rt));
2040 
2041   LogWrite(rt, GetPrintRegisterFormat(element_size), address);
2042 }
2043 
2044 
2045 void Simulator::VisitLoadStoreRCpcUnscaledOffset(const Instruction* instr) {
2046   switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) {
2047     case LDAPURB:
2048       LoadAcquireRCpcUnscaledOffsetHelper<uint8_t, uint8_t>(instr);
2049       break;
2050     case LDAPURH:
2051       LoadAcquireRCpcUnscaledOffsetHelper<uint16_t, uint16_t>(instr);
2052       break;
2053     case LDAPUR_w:
2054       LoadAcquireRCpcUnscaledOffsetHelper<uint32_t, uint32_t>(instr);
2055       break;
2056     case LDAPUR_x:
2057       LoadAcquireRCpcUnscaledOffsetHelper<uint64_t, uint64_t>(instr);
2058       break;
2059     case LDAPURSB_w:
2060       LoadAcquireRCpcUnscaledOffsetHelper<int32_t, int8_t>(instr);
2061       break;
2062     case LDAPURSB_x:
2063       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int8_t>(instr);
2064       break;
2065     case LDAPURSH_w:
2066       LoadAcquireRCpcUnscaledOffsetHelper<int32_t, int16_t>(instr);
2067       break;
2068     case LDAPURSH_x:
2069       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int16_t>(instr);
2070       break;
2071     case LDAPURSW:
2072       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int32_t>(instr);
2073       break;
2074     case STLURB:
2075       StoreReleaseUnscaledOffsetHelper<uint8_t>(instr);
2076       break;
2077     case STLURH:
2078       StoreReleaseUnscaledOffsetHelper<uint16_t>(instr);
2079       break;
2080     case STLUR_w:
2081       StoreReleaseUnscaledOffsetHelper<uint32_t>(instr);
2082       break;
2083     case STLUR_x:
2084       StoreReleaseUnscaledOffsetHelper<uint64_t>(instr);
2085       break;
2086   }
2087 }
2088 
2089 
2090 void Simulator::VisitLoadStorePAC(const Instruction* instr) {
2091   unsigned dst = instr->GetRt();
2092   unsigned addr_reg = instr->GetRn();
2093 
2094   uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer);
2095 
2096   PACKey key = (instr->ExtractBit(23) == 0) ? kPACKeyDA : kPACKeyDB;
2097   address = AuthPAC(address, 0, key, kDataPointer);
2098 
2099   int error_lsb = GetTopPACBit(address, kInstructionPointer) - 2;
2100   if (((address >> error_lsb) & 0x3) != 0x0) {
2101     VIXL_ABORT_WITH_MSG("Failed to authenticate pointer.");
2102   }
2103 
2104 
2105   if ((addr_reg == 31) && ((address % 16) != 0)) {
2106     // When the base register is SP the stack pointer is required to be
2107     // quadword aligned prior to the address calculation and write-backs.
2108     // Misalignment will cause a stack alignment fault.
2109     VIXL_ALIGNMENT_EXCEPTION();
2110   }
2111 
2112   int64_t offset = instr->GetImmLSPAC();
2113   address += offset;
2114 
2115   if (instr->Mask(LoadStorePACPreBit) == LoadStorePACPreBit) {
2116     // Pre-index mode.
2117     VIXL_ASSERT(offset != 0);
2118     WriteXRegister(addr_reg, address, LogRegWrites, Reg31IsStackPointer);
2119   }
2120 
2121   uintptr_t addr_ptr = static_cast<uintptr_t>(address);
2122 
2123   // Verify that the calculated address is available to the host.
2124   VIXL_ASSERT(address == addr_ptr);
2125 
2126   WriteXRegister(dst, MemRead<uint64_t>(addr_ptr), NoRegLog);
2127   unsigned access_size = 1 << 3;
2128   LogRead(dst, GetPrintRegisterFormatForSize(access_size), addr_ptr);
2129 }
2130 
2131 
2132 void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
2133   Extend ext = static_cast<Extend>(instr->GetExtendMode());
2134   VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
2135   unsigned shift_amount = instr->GetImmShiftLS() * instr->GetSizeLS();
2136 
2137   int64_t offset =
2138       ExtendValue(kXRegSize, ReadXRegister(instr->GetRm()), ext, shift_amount);
2139   LoadStoreHelper(instr, offset, Offset);
2140 }
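// Because the shift is GetImmShiftLS() * GetSizeLS(), the offset register is
// scaled by the access size only when the instruction's S bit is set; for
// example, `ldr x0, [x1, w2, sxtw #3]` sign-extends w2 and shifts it left by
// three before adding it to x1.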
2141 
2142 
2143 void Simulator::LoadStoreHelper(const Instruction* instr,
2144                                 int64_t offset,
2145                                 AddrMode addrmode) {
2146   unsigned srcdst = instr->GetRt();
2147   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
2148 
2149   bool rt_is_vreg = false;
2150   int extend_to_size = 0;
2151   LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
2152   switch (op) {
2153     case LDRB_w:
2154       WriteWRegister(srcdst, MemRead<uint8_t>(address), NoRegLog);
2155       extend_to_size = kWRegSizeInBytes;
2156       break;
2157     case LDRH_w:
2158       WriteWRegister(srcdst, MemRead<uint16_t>(address), NoRegLog);
2159       extend_to_size = kWRegSizeInBytes;
2160       break;
2161     case LDR_w:
2162       WriteWRegister(srcdst, MemRead<uint32_t>(address), NoRegLog);
2163       extend_to_size = kWRegSizeInBytes;
2164       break;
2165     case LDR_x:
2166       WriteXRegister(srcdst, MemRead<uint64_t>(address), NoRegLog);
2167       extend_to_size = kXRegSizeInBytes;
2168       break;
2169     case LDRSB_w:
2170       WriteWRegister(srcdst, MemRead<int8_t>(address), NoRegLog);
2171       extend_to_size = kWRegSizeInBytes;
2172       break;
2173     case LDRSH_w:
2174       WriteWRegister(srcdst, MemRead<int16_t>(address), NoRegLog);
2175       extend_to_size = kWRegSizeInBytes;
2176       break;
2177     case LDRSB_x:
2178       WriteXRegister(srcdst, MemRead<int8_t>(address), NoRegLog);
2179       extend_to_size = kXRegSizeInBytes;
2180       break;
2181     case LDRSH_x:
2182       WriteXRegister(srcdst, MemRead<int16_t>(address), NoRegLog);
2183       extend_to_size = kXRegSizeInBytes;
2184       break;
2185     case LDRSW_x:
2186       WriteXRegister(srcdst, MemRead<int32_t>(address), NoRegLog);
2187       extend_to_size = kXRegSizeInBytes;
2188       break;
2189     case LDR_b:
2190       WriteBRegister(srcdst, MemRead<uint8_t>(address), NoRegLog);
2191       rt_is_vreg = true;
2192       break;
2193     case LDR_h:
2194       WriteHRegister(srcdst, MemRead<uint16_t>(address), NoRegLog);
2195       rt_is_vreg = true;
2196       break;
2197     case LDR_s:
2198       WriteSRegister(srcdst, MemRead<float>(address), NoRegLog);
2199       rt_is_vreg = true;
2200       break;
2201     case LDR_d:
2202       WriteDRegister(srcdst, MemRead<double>(address), NoRegLog);
2203       rt_is_vreg = true;
2204       break;
2205     case LDR_q:
2206       WriteQRegister(srcdst, MemRead<qreg_t>(address), NoRegLog);
2207       rt_is_vreg = true;
2208       break;
2209 
2210     case STRB_w:
2211       MemWrite<uint8_t>(address, ReadWRegister(srcdst));
2212       break;
2213     case STRH_w:
2214       MemWrite<uint16_t>(address, ReadWRegister(srcdst));
2215       break;
2216     case STR_w:
2217       MemWrite<uint32_t>(address, ReadWRegister(srcdst));
2218       break;
2219     case STR_x:
2220       MemWrite<uint64_t>(address, ReadXRegister(srcdst));
2221       break;
2222     case STR_b:
2223       MemWrite<uint8_t>(address, ReadBRegister(srcdst));
2224       rt_is_vreg = true;
2225       break;
2226     case STR_h:
2227       MemWrite<uint16_t>(address, ReadHRegisterBits(srcdst));
2228       rt_is_vreg = true;
2229       break;
2230     case STR_s:
2231       MemWrite<float>(address, ReadSRegister(srcdst));
2232       rt_is_vreg = true;
2233       break;
2234     case STR_d:
2235       MemWrite<double>(address, ReadDRegister(srcdst));
2236       rt_is_vreg = true;
2237       break;
2238     case STR_q:
2239       MemWrite<qreg_t>(address, ReadQRegister(srcdst));
2240       rt_is_vreg = true;
2241       break;
2242 
2243     // Ignore prfm hint instructions.
2244     case PRFM:
2245       break;
2246 
2247     default:
2248       VIXL_UNIMPLEMENTED();
2249   }
2250 
2251   // Print a detailed trace (including the memory address).
2252   bool extend = (extend_to_size != 0);
2253   unsigned access_size = 1 << instr->GetSizeLS();
2254   unsigned result_size = extend ? extend_to_size : access_size;
2255   PrintRegisterFormat print_format =
2256       rt_is_vreg ? GetPrintRegisterFormatForSizeTryFP(result_size)
2257                  : GetPrintRegisterFormatForSize(result_size);
2258 
2259   if (instr->IsLoad()) {
2260     if (rt_is_vreg) {
2261       LogVRead(srcdst, print_format, address);
2262     } else {
2263       LogExtendingRead(srcdst, print_format, access_size, address);
2264     }
2265   } else if (instr->IsStore()) {
2266     if (rt_is_vreg) {
2267       LogVWrite(srcdst, print_format, address);
2268     } else {
2269       LogWrite(srcdst, GetPrintRegisterFormatForSize(result_size), address);
2270     }
2271   } else {
2272     VIXL_ASSERT(op == PRFM);
2273   }
2274 
2275   local_monitor_.MaybeClear();
2276 }
2277 
2278 
2279 void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
2280   LoadStorePairHelper(instr, Offset);
2281 }
2282 
2283 
2284 void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
2285   LoadStorePairHelper(instr, PreIndex);
2286 }
2287 
2288 
2289 void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
2290   LoadStorePairHelper(instr, PostIndex);
2291 }
2292 
2293 
2294 void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
2295   LoadStorePairHelper(instr, Offset);
2296 }
2297 
2298 
2299 void Simulator::LoadStorePairHelper(const Instruction* instr,
2300                                     AddrMode addrmode) {
2301   unsigned rt = instr->GetRt();
2302   unsigned rt2 = instr->GetRt2();
2303   int element_size = 1 << instr->GetSizeLSPair();
2304   int64_t offset = instr->GetImmLSPair() * element_size;
2305   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
2306   uintptr_t address2 = address + element_size;
2307 
2308   LoadStorePairOp op =
2309       static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
2310 
2311   // 'rt' and 'rt2' can only be aliased for stores.
2312   VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
2313 
2314   bool rt_is_vreg = false;
2315   bool sign_extend = false;
2316   switch (op) {
2317     // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
2318     // will print a more detailed log.
2319     case LDP_w: {
2320       WriteWRegister(rt, MemRead<uint32_t>(address), NoRegLog);
2321       WriteWRegister(rt2, MemRead<uint32_t>(address2), NoRegLog);
2322       break;
2323     }
2324     case LDP_s: {
2325       WriteSRegister(rt, MemRead<float>(address), NoRegLog);
2326       WriteSRegister(rt2, MemRead<float>(address2), NoRegLog);
2327       rt_is_vreg = true;
2328       break;
2329     }
2330     case LDP_x: {
2331       WriteXRegister(rt, MemRead<uint64_t>(address), NoRegLog);
2332       WriteXRegister(rt2, MemRead<uint64_t>(address2), NoRegLog);
2333       break;
2334     }
2335     case LDP_d: {
2336       WriteDRegister(rt, MemRead<double>(address), NoRegLog);
2337       WriteDRegister(rt2, MemRead<double>(address2), NoRegLog);
2338       rt_is_vreg = true;
2339       break;
2340     }
2341     case LDP_q: {
2342       WriteQRegister(rt, MemRead<qreg_t>(address), NoRegLog);
2343       WriteQRegister(rt2, MemRead<qreg_t>(address2), NoRegLog);
2344       rt_is_vreg = true;
2345       break;
2346     }
2347     case LDPSW_x: {
2348       WriteXRegister(rt, MemRead<int32_t>(address), NoRegLog);
2349       WriteXRegister(rt2, MemRead<int32_t>(address2), NoRegLog);
2350       sign_extend = true;
2351       break;
2352     }
2353     case STP_w: {
2354       MemWrite<uint32_t>(address, ReadWRegister(rt));
2355       MemWrite<uint32_t>(address2, ReadWRegister(rt2));
2356       break;
2357     }
2358     case STP_s: {
2359       MemWrite<float>(address, ReadSRegister(rt));
2360       MemWrite<float>(address2, ReadSRegister(rt2));
2361       rt_is_vreg = true;
2362       break;
2363     }
2364     case STP_x: {
2365       MemWrite<uint64_t>(address, ReadXRegister(rt));
2366       MemWrite<uint64_t>(address2, ReadXRegister(rt2));
2367       break;
2368     }
2369     case STP_d: {
2370       MemWrite<double>(address, ReadDRegister(rt));
2371       MemWrite<double>(address2, ReadDRegister(rt2));
2372       rt_is_vreg = true;
2373       break;
2374     }
2375     case STP_q: {
2376       MemWrite<qreg_t>(address, ReadQRegister(rt));
2377       MemWrite<qreg_t>(address2, ReadQRegister(rt2));
2378       rt_is_vreg = true;
2379       break;
2380     }
2381     default:
2382       VIXL_UNREACHABLE();
2383   }
2384 
2385   // Print a detailed trace (including the memory address).
2386   unsigned result_size = sign_extend ? kXRegSizeInBytes : element_size;
2387   PrintRegisterFormat print_format =
2388       rt_is_vreg ? GetPrintRegisterFormatForSizeTryFP(result_size)
2389                  : GetPrintRegisterFormatForSize(result_size);
2390 
2391   if (instr->IsLoad()) {
2392     if (rt_is_vreg) {
2393       LogVRead(rt, print_format, address);
2394       LogVRead(rt2, print_format, address2);
2395     } else if (sign_extend) {
2396       LogExtendingRead(rt, print_format, element_size, address);
2397       LogExtendingRead(rt2, print_format, element_size, address2);
2398     } else {
2399       LogRead(rt, print_format, address);
2400       LogRead(rt2, print_format, address2);
2401     }
2402   } else {
2403     if (rt_is_vreg) {
2404       LogVWrite(rt, print_format, address);
2405       LogVWrite(rt2, print_format, address2);
2406     } else {
2407       LogWrite(rt, print_format, address);
2408       LogWrite(rt2, print_format, address2);
2409     }
2410   }
2411 
2412   local_monitor_.MaybeClear();
2413 }
2414 
2415 
2416 template <typename T>
2417 void Simulator::CompareAndSwapHelper(const Instruction* instr) {
2418   unsigned rs = instr->GetRs();
2419   unsigned rt = instr->GetRt();
2420   unsigned rn = instr->GetRn();
2421 
2422   unsigned element_size = sizeof(T);
2423   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2424 
2425   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
2426 
2427   bool is_acquire = instr->ExtractBit(22) == 1;
2428   bool is_release = instr->ExtractBit(15) == 1;
2429 
2430   T comparevalue = ReadRegister<T>(rs);
2431   T newvalue = ReadRegister<T>(rt);
2432 
2433   // The architecture permits that the data read clears any exclusive monitors
2434   // associated with that location, even if the compare subsequently fails.
2435   local_monitor_.Clear();
2436 
2437   T data = MemRead<T>(address);
2438   if (is_acquire) {
2439     // Approximate load-acquire by issuing a full barrier after the load.
2440     __sync_synchronize();
2441   }
2442 
2443   if (data == comparevalue) {
2444     if (is_release) {
2445       // Approximate store-release by issuing a full barrier before the store.
2446       __sync_synchronize();
2447     }
2448     MemWrite<T>(address, newvalue);
2449     LogWrite(rt, GetPrintRegisterFormatForSize(element_size), address);
2450   }
2451   WriteRegister<T>(rs, data, NoRegLog);
2452   LogRead(rs, GetPrintRegisterFormatForSize(element_size), address);
2453 }
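// Note that CAS-family instructions always write the value read from memory
// back to Rs (so callers can tell whether the exchange took place), while the
// store of the new value only happens when the comparison succeeds; the
// acquire barrier is attached to the load and the release barrier to that
// conditional store.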
2454 
2455 
2456 template <typename T>
2457 void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
2458   VIXL_ASSERT((sizeof(T) == 4) || (sizeof(T) == 8));
2459   unsigned rs = instr->GetRs();
2460   unsigned rt = instr->GetRt();
2461   unsigned rn = instr->GetRn();
2462 
2463   VIXL_ASSERT((rs % 2 == 0) && (rt % 2 == 0));
2464 
2465   unsigned element_size = sizeof(T);
2466   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2467 
2468   CheckIsValidUnalignedAtomicAccess(rn, address, element_size * 2);
2469 
2470   uint64_t address2 = address + element_size;
2471 
2472   bool is_acquire = instr->ExtractBit(22) == 1;
2473   bool is_release = instr->ExtractBit(15) == 1;
2474 
2475   T comparevalue_high = ReadRegister<T>(rs + 1);
2476   T comparevalue_low = ReadRegister<T>(rs);
2477   T newvalue_high = ReadRegister<T>(rt + 1);
2478   T newvalue_low = ReadRegister<T>(rt);
2479 
2480   // The architecture permits that the data read clears any exclusive monitors
2481   // associated with that location, even if the compare subsequently fails.
2482   local_monitor_.Clear();
2483 
2484   T data_low = MemRead<T>(address);
2485   T data_high = MemRead<T>(address2);
2486 
2487   if (is_acquire) {
2488     // Approximate load-acquire by issuing a full barrier after the load.
2489     __sync_synchronize();
2490   }
2491 
2492   bool same =
2493       (data_high == comparevalue_high) && (data_low == comparevalue_low);
2494   if (same) {
2495     if (is_release) {
2496       // Approximate store-release by issuing a full barrier before the store.
2497       __sync_synchronize();
2498     }
2499 
2500     MemWrite<T>(address, newvalue_low);
2501     MemWrite<T>(address2, newvalue_high);
2502   }
2503 
2504   WriteRegister<T>(rs + 1, data_high, NoRegLog);
2505   WriteRegister<T>(rs, data_low, NoRegLog);
2506 
2507   PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
2508   LogRead(rs, format, address);
2509   LogRead(rs + 1, format, address2);
2510 
2511   if (same) {
2512     LogWrite(rt, format, address);
2513     LogWrite(rt + 1, format, address2);
2514   }
2515 }
2516 
2517 bool Simulator::CanReadMemory(uintptr_t address, size_t size) {
2518   // To simulate fault-tolerant loads, we need to know what host addresses we
2519   // can access without generating a real fault. One way to do that is to
2520   // attempt to `write()` the memory to a placeholder pipe[1]. This is more
2521   // portable and less intrusive than using (global) signal handlers.
2522   //
2523   // [1]: https://stackoverflow.com/questions/7134590
2524 
2525   size_t written = 0;
2526   bool can_read = true;
2527   // `write` will normally return after one invocation, but it is allowed to
2528   // handle only part of the operation, so wrap it in a loop.
2529   while (can_read && (written < size)) {
2530     ssize_t result = write(placeholder_pipe_fd_[1],
2531                            reinterpret_cast<void*>(address + written),
2532                            size - written);
2533     if (result > 0) {
2534       written += result;
2535     } else {
2536       switch (result) {
2537         case -EPERM:
2538         case -EFAULT:
2539           // The address range is not accessible.
2540           // `write` is supposed to return -EFAULT in this case, but in practice
2541           // it seems to return -EPERM, so we accept that too.
2542           can_read = false;
2543           break;
2544         case -EINTR:
2545           // The call was interrupted by a signal. Just try again.
2546           break;
2547         default:
2548           // Any other error is fatal.
2549           VIXL_ABORT();
2550       }
2551     }
2552   }
2553   // Drain the read side of the pipe. If we don't do this, we'll leak memory as
2554   // the placeholder data is buffered. As before, we expect to drain the whole
2555   // write in one invocation, but cannot guarantee that, so we wrap it in a
2556   // loop. This function is primarily intended to implement SVE fault-tolerant
2557   // loads, so the maximum Z register size is a good default buffer size.
2558   char buffer[kZRegMaxSizeInBytes];
2559   while (written > 0) {
2560     ssize_t result = read(placeholder_pipe_fd_[0],
2561                           reinterpret_cast<void*>(buffer),
2562                           sizeof(buffer));
2563     // `read` blocks, and returns 0 only at EOF. We should not hit EOF until
2564     // we've read everything that was written, so treat 0 as an error.
2565     if (result > 0) {
2566       VIXL_ASSERT(static_cast<size_t>(result) <= written);
2567       written -= result;
2568     } else {
2569       // For -EINTR, just try again. We can't handle any other error.
2570       VIXL_CHECK(result == -EINTR);
2571     }
2572   }
2573 
2574   return can_read;
2575 }
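// A minimal (hypothetical) usage sketch, assuming the caller sizes the probe
// to a Z register:
//
//   if (!CanReadMemory(addr, kZRegMaxSizeInBytes)) {
//     // Treat the elements as faulting, e.g. by clearing FFR bits, rather
//     // than performing the read.
//   }
//
// This is how SVE first-fault and non-fault loads can be modelled without
// installing a signal handler.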
2576 
2577 void Simulator::PrintExclusiveAccessWarning() {
2578   if (print_exclusive_access_warning_) {
2579     fprintf(stderr,
2580             "%sWARNING:%s VIXL simulator support for "
2581             "load-/store-/clear-exclusive "
2582             "instructions is limited. Refer to the README for details.%s\n",
2583             clr_warning,
2584             clr_warning_message,
2585             clr_normal);
2586     print_exclusive_access_warning_ = false;
2587   }
2588 }
2589 
2590 void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
2591   LoadStoreExclusive op =
2592       static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
2593 
2594   switch (op) {
2595     case CAS_w:
2596     case CASA_w:
2597     case CASL_w:
2598     case CASAL_w:
2599       CompareAndSwapHelper<uint32_t>(instr);
2600       break;
2601     case CAS_x:
2602     case CASA_x:
2603     case CASL_x:
2604     case CASAL_x:
2605       CompareAndSwapHelper<uint64_t>(instr);
2606       break;
2607     case CASB:
2608     case CASAB:
2609     case CASLB:
2610     case CASALB:
2611       CompareAndSwapHelper<uint8_t>(instr);
2612       break;
2613     case CASH:
2614     case CASAH:
2615     case CASLH:
2616     case CASALH:
2617       CompareAndSwapHelper<uint16_t>(instr);
2618       break;
2619     case CASP_w:
2620     case CASPA_w:
2621     case CASPL_w:
2622     case CASPAL_w:
2623       CompareAndSwapPairHelper<uint32_t>(instr);
2624       break;
2625     case CASP_x:
2626     case CASPA_x:
2627     case CASPL_x:
2628     case CASPAL_x:
2629       CompareAndSwapPairHelper<uint64_t>(instr);
2630       break;
2631     default:
2632       PrintExclusiveAccessWarning();
2633 
2634       unsigned rs = instr->GetRs();
2635       unsigned rt = instr->GetRt();
2636       unsigned rt2 = instr->GetRt2();
2637       unsigned rn = instr->GetRn();
2638 
2639       bool is_exclusive = !instr->GetLdStXNotExclusive();
2640       bool is_acquire_release =
2641           !is_exclusive || instr->GetLdStXAcquireRelease();
2642       bool is_load = instr->GetLdStXLoad();
2643       bool is_pair = instr->GetLdStXPair();
2644 
2645       unsigned element_size = 1 << instr->GetLdStXSizeLog2();
2646       unsigned access_size = is_pair ? element_size * 2 : element_size;
2647       uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2648 
2649       CheckIsValidUnalignedAtomicAccess(rn, address, access_size);
2650 
2651       if (is_load) {
2652         if (is_exclusive) {
2653           local_monitor_.MarkExclusive(address, access_size);
2654         } else {
2655           // Any non-exclusive load can clear the local monitor as a side
2656           // effect. We don't need to do this, but it is useful to stress the
2657           // simulated code.
2658           local_monitor_.Clear();
2659         }
2660 
2661         // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS).
2662         // We will print a more detailed log.
2663         unsigned reg_size = 0;
2664         switch (op) {
2665           case LDXRB_w:
2666           case LDAXRB_w:
2667           case LDARB_w:
2668           case LDLARB:
2669             WriteWRegister(rt, MemRead<uint8_t>(address), NoRegLog);
2670             reg_size = kWRegSizeInBytes;
2671             break;
2672           case LDXRH_w:
2673           case LDAXRH_w:
2674           case LDARH_w:
2675           case LDLARH:
2676             WriteWRegister(rt, MemRead<uint16_t>(address), NoRegLog);
2677             reg_size = kWRegSizeInBytes;
2678             break;
2679           case LDXR_w:
2680           case LDAXR_w:
2681           case LDAR_w:
2682           case LDLAR_w:
2683             WriteWRegister(rt, MemRead<uint32_t>(address), NoRegLog);
2684             reg_size = kWRegSizeInBytes;
2685             break;
2686           case LDXR_x:
2687           case LDAXR_x:
2688           case LDAR_x:
2689           case LDLAR_x:
2690             WriteXRegister(rt, MemRead<uint64_t>(address), NoRegLog);
2691             reg_size = kXRegSizeInBytes;
2692             break;
2693           case LDXP_w:
2694           case LDAXP_w:
2695             WriteWRegister(rt, MemRead<uint32_t>(address), NoRegLog);
2696             WriteWRegister(rt2,
2697                            MemRead<uint32_t>(address + element_size),
2698                            NoRegLog);
2699             reg_size = kWRegSizeInBytes;
2700             break;
2701           case LDXP_x:
2702           case LDAXP_x:
2703             WriteXRegister(rt, MemRead<uint64_t>(address), NoRegLog);
2704             WriteXRegister(rt2,
2705                            MemRead<uint64_t>(address + element_size),
2706                            NoRegLog);
2707             reg_size = kXRegSizeInBytes;
2708             break;
2709           default:
2710             VIXL_UNREACHABLE();
2711         }
2712 
2713         if (is_acquire_release) {
2714           // Approximate load-acquire by issuing a full barrier after the load.
2715           __sync_synchronize();
2716         }
2717 
2718         PrintRegisterFormat format = GetPrintRegisterFormatForSize(reg_size);
2719         LogExtendingRead(rt, format, element_size, address);
2720         if (is_pair) {
2721           LogExtendingRead(rt2, format, element_size, address + element_size);
2722         }
2723       } else {
2724         if (is_acquire_release) {
2725           // Approximate store-release by issuing a full barrier before the
2726           // store.
2727           __sync_synchronize();
2728         }
2729 
2730         bool do_store = true;
2731         if (is_exclusive) {
2732           do_store = local_monitor_.IsExclusive(address, access_size) &&
2733                      global_monitor_.IsExclusive(address, access_size);
2734           WriteWRegister(rs, do_store ? 0 : 1);
2735 
2736           //  - All exclusive stores explicitly clear the local monitor.
2737           local_monitor_.Clear();
2738         } else {
2739           //  - Any other store can clear the local monitor as a side effect.
2740           local_monitor_.MaybeClear();
2741         }
2742 
2743         if (do_store) {
2744           switch (op) {
2745             case STXRB_w:
2746             case STLXRB_w:
2747             case STLRB_w:
2748             case STLLRB:
2749               MemWrite<uint8_t>(address, ReadWRegister(rt));
2750               break;
2751             case STXRH_w:
2752             case STLXRH_w:
2753             case STLRH_w:
2754             case STLLRH:
2755               MemWrite<uint16_t>(address, ReadWRegister(rt));
2756               break;
2757             case STXR_w:
2758             case STLXR_w:
2759             case STLR_w:
2760             case STLLR_w:
2761               MemWrite<uint32_t>(address, ReadWRegister(rt));
2762               break;
2763             case STXR_x:
2764             case STLXR_x:
2765             case STLR_x:
2766             case STLLR_x:
2767               MemWrite<uint64_t>(address, ReadXRegister(rt));
2768               break;
2769             case STXP_w:
2770             case STLXP_w:
2771               MemWrite<uint32_t>(address, ReadWRegister(rt));
2772               MemWrite<uint32_t>(address + element_size, ReadWRegister(rt2));
2773               break;
2774             case STXP_x:
2775             case STLXP_x:
2776               MemWrite<uint64_t>(address, ReadXRegister(rt));
2777               MemWrite<uint64_t>(address + element_size, ReadXRegister(rt2));
2778               break;
2779             default:
2780               VIXL_UNREACHABLE();
2781           }
2782 
2783           PrintRegisterFormat format =
2784               GetPrintRegisterFormatForSize(element_size);
2785           LogWrite(rt, format, address);
2786           if (is_pair) {
2787             LogWrite(rt2, format, address + element_size);
2788           }
2789         }
2790       }
2791   }
2792 }
2793 
2794 template <typename T>
2795 void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
2796   unsigned rs = instr->GetRs();
2797   unsigned rt = instr->GetRt();
2798   unsigned rn = instr->GetRn();
2799 
2800   bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
2801   bool is_release = instr->ExtractBit(22) == 1;
2802 
2803   unsigned element_size = sizeof(T);
2804   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2805 
2806   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
2807 
2808   T value = ReadRegister<T>(rs);
2809 
2810   T data = MemRead<T>(address);
2811 
2812   if (is_acquire) {
2813     // Approximate load-acquire by issuing a full barrier after the load.
2814     __sync_synchronize();
2815   }
2816 
2817   T result = 0;
2818   switch (instr->Mask(AtomicMemorySimpleOpMask)) {
2819     case LDADDOp:
2820       result = data + value;
2821       break;
2822     case LDCLROp:
2823       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
2824       result = data & ~value;
2825       break;
2826     case LDEOROp:
2827       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
2828       result = data ^ value;
2829       break;
2830     case LDSETOp:
2831       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
2832       result = data | value;
2833       break;
2834 
2835     // Signed/Unsigned difference is done via the templated type T.
2836     case LDSMAXOp:
2837     case LDUMAXOp:
2838       result = (data > value) ? data : value;
2839       break;
2840     case LDSMINOp:
2841     case LDUMINOp:
2842       result = (data > value) ? value : data;
2843       break;
2844   }
2845 
2846   if (is_release) {
2847     // Approximate store-release by issuing a full barrier before the store.
2848     __sync_synchronize();
2849   }
2850 
2851   MemWrite<T>(address, result);
2852   WriteRegister<T>(rt, data, NoRegLog);
2853 
2854   PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
2855   LogRead(rt, format, address);
2856   LogWrite(rs, format, address);
2857 }
2858 
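// Simulate SWP and its acquire/release variants: atomically swap the value in
// Xs with the value at [Xn|SP], returning the original memory value in Xt.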
2859 template <typename T>
2860 void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
2861   unsigned rs = instr->GetRs();
2862   unsigned rt = instr->GetRt();
2863   unsigned rn = instr->GetRn();
2864 
2865   bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
2866   bool is_release = instr->ExtractBit(22) == 1;
2867 
2868   unsigned element_size = sizeof(T);
2869   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2870 
2871   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
2872 
2873   T data = MemRead<T>(address);
2874   if (is_acquire) {
2875     // Approximate load-acquire by issuing a full barrier after the load.
2876     __sync_synchronize();
2877   }
2878 
2879   if (is_release) {
2880     // Approximate store-release by issuing a full barrier before the store.
2881     __sync_synchronize();
2882   }
2883   MemWrite<T>(address, ReadRegister<T>(rs));
2884 
2885   WriteRegister<T>(rt, data);
2886 
2887   PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
2888   LogRead(rt, format, address);
2889   LogWrite(rs, format, address);
2890 }
2891 
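// Simulate the RCpc load-acquire instructions (LDAPRB, LDAPRH, LDAPR): perform
// an ordinary load and approximate the acquire semantics with a full barrier.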
2892 template <typename T>
2893 void Simulator::LoadAcquireRCpcHelper(const Instruction* instr) {
2894   unsigned rt = instr->GetRt();
2895   unsigned rn = instr->GetRn();
2896 
2897   unsigned element_size = sizeof(T);
2898   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
2899 
2900   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
2901 
2902   WriteRegister<T>(rt, MemRead<T>(address));
2903 
2904   // Approximate load-acquire by issuing a full barrier after the load.
2905   __sync_synchronize();
2906 
2907   LogRead(rt, GetPrintRegisterFormatForSize(element_size), address);
2908 }
2909 
2910 #define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
2911   V(LDADD)                                \
2912   V(LDCLR)                                \
2913   V(LDEOR)                                \
2914   V(LDSET)                                \
2915   V(LDUMAX)                               \
2916   V(LDUMIN)
2917 
2918 #define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
2919   V(LDSMAX)                              \
2920   V(LDSMIN)
2921 
2922 void Simulator::VisitAtomicMemory(const Instruction* instr) {
2923   switch (instr->Mask(AtomicMemoryMask)) {
2924 // clang-format off
2925 #define SIM_FUNC_B(A) \
2926     case A##B:        \
2927     case A##AB:       \
2928     case A##LB:       \
2929     case A##ALB:
2930 #define SIM_FUNC_H(A) \
2931     case A##H:        \
2932     case A##AH:       \
2933     case A##LH:       \
2934     case A##ALH:
2935 #define SIM_FUNC_w(A) \
2936     case A##_w:       \
2937     case A##A_w:      \
2938     case A##L_w:      \
2939     case A##AL_w:
2940 #define SIM_FUNC_x(A) \
2941     case A##_x:       \
2942     case A##A_x:      \
2943     case A##L_x:      \
2944     case A##AL_x:
2945 
2946     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
2947       AtomicMemorySimpleHelper<uint8_t>(instr);
2948       break;
2949     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
2950       AtomicMemorySimpleHelper<int8_t>(instr);
2951       break;
2952     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
2953       AtomicMemorySimpleHelper<uint16_t>(instr);
2954       break;
2955     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
2956       AtomicMemorySimpleHelper<int16_t>(instr);
2957       break;
2958     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
2959       AtomicMemorySimpleHelper<uint32_t>(instr);
2960       break;
2961     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
2962       AtomicMemorySimpleHelper<int32_t>(instr);
2963       break;
2964     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
2965       AtomicMemorySimpleHelper<uint64_t>(instr);
2966       break;
2967     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
2968       AtomicMemorySimpleHelper<int64_t>(instr);
2969       break;
2970     // clang-format on
2971 
2972     case SWPB:
2973     case SWPAB:
2974     case SWPLB:
2975     case SWPALB:
2976       AtomicMemorySwapHelper<uint8_t>(instr);
2977       break;
2978     case SWPH:
2979     case SWPAH:
2980     case SWPLH:
2981     case SWPALH:
2982       AtomicMemorySwapHelper<uint16_t>(instr);
2983       break;
2984     case SWP_w:
2985     case SWPA_w:
2986     case SWPL_w:
2987     case SWPAL_w:
2988       AtomicMemorySwapHelper<uint32_t>(instr);
2989       break;
2990     case SWP_x:
2991     case SWPA_x:
2992     case SWPL_x:
2993     case SWPAL_x:
2994       AtomicMemorySwapHelper<uint64_t>(instr);
2995       break;
2996     case LDAPRB:
2997       LoadAcquireRCpcHelper<uint8_t>(instr);
2998       break;
2999     case LDAPRH:
3000       LoadAcquireRCpcHelper<uint16_t>(instr);
3001       break;
3002     case LDAPR_w:
3003       LoadAcquireRCpcHelper<uint32_t>(instr);
3004       break;
3005     case LDAPR_x:
3006       LoadAcquireRCpcHelper<uint64_t>(instr);
3007       break;
3008   }
3009 }
3010 
3011 
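// Handle PC-relative literal loads: LDR (literal) for W, X, S, D and Q
// registers, LDRSW (literal) and the PRFM (literal) hint.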
3012 void Simulator::VisitLoadLiteral(const Instruction* instr) {
3013   unsigned rt = instr->GetRt();
3014   uint64_t address = instr->GetLiteralAddress<uint64_t>();
3015 
3016   // Verify that the calculated address is available to the host.
3017   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
3018 
3019   switch (instr->Mask(LoadLiteralMask)) {
3020     // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
3021     // print a more detailed log.
3022     case LDR_w_lit:
3023       WriteWRegister(rt, MemRead<uint32_t>(address), NoRegLog);
3024       LogRead(rt, kPrintWReg, address);
3025       break;
3026     case LDR_x_lit:
3027       WriteXRegister(rt, MemRead<uint64_t>(address), NoRegLog);
3028       LogRead(rt, kPrintXReg, address);
3029       break;
3030     case LDR_s_lit:
3031       WriteSRegister(rt, MemRead<float>(address), NoRegLog);
3032       LogVRead(rt, kPrintSRegFP, address);
3033       break;
3034     case LDR_d_lit:
3035       WriteDRegister(rt, MemRead<double>(address), NoRegLog);
3036       LogVRead(rt, kPrintDRegFP, address);
3037       break;
3038     case LDR_q_lit:
3039       WriteQRegister(rt, MemRead<qreg_t>(address), NoRegLog);
3040       LogVRead(rt, kPrintReg1Q, address);
3041       break;
3042     case LDRSW_x_lit:
3043       WriteXRegister(rt, MemRead<int32_t>(address), NoRegLog);
3044       LogExtendingRead(rt, kPrintXReg, kWRegSizeInBytes, address);
3045       break;
3046 
3047     // Ignore prfm hint instructions.
3048     case PRFM_lit:
3049       break;
3050 
3051     default:
3052       VIXL_UNREACHABLE();
3053   }
3054 
3055   local_monitor_.MaybeClear();
3056 }
3057 
3058 
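// Compute the effective address for a load/store, applying pre- or post-index
// write-back to the base register and raising an alignment fault if SP is used
// as the base while misaligned.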
3059 uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
3060                                        int64_t offset,
3061                                        AddrMode addrmode) {
3062   uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer);
3063 
3064   if ((addr_reg == 31) && ((address % 16) != 0)) {
3065     // When the base register is SP the stack pointer is required to be
3066     // quadword aligned prior to the address calculation and write-backs.
3067     // Misalignment will cause a stack alignment fault.
3068     VIXL_ALIGNMENT_EXCEPTION();
3069   }
3070 
3071   if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
3072     VIXL_ASSERT(offset != 0);
3073     // Only preindex should log the register update here. For Postindex, the
3074     // update will be printed automatically by LogWrittenRegisters _after_ the
3075     // memory access itself is logged.
3076     RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog;
3077     WriteXRegister(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
3078   }
3079 
3080   if ((addrmode == Offset) || (addrmode == PreIndex)) {
3081     address += offset;
3082   }
3083 
3084   // Verify that the calculated address is available to the host.
3085   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
3086 
3087   return static_cast<uintptr_t>(address);
3088 }
3089 
3090 
3091 void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
3092   MoveWideImmediateOp mov_op =
3093       static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
3094   int64_t new_xn_val = 0;
3095 
3096   bool is_64_bits = instr->GetSixtyFourBits() == 1;
3097   // The shift amount is limited to 0 or 16 for W (32-bit) operations.
3098   VIXL_ASSERT(is_64_bits || (instr->GetShiftMoveWide() < 2));
3099 
3100   // Get the shifted immediate.
3101   int64_t shift = instr->GetShiftMoveWide() * 16;
3102   int64_t shifted_imm16 = static_cast<int64_t>(instr->GetImmMoveWide())
3103                           << shift;
3104 
3105   // Compute the new value.
3106   switch (mov_op) {
3107     case MOVN_w:
3108     case MOVN_x: {
3109       new_xn_val = ~shifted_imm16;
3110       if (!is_64_bits) new_xn_val &= kWRegMask;
3111       break;
3112     }
3113     case MOVK_w:
3114     case MOVK_x: {
3115       unsigned reg_code = instr->GetRd();
3116       int64_t prev_xn_val =
3117           is_64_bits ? ReadXRegister(reg_code) : ReadWRegister(reg_code);
3118       new_xn_val = (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
3119       break;
3120     }
3121     case MOVZ_w:
3122     case MOVZ_x: {
3123       new_xn_val = shifted_imm16;
3124       break;
3125     }
3126     default:
3127       VIXL_UNREACHABLE();
3128   }
3129 
3130   // Update the destination register.
3131   WriteXRegister(instr->GetRd(), new_xn_val);
3132 }
3133 
3134 
3135 void Simulator::VisitConditionalSelect(const Instruction* instr) {
3136   uint64_t new_val = ReadXRegister(instr->GetRn());
3137 
3138   if (ConditionFailed(static_cast<Condition>(instr->GetCondition()))) {
3139     new_val = ReadXRegister(instr->GetRm());
3140     switch (instr->Mask(ConditionalSelectMask)) {
3141       case CSEL_w:
3142       case CSEL_x:
3143         break;
3144       case CSINC_w:
3145       case CSINC_x:
3146         new_val++;
3147         break;
3148       case CSINV_w:
3149       case CSINV_x:
3150         new_val = ~new_val;
3151         break;
3152       case CSNEG_w:
3153       case CSNEG_x:
3154         new_val = -new_val;
3155         break;
3156       default:
3157         VIXL_UNIMPLEMENTED();
3158     }
3159   }
3160   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
3161   WriteRegister(reg_size, instr->GetRd(), new_val);
3162 }
3163 
3164 
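// Pointer authentication modes, as (suffix, modifier, key, pointer type)
// tuples, used to expand the PAC* and AUT* cases below.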
3165 // clang-format off
3166 #define PAUTH_MODES(V)                                       \
3167   V(IA,  ReadXRegister(src), kPACKeyIA, kInstructionPointer) \
3168   V(IB,  ReadXRegister(src), kPACKeyIB, kInstructionPointer) \
3169   V(IZA, 0x00000000,         kPACKeyIA, kInstructionPointer) \
3170   V(IZB, 0x00000000,         kPACKeyIB, kInstructionPointer) \
3171   V(DA,  ReadXRegister(src), kPACKeyDA, kDataPointer)        \
3172   V(DB,  ReadXRegister(src), kPACKeyDB, kDataPointer)        \
3173   V(DZA, 0x00000000,         kPACKeyDA, kDataPointer)        \
3174   V(DZB, 0x00000000,         kPACKeyDB, kDataPointer)
3175 // clang-format on
3176 
3177 void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
3178   unsigned dst = instr->GetRd();
3179   unsigned src = instr->GetRn();
3180 
3181   switch (instr->Mask(DataProcessing1SourceMask)) {
3182 #define DEFINE_PAUTH_FUNCS(SUFFIX, MOD, KEY, D)     \
3183   case PAC##SUFFIX: {                               \
3184     uint64_t ptr = ReadXRegister(dst);              \
3185     WriteXRegister(dst, AddPAC(ptr, MOD, KEY, D));  \
3186     break;                                          \
3187   }                                                 \
3188   case AUT##SUFFIX: {                               \
3189     uint64_t ptr = ReadXRegister(dst);              \
3190     WriteXRegister(dst, AuthPAC(ptr, MOD, KEY, D)); \
3191     break;                                          \
3192   }
3193 
3194     PAUTH_MODES(DEFINE_PAUTH_FUNCS)
3195 #undef DEFINE_PAUTH_FUNCS
3196 
3197     case XPACI:
3198       WriteXRegister(dst, StripPAC(ReadXRegister(dst), kInstructionPointer));
3199       break;
3200     case XPACD:
3201       WriteXRegister(dst, StripPAC(ReadXRegister(dst), kDataPointer));
3202       break;
3203     case RBIT_w:
3204       WriteWRegister(dst, ReverseBits(ReadWRegister(src)));
3205       break;
3206     case RBIT_x:
3207       WriteXRegister(dst, ReverseBits(ReadXRegister(src)));
3208       break;
3209     case REV16_w:
3210       WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 1));
3211       break;
3212     case REV16_x:
3213       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 1));
3214       break;
3215     case REV_w:
3216       WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 2));
3217       break;
3218     case REV32_x:
3219       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 2));
3220       break;
3221     case REV_x:
3222       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 3));
3223       break;
3224     case CLZ_w:
3225       WriteWRegister(dst, CountLeadingZeros(ReadWRegister(src)));
3226       break;
3227     case CLZ_x:
3228       WriteXRegister(dst, CountLeadingZeros(ReadXRegister(src)));
3229       break;
3230     case CLS_w:
3231       WriteWRegister(dst, CountLeadingSignBits(ReadWRegister(src)));
3232       break;
3233     case CLS_x:
3234       WriteXRegister(dst, CountLeadingSignBits(ReadXRegister(src)));
3235       break;
3236     default:
3237       VIXL_UNIMPLEMENTED();
3238   }
3239 }
3240 
3241 
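// Reduce the n-bit value 'data' modulo the CRC polynomial 'poly' (degree 32,
// with the x^32 term implicit) over GF(2), working from the top bit down, and
// return the 32-bit remainder. This is the reduction step used by the CRC32*
// instructions below.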
3242 uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
3243   VIXL_ASSERT((n > 32) && (n <= 64));
3244   for (unsigned i = (n - 1); i >= 32; i--) {
3245     if (((data >> i) & 1) != 0) {
3246       uint64_t polysh32 = (uint64_t)poly << (i - 32);
3247       uint64_t mask = (UINT64_C(1) << i) - 1;
3248       data = ((data & mask) ^ polysh32);
3249     }
3250   }
3251   return data & 0xffffffff;
3252 }
3253 
3254 
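// Accumulate up to 32 bits of 'val' into the CRC32 accumulator 'acc' using the
// given polynomial. The ReverseBits calls implement the reflected (LSB-first)
// bit ordering used by the CRC32 and CRC32C instructions.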
3255 template <typename T>
3256 uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
3257   unsigned size = sizeof(val) * 8;  // Number of bits in type T.
3258   VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
3259   uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
3260   uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
3261   return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
3262 }
3263 
3264 
3265 uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
3266   // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute
3267   // the CRC of each 32-bit word sequentially.
3268   acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
3269   return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
3270 }
3271 
3272 
3273 void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
3274   Shift shift_op = NO_SHIFT;
3275   int64_t result = 0;
3276   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
3277 
3278   switch (instr->Mask(DataProcessing2SourceMask)) {
3279     case SDIV_w: {
3280       int32_t rn = ReadWRegister(instr->GetRn());
3281       int32_t rm = ReadWRegister(instr->GetRm());
3282       if ((rn == kWMinInt) && (rm == -1)) {
3283         result = kWMinInt;
3284       } else if (rm == 0) {
3285         // Division by zero can be trapped, but not on A-class processors.
3286         result = 0;
3287       } else {
3288         result = rn / rm;
3289       }
3290       break;
3291     }
3292     case SDIV_x: {
3293       int64_t rn = ReadXRegister(instr->GetRn());
3294       int64_t rm = ReadXRegister(instr->GetRm());
3295       if ((rn == kXMinInt) && (rm == -1)) {
3296         result = kXMinInt;
3297       } else if (rm == 0) {
3298         // Division by zero can be trapped, but not on A-class processors.
3299         result = 0;
3300       } else {
3301         result = rn / rm;
3302       }
3303       break;
3304     }
3305     case UDIV_w: {
3306       uint32_t rn = static_cast<uint32_t>(ReadWRegister(instr->GetRn()));
3307       uint32_t rm = static_cast<uint32_t>(ReadWRegister(instr->GetRm()));
3308       if (rm == 0) {
3309         // Division by zero can be trapped, but not on A-class processors.
3310         result = 0;
3311       } else {
3312         result = rn / rm;
3313       }
3314       break;
3315     }
3316     case UDIV_x: {
3317       uint64_t rn = static_cast<uint64_t>(ReadXRegister(instr->GetRn()));
3318       uint64_t rm = static_cast<uint64_t>(ReadXRegister(instr->GetRm()));
3319       if (rm == 0) {
3320         // Division by zero can be trapped, but not on A-class processors.
3321         result = 0;
3322       } else {
3323         result = rn / rm;
3324       }
3325       break;
3326     }
3327     case LSLV_w:
3328     case LSLV_x:
3329       shift_op = LSL;
3330       break;
3331     case LSRV_w:
3332     case LSRV_x:
3333       shift_op = LSR;
3334       break;
3335     case ASRV_w:
3336     case ASRV_x:
3337       shift_op = ASR;
3338       break;
3339     case RORV_w:
3340     case RORV_x:
3341       shift_op = ROR;
3342       break;
3343     case PACGA: {
3344       uint64_t dst = static_cast<uint64_t>(ReadXRegister(instr->GetRn()));
3345       uint64_t src = static_cast<uint64_t>(
3346           ReadXRegister(instr->GetRm(), Reg31IsStackPointer));
3347       uint64_t code = ComputePAC(dst, src, kPACKeyGA);
3348       result = code & 0xffffffff00000000;
3349       break;
3350     }
3351     case CRC32B: {
3352       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3353       uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
3354       result = Crc32Checksum(acc, val, CRC32_POLY);
3355       break;
3356     }
3357     case CRC32H: {
3358       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3359       uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
3360       result = Crc32Checksum(acc, val, CRC32_POLY);
3361       break;
3362     }
3363     case CRC32W: {
3364       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3365       uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
3366       result = Crc32Checksum(acc, val, CRC32_POLY);
3367       break;
3368     }
3369     case CRC32X: {
3370       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3371       uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
3372       result = Crc32Checksum(acc, val, CRC32_POLY);
3373       reg_size = kWRegSize;
3374       break;
3375     }
3376     case CRC32CB: {
3377       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3378       uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
3379       result = Crc32Checksum(acc, val, CRC32C_POLY);
3380       break;
3381     }
3382     case CRC32CH: {
3383       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3384       uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
3385       result = Crc32Checksum(acc, val, CRC32C_POLY);
3386       break;
3387     }
3388     case CRC32CW: {
3389       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3390       uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
3391       result = Crc32Checksum(acc, val, CRC32C_POLY);
3392       break;
3393     }
3394     case CRC32CX: {
3395       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
3396       uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
3397       result = Crc32Checksum(acc, val, CRC32C_POLY);
3398       reg_size = kWRegSize;
3399       break;
3400     }
3401     default:
3402       VIXL_UNIMPLEMENTED();
3403   }
3404 
3405   if (shift_op != NO_SHIFT) {
3406     // The shift distance is encoded in the least-significant five (W) or
3407     // six (X) bits of the Rm register.
3408     int mask = (instr->GetSixtyFourBits() == 1) ? 0x3f : 0x1f;
3409     unsigned shift = ReadWRegister(instr->GetRm()) & mask;
3410     result = ShiftOperand(reg_size,
3411                           ReadRegister(reg_size, instr->GetRn()),
3412                           shift_op,
3413                           shift);
3414   }
3415   WriteRegister(reg_size, instr->GetRd(), result);
3416 }
3417 
3418 
3419 void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
3420   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
3421 
3422   uint64_t result = 0;
3423   // Extract and sign- or zero-extend 32-bit arguments for widening operations.
3424   uint64_t rn_u32 = ReadRegister<uint32_t>(instr->GetRn());
3425   uint64_t rm_u32 = ReadRegister<uint32_t>(instr->GetRm());
3426   int64_t rn_s32 = ReadRegister<int32_t>(instr->GetRn());
3427   int64_t rm_s32 = ReadRegister<int32_t>(instr->GetRm());
3428   uint64_t rn_u64 = ReadXRegister(instr->GetRn());
3429   uint64_t rm_u64 = ReadXRegister(instr->GetRm());
3430   switch (instr->Mask(DataProcessing3SourceMask)) {
3431     case MADD_w:
3432     case MADD_x:
3433       result = ReadXRegister(instr->GetRa()) + (rn_u64 * rm_u64);
3434       break;
3435     case MSUB_w:
3436     case MSUB_x:
3437       result = ReadXRegister(instr->GetRa()) - (rn_u64 * rm_u64);
3438       break;
3439     case SMADDL_x:
3440       result = ReadXRegister(instr->GetRa()) +
3441                static_cast<uint64_t>(rn_s32 * rm_s32);
3442       break;
3443     case SMSUBL_x:
3444       result = ReadXRegister(instr->GetRa()) -
3445                static_cast<uint64_t>(rn_s32 * rm_s32);
3446       break;
3447     case UMADDL_x:
3448       result = ReadXRegister(instr->GetRa()) + (rn_u32 * rm_u32);
3449       break;
3450     case UMSUBL_x:
3451       result = ReadXRegister(instr->GetRa()) - (rn_u32 * rm_u32);
3452       break;
3453     case UMULH_x:
3454       result =
3455           internal::MultiplyHigh<64>(ReadRegister<uint64_t>(instr->GetRn()),
3456                                      ReadRegister<uint64_t>(instr->GetRm()));
3457       break;
3458     case SMULH_x:
3459       result = internal::MultiplyHigh<64>(ReadXRegister(instr->GetRn()),
3460                                           ReadXRegister(instr->GetRm()));
3461       break;
3462     default:
3463       VIXL_UNIMPLEMENTED();
3464   }
3465   WriteRegister(reg_size, instr->GetRd(), result);
3466 }
3467 
3468 
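// Simulate BFM, SBFM and UBFM. The source is rotated right by immr and merged
// into the destination under a mask derived from immr and imms: when
// imms >= immr the field is extracted to the bottom of the register, otherwise
// it is inserted higher up. BFM preserves the destination bits outside the
// field, UBFM zeroes them, and SBFM zeroes them below the field and
// sign-extends above it.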
3469 void Simulator::VisitBitfield(const Instruction* instr) {
3470   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
3471   int64_t reg_mask = instr->GetSixtyFourBits() ? kXRegMask : kWRegMask;
3472   int R = instr->GetImmR();
3473   int S = instr->GetImmS();
3474   int diff = S - R;
3475   uint64_t mask;
3476   if (diff >= 0) {
3477     mask = ~UINT64_C(0) >> (64 - (diff + 1));
3478     mask = (static_cast<unsigned>(diff) < (reg_size - 1)) ? mask : reg_mask;
3479   } else {
3480     mask = ~UINT64_C(0) >> (64 - (S + 1));
3481     mask = RotateRight(mask, R, reg_size);
3482     diff += reg_size;
3483   }
3484 
3485   // inzero indicates whether the extracted bitfield is inserted into the
3486   // existing destination register value or into zero.
3487   // If extend is true, the extracted bitfield is sign-extended.
3488   bool inzero = false;
3489   bool extend = false;
3490   switch (instr->Mask(BitfieldMask)) {
3491     case BFM_x:
3492     case BFM_w:
3493       break;
3494     case SBFM_x:
3495     case SBFM_w:
3496       inzero = true;
3497       extend = true;
3498       break;
3499     case UBFM_x:
3500     case UBFM_w:
3501       inzero = true;
3502       break;
3503     default:
3504       VIXL_UNIMPLEMENTED();
3505   }
3506 
3507   uint64_t dst = inzero ? 0 : ReadRegister(reg_size, instr->GetRd());
3508   uint64_t src = ReadRegister(reg_size, instr->GetRn());
3509   // Rotate source bitfield into place.
3510   uint64_t result = RotateRight(src, R, reg_size);
3511   // Determine the sign extension.
3512   uint64_t topbits = (diff == 63) ? 0 : (~UINT64_C(0) << (diff + 1));
3513   uint64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
3514 
3515   // Merge sign extension, dest/zero and bitfield.
3516   result = signbits | (result & mask) | (dst & ~mask);
3517 
3518   WriteRegister(reg_size, instr->GetRd(), result);
3519 }
3520 
3521 
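// Simulate EXTR: extract a register-sized field starting at bit <lsb> of the
// Rn:Rm concatenation.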
3522 void Simulator::VisitExtract(const Instruction* instr) {
3523   unsigned lsb = instr->GetImmS();
3524   unsigned reg_size = (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize;
3525   uint64_t low_res =
3526       static_cast<uint64_t>(ReadRegister(reg_size, instr->GetRm())) >> lsb;
3527   uint64_t high_res = (lsb == 0)
3528                           ? 0
3529                           : ReadRegister<uint64_t>(reg_size, instr->GetRn())
3530                                 << (reg_size - lsb);
3531   WriteRegister(reg_size, instr->GetRd(), low_res | high_res);
3532 }
3533 
3534 
3535 void Simulator::VisitFPImmediate(const Instruction* instr) {
3536   AssertSupportedFPCR();
3537   unsigned dest = instr->GetRd();
3538   switch (instr->Mask(FPImmediateMask)) {
3539     case FMOV_h_imm:
3540       WriteHRegister(dest, Float16ToRawbits(instr->GetImmFP16()));
3541       break;
3542     case FMOV_s_imm:
3543       WriteSRegister(dest, instr->GetImmFP32());
3544       break;
3545     case FMOV_d_imm:
3546       WriteDRegister(dest, instr->GetImmFP64());
3547       break;
3548     default:
3549       VIXL_UNREACHABLE();
3550   }
3551 }
3552 
3553 
3554 void Simulator::VisitFPIntegerConvert(const Instruction* instr) {
3555   AssertSupportedFPCR();
3556 
3557   unsigned dst = instr->GetRd();
3558   unsigned src = instr->GetRn();
3559 
3560   FPRounding round = ReadRMode();
3561 
3562   switch (instr->Mask(FPIntegerConvertMask)) {
3563     case FCVTAS_wh:
3564       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieAway));
3565       break;
3566     case FCVTAS_xh:
3567       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieAway));
3568       break;
3569     case FCVTAS_ws:
3570       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieAway));
3571       break;
3572     case FCVTAS_xs:
3573       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieAway));
3574       break;
3575     case FCVTAS_wd:
3576       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieAway));
3577       break;
3578     case FCVTAS_xd:
3579       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieAway));
3580       break;
3581     case FCVTAU_wh:
3582       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieAway));
3583       break;
3584     case FCVTAU_xh:
3585       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieAway));
3586       break;
3587     case FCVTAU_ws:
3588       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieAway));
3589       break;
3590     case FCVTAU_xs:
3591       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieAway));
3592       break;
3593     case FCVTAU_wd:
3594       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieAway));
3595       break;
3596     case FCVTAU_xd:
3597       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieAway));
3598       break;
3599     case FCVTMS_wh:
3600       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPNegativeInfinity));
3601       break;
3602     case FCVTMS_xh:
3603       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPNegativeInfinity));
3604       break;
3605     case FCVTMS_ws:
3606       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPNegativeInfinity));
3607       break;
3608     case FCVTMS_xs:
3609       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPNegativeInfinity));
3610       break;
3611     case FCVTMS_wd:
3612       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPNegativeInfinity));
3613       break;
3614     case FCVTMS_xd:
3615       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPNegativeInfinity));
3616       break;
3617     case FCVTMU_wh:
3618       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPNegativeInfinity));
3619       break;
3620     case FCVTMU_xh:
3621       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPNegativeInfinity));
3622       break;
3623     case FCVTMU_ws:
3624       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPNegativeInfinity));
3625       break;
3626     case FCVTMU_xs:
3627       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPNegativeInfinity));
3628       break;
3629     case FCVTMU_wd:
3630       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPNegativeInfinity));
3631       break;
3632     case FCVTMU_xd:
3633       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPNegativeInfinity));
3634       break;
3635     case FCVTPS_wh:
3636       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPPositiveInfinity));
3637       break;
3638     case FCVTPS_xh:
3639       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPPositiveInfinity));
3640       break;
3641     case FCVTPS_ws:
3642       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPPositiveInfinity));
3643       break;
3644     case FCVTPS_xs:
3645       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPPositiveInfinity));
3646       break;
3647     case FCVTPS_wd:
3648       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPPositiveInfinity));
3649       break;
3650     case FCVTPS_xd:
3651       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPPositiveInfinity));
3652       break;
3653     case FCVTPU_wh:
3654       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPPositiveInfinity));
3655       break;
3656     case FCVTPU_xh:
3657       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPPositiveInfinity));
3658       break;
3659     case FCVTPU_ws:
3660       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPPositiveInfinity));
3661       break;
3662     case FCVTPU_xs:
3663       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPPositiveInfinity));
3664       break;
3665     case FCVTPU_wd:
3666       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPPositiveInfinity));
3667       break;
3668     case FCVTPU_xd:
3669       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPPositiveInfinity));
3670       break;
3671     case FCVTNS_wh:
3672       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieEven));
3673       break;
3674     case FCVTNS_xh:
3675       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieEven));
3676       break;
3677     case FCVTNS_ws:
3678       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieEven));
3679       break;
3680     case FCVTNS_xs:
3681       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieEven));
3682       break;
3683     case FCVTNS_wd:
3684       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieEven));
3685       break;
3686     case FCVTNS_xd:
3687       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieEven));
3688       break;
3689     case FCVTNU_wh:
3690       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieEven));
3691       break;
3692     case FCVTNU_xh:
3693       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieEven));
3694       break;
3695     case FCVTNU_ws:
3696       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieEven));
3697       break;
3698     case FCVTNU_xs:
3699       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieEven));
3700       break;
3701     case FCVTNU_wd:
3702       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieEven));
3703       break;
3704     case FCVTNU_xd:
3705       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieEven));
3706       break;
3707     case FCVTZS_wh:
3708       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPZero));
3709       break;
3710     case FCVTZS_xh:
3711       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPZero));
3712       break;
3713     case FCVTZS_ws:
3714       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPZero));
3715       break;
3716     case FCVTZS_xs:
3717       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPZero));
3718       break;
3719     case FCVTZS_wd:
3720       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPZero));
3721       break;
3722     case FCVTZS_xd:
3723       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPZero));
3724       break;
3725     case FCVTZU_wh:
3726       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPZero));
3727       break;
3728     case FCVTZU_xh:
3729       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPZero));
3730       break;
3731     case FCVTZU_ws:
3732       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPZero));
3733       break;
3734     case FCVTZU_xs:
3735       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPZero));
3736       break;
3737     case FCVTZU_wd:
3738       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPZero));
3739       break;
3740     case FCVTZU_xd:
3741       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPZero));
3742       break;
3743     case FJCVTZS:
3744       WriteWRegister(dst, FPToFixedJS(ReadDRegister(src)));
3745       break;
3746     case FMOV_hw:
3747       WriteHRegister(dst, ReadWRegister(src) & kHRegMask);
3748       break;
3749     case FMOV_wh:
3750       WriteWRegister(dst, ReadHRegisterBits(src));
3751       break;
3752     case FMOV_xh:
3753       WriteXRegister(dst, ReadHRegisterBits(src));
3754       break;
3755     case FMOV_hx:
3756       WriteHRegister(dst, ReadXRegister(src) & kHRegMask);
3757       break;
3758     case FMOV_ws:
3759       WriteWRegister(dst, ReadSRegisterBits(src));
3760       break;
3761     case FMOV_xd:
3762       WriteXRegister(dst, ReadDRegisterBits(src));
3763       break;
3764     case FMOV_sw:
3765       WriteSRegisterBits(dst, ReadWRegister(src));
3766       break;
3767     case FMOV_dx:
3768       WriteDRegisterBits(dst, ReadXRegister(src));
3769       break;
3770     case FMOV_d1_x:
3771       LogicVRegister(ReadVRegister(dst))
3772           .SetUint(kFormatD, 1, ReadXRegister(src));
3773       break;
3774     case FMOV_x_d1:
3775       WriteXRegister(dst, LogicVRegister(ReadVRegister(src)).Uint(kFormatD, 1));
3776       break;
3777 
3778     // A 32-bit input can be handled in the same way as a 64-bit input, since
3779     // the sign- or zero-extension will not affect the conversion.
3780     case SCVTF_dx:
3781       WriteDRegister(dst, FixedToDouble(ReadXRegister(src), 0, round));
3782       break;
3783     case SCVTF_dw:
3784       WriteDRegister(dst, FixedToDouble(ReadWRegister(src), 0, round));
3785       break;
3786     case UCVTF_dx:
3787       WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), 0, round));
3788       break;
3789     case UCVTF_dw: {
3790       WriteDRegister(dst,
3791                      UFixedToDouble(ReadRegister<uint32_t>(src), 0, round));
3792       break;
3793     }
3794     case SCVTF_sx:
3795       WriteSRegister(dst, FixedToFloat(ReadXRegister(src), 0, round));
3796       break;
3797     case SCVTF_sw:
3798       WriteSRegister(dst, FixedToFloat(ReadWRegister(src), 0, round));
3799       break;
3800     case UCVTF_sx:
3801       WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), 0, round));
3802       break;
3803     case UCVTF_sw: {
3804       WriteSRegister(dst, UFixedToFloat(ReadRegister<uint32_t>(src), 0, round));
3805       break;
3806     }
3807     case SCVTF_hx:
3808       WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), 0, round));
3809       break;
3810     case SCVTF_hw:
3811       WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), 0, round));
3812       break;
3813     case UCVTF_hx:
3814       WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), 0, round));
3815       break;
3816     case UCVTF_hw: {
3817       WriteHRegister(dst,
3818                      UFixedToFloat16(ReadRegister<uint32_t>(src), 0, round));
3819       break;
3820     }
3821 
3822     default:
3823       VIXL_UNREACHABLE();
3824   }
3825 }
3826 
3827 
3828 void Simulator::VisitFPFixedPointConvert(const Instruction* instr) {
3829   AssertSupportedFPCR();
3830 
3831   unsigned dst = instr->GetRd();
3832   unsigned src = instr->GetRn();
3833   int fbits = 64 - instr->GetFPScale();
3834 
3835   FPRounding round = ReadRMode();
3836 
3837   switch (instr->Mask(FPFixedPointConvertMask)) {
3838     // A 32-bit input can be handled in the same way as a 64-bit input, since
3839     // the sign- or zero-extension will not affect the conversion.
3840     case SCVTF_dx_fixed:
3841       WriteDRegister(dst, FixedToDouble(ReadXRegister(src), fbits, round));
3842       break;
3843     case SCVTF_dw_fixed:
3844       WriteDRegister(dst, FixedToDouble(ReadWRegister(src), fbits, round));
3845       break;
3846     case UCVTF_dx_fixed:
3847       WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), fbits, round));
3848       break;
3849     case UCVTF_dw_fixed: {
3850       WriteDRegister(dst,
3851                      UFixedToDouble(ReadRegister<uint32_t>(src), fbits, round));
3852       break;
3853     }
3854     case SCVTF_sx_fixed:
3855       WriteSRegister(dst, FixedToFloat(ReadXRegister(src), fbits, round));
3856       break;
3857     case SCVTF_sw_fixed:
3858       WriteSRegister(dst, FixedToFloat(ReadWRegister(src), fbits, round));
3859       break;
3860     case UCVTF_sx_fixed:
3861       WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), fbits, round));
3862       break;
3863     case UCVTF_sw_fixed: {
3864       WriteSRegister(dst,
3865                      UFixedToFloat(ReadRegister<uint32_t>(src), fbits, round));
3866       break;
3867     }
3868     case SCVTF_hx_fixed:
3869       WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), fbits, round));
3870       break;
3871     case SCVTF_hw_fixed:
3872       WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), fbits, round));
3873       break;
3874     case UCVTF_hx_fixed:
3875       WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), fbits, round));
3876       break;
3877     case UCVTF_hw_fixed: {
3878       WriteHRegister(dst,
3879                      UFixedToFloat16(ReadRegister<uint32_t>(src),
3880                                      fbits,
3881                                      round));
3882       break;
3883     }
3884     case FCVTZS_xd_fixed:
3885       WriteXRegister(dst,
3886                      FPToInt64(ReadDRegister(src) * std::pow(2.0, fbits),
3887                                FPZero));
3888       break;
3889     case FCVTZS_wd_fixed:
3890       WriteWRegister(dst,
3891                      FPToInt32(ReadDRegister(src) * std::pow(2.0, fbits),
3892                                FPZero));
3893       break;
3894     case FCVTZU_xd_fixed:
3895       WriteXRegister(dst,
3896                      FPToUInt64(ReadDRegister(src) * std::pow(2.0, fbits),
3897                                 FPZero));
3898       break;
3899     case FCVTZU_wd_fixed:
3900       WriteWRegister(dst,
3901                      FPToUInt32(ReadDRegister(src) * std::pow(2.0, fbits),
3902                                 FPZero));
3903       break;
3904     case FCVTZS_xs_fixed:
3905       WriteXRegister(dst,
3906                      FPToInt64(ReadSRegister(src) * std::pow(2.0f, fbits),
3907                                FPZero));
3908       break;
3909     case FCVTZS_ws_fixed:
3910       WriteWRegister(dst,
3911                      FPToInt32(ReadSRegister(src) * std::pow(2.0f, fbits),
3912                                FPZero));
3913       break;
3914     case FCVTZU_xs_fixed:
3915       WriteXRegister(dst,
3916                      FPToUInt64(ReadSRegister(src) * std::pow(2.0f, fbits),
3917                                 FPZero));
3918       break;
3919     case FCVTZU_ws_fixed:
3920       WriteWRegister(dst,
3921                      FPToUInt32(ReadSRegister(src) * std::pow(2.0f, fbits),
3922                                 FPZero));
3923       break;
3924     case FCVTZS_xh_fixed: {
3925       double output =
3926           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
3927       WriteXRegister(dst, FPToInt64(output, FPZero));
3928       break;
3929     }
3930     case FCVTZS_wh_fixed: {
3931       double output =
3932           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
3933       WriteWRegister(dst, FPToInt32(output, FPZero));
3934       break;
3935     }
3936     case FCVTZU_xh_fixed: {
3937       double output =
3938           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
3939       WriteXRegister(dst, FPToUInt64(output, FPZero));
3940       break;
3941     }
3942     case FCVTZU_wh_fixed: {
3943       double output =
3944           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
3945       WriteWRegister(dst, FPToUInt32(output, FPZero));
3946       break;
3947     }
3948     default:
3949       VIXL_UNREACHABLE();
3950   }
3951 }
3952 
3953 
3954 void Simulator::VisitFPCompare(const Instruction* instr) {
3955   AssertSupportedFPCR();
3956 
3957   FPTrapFlags trap = DisableTrap;
3958   switch (instr->Mask(FPCompareMask)) {
3959     case FCMPE_h:
3960       trap = EnableTrap;
3961       VIXL_FALLTHROUGH();
3962     case FCMP_h:
3963       FPCompare(ReadHRegister(instr->GetRn()),
3964                 ReadHRegister(instr->GetRm()),
3965                 trap);
3966       break;
3967     case FCMPE_s:
3968       trap = EnableTrap;
3969       VIXL_FALLTHROUGH();
3970     case FCMP_s:
3971       FPCompare(ReadSRegister(instr->GetRn()),
3972                 ReadSRegister(instr->GetRm()),
3973                 trap);
3974       break;
3975     case FCMPE_d:
3976       trap = EnableTrap;
3977       VIXL_FALLTHROUGH();
3978     case FCMP_d:
3979       FPCompare(ReadDRegister(instr->GetRn()),
3980                 ReadDRegister(instr->GetRm()),
3981                 trap);
3982       break;
3983     case FCMPE_h_zero:
3984       trap = EnableTrap;
3985       VIXL_FALLTHROUGH();
3986     case FCMP_h_zero:
3987       FPCompare(ReadHRegister(instr->GetRn()), SimFloat16(0.0), trap);
3988       break;
3989     case FCMPE_s_zero:
3990       trap = EnableTrap;
3991       VIXL_FALLTHROUGH();
3992     case FCMP_s_zero:
3993       FPCompare(ReadSRegister(instr->GetRn()), 0.0f, trap);
3994       break;
3995     case FCMPE_d_zero:
3996       trap = EnableTrap;
3997       VIXL_FALLTHROUGH();
3998     case FCMP_d_zero:
3999       FPCompare(ReadDRegister(instr->GetRn()), 0.0, trap);
4000       break;
4001     default:
4002       VIXL_UNIMPLEMENTED();
4003   }
4004 }
4005 
4006 
4007 void Simulator::VisitFPConditionalCompare(const Instruction* instr) {
4008   AssertSupportedFPCR();
4009 
4010   FPTrapFlags trap = DisableTrap;
4011   switch (instr->Mask(FPConditionalCompareMask)) {
4012     case FCCMPE_h:
4013       trap = EnableTrap;
4014       VIXL_FALLTHROUGH();
4015     case FCCMP_h:
4016       if (ConditionPassed(instr->GetCondition())) {
4017         FPCompare(ReadHRegister(instr->GetRn()),
4018                   ReadHRegister(instr->GetRm()),
4019                   trap);
4020       } else {
4021         ReadNzcv().SetFlags(instr->GetNzcv());
4022         LogSystemRegister(NZCV);
4023       }
4024       break;
4025     case FCCMPE_s:
4026       trap = EnableTrap;
4027       VIXL_FALLTHROUGH();
4028     case FCCMP_s:
4029       if (ConditionPassed(instr->GetCondition())) {
4030         FPCompare(ReadSRegister(instr->GetRn()),
4031                   ReadSRegister(instr->GetRm()),
4032                   trap);
4033       } else {
4034         ReadNzcv().SetFlags(instr->GetNzcv());
4035         LogSystemRegister(NZCV);
4036       }
4037       break;
4038     case FCCMPE_d:
4039       trap = EnableTrap;
4040       VIXL_FALLTHROUGH();
4041     case FCCMP_d:
4042       if (ConditionPassed(instr->GetCondition())) {
4043         FPCompare(ReadDRegister(instr->GetRn()),
4044                   ReadDRegister(instr->GetRm()),
4045                   trap);
4046       } else {
4047         ReadNzcv().SetFlags(instr->GetNzcv());
4048         LogSystemRegister(NZCV);
4049       }
4050       break;
4051     default:
4052       VIXL_UNIMPLEMENTED();
4053   }
4054 }
4055 
4056 
4057 void Simulator::VisitFPConditionalSelect(const Instruction* instr) {
4058   AssertSupportedFPCR();
4059 
4060   Instr selected;
4061   if (ConditionPassed(instr->GetCondition())) {
4062     selected = instr->GetRn();
4063   } else {
4064     selected = instr->GetRm();
4065   }
4066 
4067   switch (instr->Mask(FPConditionalSelectMask)) {
4068     case FCSEL_h:
4069       WriteHRegister(instr->GetRd(), ReadHRegister(selected));
4070       break;
4071     case FCSEL_s:
4072       WriteSRegister(instr->GetRd(), ReadSRegister(selected));
4073       break;
4074     case FCSEL_d:
4075       WriteDRegister(instr->GetRd(), ReadDRegister(selected));
4076       break;
4077     default:
4078       VIXL_UNIMPLEMENTED();
4079   }
4080 }
4081 
4082 
4083 void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) {
4084   AssertSupportedFPCR();
4085 
4086   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
4087   VectorFormat vform;
4088   switch (instr->Mask(FPTypeMask)) {
4089     default:
4090       VIXL_UNREACHABLE_OR_FALLTHROUGH();
4091     case FP64:
4092       vform = kFormatD;
4093       break;
4094     case FP32:
4095       vform = kFormatS;
4096       break;
4097     case FP16:
4098       vform = kFormatH;
4099       break;
4100   }
4101 
4102   SimVRegister& rd = ReadVRegister(instr->GetRd());
4103   SimVRegister& rn = ReadVRegister(instr->GetRn());
4104   bool inexact_exception = false;
4105   FrintMode frint_mode = kFrintToInteger;
4106 
4107   unsigned fd = instr->GetRd();
4108   unsigned fn = instr->GetRn();
4109 
4110   switch (instr->Mask(FPDataProcessing1SourceMask)) {
4111     case FMOV_h:
4112       WriteHRegister(fd, ReadHRegister(fn));
4113       return;
4114     case FMOV_s:
4115       WriteSRegister(fd, ReadSRegister(fn));
4116       return;
4117     case FMOV_d:
4118       WriteDRegister(fd, ReadDRegister(fn));
4119       return;
4120     case FABS_h:
4121     case FABS_s:
4122     case FABS_d:
4123       fabs_(vform, ReadVRegister(fd), ReadVRegister(fn));
4124       // Explicitly log the register update whilst we have type information.
4125       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
4126       return;
4127     case FNEG_h:
4128     case FNEG_s:
4129     case FNEG_d:
4130       fneg(vform, ReadVRegister(fd), ReadVRegister(fn));
4131       // Explicitly log the register update whilst we have type information.
4132       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
4133       return;
4134     case FCVT_ds:
4135       WriteDRegister(fd, FPToDouble(ReadSRegister(fn), ReadDN()));
4136       return;
4137     case FCVT_sd:
4138       WriteSRegister(fd, FPToFloat(ReadDRegister(fn), FPTieEven, ReadDN()));
4139       return;
4140     case FCVT_hs:
4141       WriteHRegister(fd,
4142                      Float16ToRawbits(
4143                          FPToFloat16(ReadSRegister(fn), FPTieEven, ReadDN())));
4144       return;
4145     case FCVT_sh:
4146       WriteSRegister(fd, FPToFloat(ReadHRegister(fn), ReadDN()));
4147       return;
4148     case FCVT_dh:
4149       WriteDRegister(fd, FPToDouble(ReadHRegister(fn), ReadDN()));
4150       return;
4151     case FCVT_hd:
4152       WriteHRegister(fd,
4153                      Float16ToRawbits(
4154                          FPToFloat16(ReadDRegister(fn), FPTieEven, ReadDN())));
4155       return;
4156     case FSQRT_h:
4157     case FSQRT_s:
4158     case FSQRT_d:
4159       fsqrt(vform, rd, rn);
4160       // Explicitly log the register update whilst we have type information.
4161       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
4162       return;
4163     case FRINT32X_s:
4164     case FRINT32X_d:
4165       inexact_exception = true;
4166       frint_mode = kFrintToInt32;
4167       break;  // Use FPCR rounding mode.
4168     case FRINT64X_s:
4169     case FRINT64X_d:
4170       inexact_exception = true;
4171       frint_mode = kFrintToInt64;
4172       break;  // Use FPCR rounding mode.
4173     case FRINT32Z_s:
4174     case FRINT32Z_d:
4175       inexact_exception = true;
4176       frint_mode = kFrintToInt32;
4177       fpcr_rounding = FPZero;
4178       break;
4179     case FRINT64Z_s:
4180     case FRINT64Z_d:
4181       inexact_exception = true;
4182       frint_mode = kFrintToInt64;
4183       fpcr_rounding = FPZero;
4184       break;
4185     case FRINTI_h:
4186     case FRINTI_s:
4187     case FRINTI_d:
4188       break;  // Use FPCR rounding mode.
4189     case FRINTX_h:
4190     case FRINTX_s:
4191     case FRINTX_d:
4192       inexact_exception = true;
4193       break;
4194     case FRINTA_h:
4195     case FRINTA_s:
4196     case FRINTA_d:
4197       fpcr_rounding = FPTieAway;
4198       break;
4199     case FRINTM_h:
4200     case FRINTM_s:
4201     case FRINTM_d:
4202       fpcr_rounding = FPNegativeInfinity;
4203       break;
4204     case FRINTN_h:
4205     case FRINTN_s:
4206     case FRINTN_d:
4207       fpcr_rounding = FPTieEven;
4208       break;
4209     case FRINTP_h:
4210     case FRINTP_s:
4211     case FRINTP_d:
4212       fpcr_rounding = FPPositiveInfinity;
4213       break;
4214     case FRINTZ_h:
4215     case FRINTZ_s:
4216     case FRINTZ_d:
4217       fpcr_rounding = FPZero;
4218       break;
4219     default:
4220       VIXL_UNIMPLEMENTED();
4221   }
4222 
4223   // Only FRINT* instructions fall through the switch above.
4224   frint(vform, rd, rn, fpcr_rounding, inexact_exception, frint_mode);
4225   // Explicitly log the register update whilst we have type information.
4226   LogVRegister(fd, GetPrintRegisterFormatFP(vform));
4227 }
4228 
4229 
4230 void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) {
4231   AssertSupportedFPCR();
4232 
4233   VectorFormat vform;
4234   switch (instr->Mask(FPTypeMask)) {
4235     default:
4236       VIXL_UNREACHABLE_OR_FALLTHROUGH();
4237     case FP64:
4238       vform = kFormatD;
4239       break;
4240     case FP32:
4241       vform = kFormatS;
4242       break;
4243     case FP16:
4244       vform = kFormatH;
4245       break;
4246   }
4247   SimVRegister& rd = ReadVRegister(instr->GetRd());
4248   SimVRegister& rn = ReadVRegister(instr->GetRn());
4249   SimVRegister& rm = ReadVRegister(instr->GetRm());
4250 
4251   switch (instr->Mask(FPDataProcessing2SourceMask)) {
4252     case FADD_h:
4253     case FADD_s:
4254     case FADD_d:
4255       fadd(vform, rd, rn, rm);
4256       break;
4257     case FSUB_h:
4258     case FSUB_s:
4259     case FSUB_d:
4260       fsub(vform, rd, rn, rm);
4261       break;
4262     case FMUL_h:
4263     case FMUL_s:
4264     case FMUL_d:
4265       fmul(vform, rd, rn, rm);
4266       break;
4267     case FNMUL_h:
4268     case FNMUL_s:
4269     case FNMUL_d:
4270       fnmul(vform, rd, rn, rm);
4271       break;
4272     case FDIV_h:
4273     case FDIV_s:
4274     case FDIV_d:
4275       fdiv(vform, rd, rn, rm);
4276       break;
4277     case FMAX_h:
4278     case FMAX_s:
4279     case FMAX_d:
4280       fmax(vform, rd, rn, rm);
4281       break;
4282     case FMIN_h:
4283     case FMIN_s:
4284     case FMIN_d:
4285       fmin(vform, rd, rn, rm);
4286       break;
4287     case FMAXNM_h:
4288     case FMAXNM_s:
4289     case FMAXNM_d:
4290       fmaxnm(vform, rd, rn, rm);
4291       break;
4292     case FMINNM_h:
4293     case FMINNM_s:
4294     case FMINNM_d:
4295       fminnm(vform, rd, rn, rm);
4296       break;
4297     default:
4298       VIXL_UNREACHABLE();
4299   }
4300   // Explicitly log the register update whilst we have type information.
4301   LogVRegister(instr->GetRd(), GetPrintRegisterFormatFP(vform));
4302 }
4303 
4304 
4305 void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
4306   AssertSupportedFPCR();
4307 
4308   unsigned fd = instr->GetRd();
4309   unsigned fn = instr->GetRn();
4310   unsigned fm = instr->GetRm();
4311   unsigned fa = instr->GetRa();
4312 
4313   switch (instr->Mask(FPDataProcessing3SourceMask)) {
4314     // fd = fa +/- (fn * fm)
4315     case FMADD_h:
4316       WriteHRegister(fd,
4317                      FPMulAdd(ReadHRegister(fa),
4318                               ReadHRegister(fn),
4319                               ReadHRegister(fm)));
4320       break;
4321     case FMSUB_h:
4322       WriteHRegister(fd,
4323                      FPMulAdd(ReadHRegister(fa),
4324                               -ReadHRegister(fn),
4325                               ReadHRegister(fm)));
4326       break;
4327     case FMADD_s:
4328       WriteSRegister(fd,
4329                      FPMulAdd(ReadSRegister(fa),
4330                               ReadSRegister(fn),
4331                               ReadSRegister(fm)));
4332       break;
4333     case FMSUB_s:
4334       WriteSRegister(fd,
4335                      FPMulAdd(ReadSRegister(fa),
4336                               -ReadSRegister(fn),
4337                               ReadSRegister(fm)));
4338       break;
4339     case FMADD_d:
4340       WriteDRegister(fd,
4341                      FPMulAdd(ReadDRegister(fa),
4342                               ReadDRegister(fn),
4343                               ReadDRegister(fm)));
4344       break;
4345     case FMSUB_d:
4346       WriteDRegister(fd,
4347                      FPMulAdd(ReadDRegister(fa),
4348                               -ReadDRegister(fn),
4349                               ReadDRegister(fm)));
4350       break;
4351     // Negated variants of the above.
4352     case FNMADD_h:
4353       WriteHRegister(fd,
4354                      FPMulAdd(-ReadHRegister(fa),
4355                               -ReadHRegister(fn),
4356                               ReadHRegister(fm)));
4357       break;
4358     case FNMSUB_h:
4359       WriteHRegister(fd,
4360                      FPMulAdd(-ReadHRegister(fa),
4361                               ReadHRegister(fn),
4362                               ReadHRegister(fm)));
4363       break;
4364     case FNMADD_s:
4365       WriteSRegister(fd,
4366                      FPMulAdd(-ReadSRegister(fa),
4367                               -ReadSRegister(fn),
4368                               ReadSRegister(fm)));
4369       break;
4370     case FNMSUB_s:
4371       WriteSRegister(fd,
4372                      FPMulAdd(-ReadSRegister(fa),
4373                               ReadSRegister(fn),
4374                               ReadSRegister(fm)));
4375       break;
4376     case FNMADD_d:
4377       WriteDRegister(fd,
4378                      FPMulAdd(-ReadDRegister(fa),
4379                               -ReadDRegister(fn),
4380                               ReadDRegister(fm)));
4381       break;
4382     case FNMSUB_d:
4383       WriteDRegister(fd,
4384                      FPMulAdd(-ReadDRegister(fa),
4385                               ReadDRegister(fn),
4386                               ReadDRegister(fm)));
4387       break;
4388     default:
4389       VIXL_UNIMPLEMENTED();
4390   }
4391 }
4392 
4393 
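// If either input operand of a two-operand FP instruction is a NaN, write the
// propagated NaN result to the destination and return true; otherwise return
// false so that the caller performs the arithmetic.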
4394 bool Simulator::FPProcessNaNs(const Instruction* instr) {
4395   unsigned fd = instr->GetRd();
4396   unsigned fn = instr->GetRn();
4397   unsigned fm = instr->GetRm();
4398   bool done = false;
4399 
4400   if (instr->Mask(FP64) == FP64) {
4401     double result = FPProcessNaNs(ReadDRegister(fn), ReadDRegister(fm));
4402     if (IsNaN(result)) {
4403       WriteDRegister(fd, result);
4404       done = true;
4405     }
4406   } else if (instr->Mask(FP32) == FP32) {
4407     float result = FPProcessNaNs(ReadSRegister(fn), ReadSRegister(fm));
4408     if (IsNaN(result)) {
4409       WriteSRegister(fd, result);
4410       done = true;
4411     }
4412   } else {
4413     VIXL_ASSERT(instr->Mask(FP16) == FP16);
4414     VIXL_UNIMPLEMENTED();
4415   }
4416 
4417   return done;
4418 }
4419 
4420 
4421 void Simulator::SysOp_W(int op, int64_t val) {
4422   switch (op) {
4423     case IVAU:
4424     case CVAC:
4425     case CVAU:
4426     case CVAP:
4427     case CVADP:
4428     case CIVAC: {
4429       // Perform a placeholder memory access to ensure that we have read access
4430       // to the specified address.
4431       volatile uint8_t y = MemRead<uint8_t>(val);
4432       USE(y);
4433       // TODO: Implement "case ZVA:".
4434       break;
4435     }
4436     default:
4437       VIXL_UNIMPLEMENTED();
4438   }
4439 }
4440 
4441 
4442 // clang-format off
4443 #define PAUTH_SYSTEM_MODES(V)                                     \
4444   V(A1716, 17, ReadXRegister(16),                      kPACKeyIA) \
4445   V(B1716, 17, ReadXRegister(16),                      kPACKeyIB) \
4446   V(AZ,    30, 0x00000000,                             kPACKeyIA) \
4447   V(BZ,    30, 0x00000000,                             kPACKeyIB) \
4448   V(ASP,   30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIA) \
4449   V(BSP,   30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIB)
4450 // clang-format on
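// Each V(...) entry above supplies, in order: the instruction suffix, the
// destination/source register number, the modifier value and the PAC key.
// DEFINE_PAUTH_FUNCS below expands these into the PACI* and AUTI* cases.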
4451 
4452 
4453 void Simulator::VisitSystem(const Instruction* instr) {
4454   // Some system instructions hijack their Op and Cp fields to represent a
4455   // range of immediates instead of indicating a different instruction. This
4456   // makes the decoding tricky.
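  // XPACLRI, for example, is one such encoding, so it is matched against the
  // full instruction bits here before the other masks are tried.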
4457   if (instr->GetInstructionBits() == XPACLRI) {
4458     WriteXRegister(30, StripPAC(ReadXRegister(30), kInstructionPointer));
4459   } else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) {
4460     switch (instr->Mask(SystemPStateMask)) {
4461       case CFINV:
4462         ReadNzcv().SetC(!ReadC());
4463         break;
4464       case AXFLAG:
4465         ReadNzcv().SetN(0);
4466         ReadNzcv().SetZ(ReadNzcv().GetZ() | ReadNzcv().GetV());
4467         ReadNzcv().SetC(ReadNzcv().GetC() & ~ReadNzcv().GetV());
4468         ReadNzcv().SetV(0);
4469         break;
4470       case XAFLAG: {
4471         // Can't set the flags in place due to the logical dependencies.
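        // For example, c below reads the original Z, which z would already
        // have overwritten if the flags were updated one at a time, so all
        // four inputs are read before any flag is written.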
4472         uint32_t n = (~ReadNzcv().GetC() & ~ReadNzcv().GetZ()) & 1;
4473         uint32_t z = ReadNzcv().GetZ() & ReadNzcv().GetC();
4474         uint32_t c = ReadNzcv().GetC() | ReadNzcv().GetZ();
4475         uint32_t v = ~ReadNzcv().GetC() & ReadNzcv().GetZ();
4476         ReadNzcv().SetN(n);
4477         ReadNzcv().SetZ(z);
4478         ReadNzcv().SetC(c);
4479         ReadNzcv().SetV(v);
4480         break;
4481       }
4482     }
4483   } else if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
4484     // Check that BType allows the PACI[AB]SP instructions.
4485     if (PcIsInGuardedPage()) {
4486       Instr i = instr->Mask(SystemPAuthMask);
4487       if ((i == PACIASP) || (i == PACIBSP)) {
4488         switch (ReadBType()) {
4489           case BranchFromGuardedNotToIP:
4490           // TODO: This case depends on the value of SCTLR_EL1.BT0, which we
4491           // assume here to be zero. This allows execution of PACI[AB]SP when
4492           // BTYPE is BranchFromGuardedNotToIP (0b11).
4493           case DefaultBType:
4494           case BranchFromUnguardedOrToIP:
4495           case BranchAndLink:
4496             break;
4497         }
4498       }
4499     }
4500 
4501     switch (instr->Mask(SystemPAuthMask)) {
4502 #define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY)                              \
4503   case PACI##SUFFIX:                                                           \
4504     WriteXRegister(DST,                                                        \
4505                    AddPAC(ReadXRegister(DST), MOD, KEY, kInstructionPointer)); \
4506     break;                                                                     \
4507   case AUTI##SUFFIX:                                                           \
4508     WriteXRegister(DST,                                                        \
4509                    AuthPAC(ReadXRegister(DST),                                 \
4510                            MOD,                                                \
4511                            KEY,                                                \
4512                            kInstructionPointer));                              \
4513     break;
4514 
4515       PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS)
4516 #undef DEFINE_PAUTH_FUNCS
4517     }
4518   } else if (instr->Mask(SystemExclusiveMonitorFMask) ==
4519              SystemExclusiveMonitorFixed) {
4520     VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX);
4521     switch (instr->Mask(SystemExclusiveMonitorMask)) {
4522       case CLREX: {
4523         PrintExclusiveAccessWarning();
4524         ClearLocalMonitor();
4525         break;
4526       }
4527     }
4528   } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
4529     switch (instr->Mask(SystemSysRegMask)) {
4530       case MRS: {
4531         switch (instr->GetImmSystemRegister()) {
4532           case NZCV:
4533             WriteXRegister(instr->GetRt(), ReadNzcv().GetRawValue());
4534             break;
4535           case FPCR:
4536             WriteXRegister(instr->GetRt(), ReadFpcr().GetRawValue());
4537             break;
4538           case RNDR:
4539           case RNDRRS: {
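            // Note: RNDR and RNDRRS are handled identically here; no
            // reseeding behaviour is modelled for RNDRRS.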
4540             uint64_t high = jrand48(rand_state_);
4541             uint64_t low = jrand48(rand_state_);
4542             uint64_t rand_num = (high << 32) | (low & 0xffffffff);
4543             WriteXRegister(instr->GetRt(), rand_num);
4544             // Simulate successful random number generation.
4545             // TODO: Occasionally simulate failure, for the case where a random
4546             // number cannot be returned in a reasonable period of time.
4547             ReadNzcv().SetRawValue(NoFlag);
4548             LogSystemRegister(NZCV);
4549             break;
4550           }
4551           default:
4552             VIXL_UNIMPLEMENTED();
4553         }
4554         break;
4555       }
4556       case MSR: {
4557         switch (instr->GetImmSystemRegister()) {
4558           case NZCV:
4559             ReadNzcv().SetRawValue(ReadWRegister(instr->GetRt()));
4560             LogSystemRegister(NZCV);
4561             break;
4562           case FPCR:
4563             ReadFpcr().SetRawValue(ReadWRegister(instr->GetRt()));
4564             LogSystemRegister(FPCR);
4565             break;
4566           default:
4567             VIXL_UNIMPLEMENTED();
4568         }
4569         break;
4570       }
4571     }
4572   } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
4573     VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
4574     switch (instr->GetImmHint()) {
4575       case NOP:
4576       case ESB:
4577       case CSDB:
4578       case BTI_jc:
4579         break;
4580       case BTI:
4581         if (PcIsInGuardedPage() && (ReadBType() != DefaultBType)) {
4582           VIXL_ABORT_WITH_MSG("Executing BTI with wrong BType.");
4583         }
4584         break;
4585       case BTI_c:
4586         if (PcIsInGuardedPage() && (ReadBType() == BranchFromGuardedNotToIP)) {
4587           VIXL_ABORT_WITH_MSG("Executing BTI c with wrong BType.");
4588         }
4589         break;
4590       case BTI_j:
4591         if (PcIsInGuardedPage() && (ReadBType() == BranchAndLink)) {
4592           VIXL_ABORT_WITH_MSG("Executing BTI j with wrong BType.");
4593         }
4594         break;
4595       default:
4596         VIXL_UNIMPLEMENTED();
4597     }
4598   } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
4599     __sync_synchronize();
4600   } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) {
4601     switch (instr->Mask(SystemSysMask)) {
4602       case SYS:
4603         SysOp_W(instr->GetSysOp(), ReadXRegister(instr->GetRt()));
4604         break;
4605       default:
4606         VIXL_UNIMPLEMENTED();
4607     }
4608   } else {
4609     VIXL_UNIMPLEMENTED();
4610   }
4611 }
4612 
4613 
4614 void Simulator::VisitException(const Instruction* instr) {
4615   switch (instr->Mask(ExceptionMask)) {
4616     case HLT:
4617       switch (instr->GetImmException()) {
4618         case kUnreachableOpcode:
4619           DoUnreachable(instr);
4620           return;
4621         case kTraceOpcode:
4622           DoTrace(instr);
4623           return;
4624         case kLogOpcode:
4625           DoLog(instr);
4626           return;
4627         case kPrintfOpcode:
4628           DoPrintf(instr);
4629           return;
4630         case kRuntimeCallOpcode:
4631           DoRuntimeCall(instr);
4632           return;
4633         case kSetCPUFeaturesOpcode:
4634         case kEnableCPUFeaturesOpcode:
4635         case kDisableCPUFeaturesOpcode:
4636           DoConfigureCPUFeatures(instr);
4637           return;
4638         case kSaveCPUFeaturesOpcode:
4639           DoSaveCPUFeatures(instr);
4640           return;
4641         case kRestoreCPUFeaturesOpcode:
4642           DoRestoreCPUFeatures(instr);
4643           return;
4644         default:
4645           HostBreakpoint();
4646           return;
4647       }
4648     case BRK:
4649       HostBreakpoint();
4650       return;
4651     default:
4652       VIXL_UNIMPLEMENTED();
4653   }
4654 }
4655 
4656 
4657 void Simulator::VisitCrypto2RegSHA(const Instruction* instr) {
4658   VisitUnimplemented(instr);
4659 }
4660 
4661 
4662 void Simulator::VisitCrypto3RegSHA(const Instruction* instr) {
4663   VisitUnimplemented(instr);
4664 }
4665 
4666 
4667 void Simulator::VisitCryptoAES(const Instruction* instr) {
4668   VisitUnimplemented(instr);
4669 }
4670 
4671 
4672 void Simulator::VisitNEON2RegMisc(const Instruction* instr) {
4673   NEONFormatDecoder nfd(instr);
4674   VectorFormat vf = nfd.GetVectorFormat();
4675 
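  // Each NEONFormatMap below pairs a list of instruction bit positions with
  // the vector format selected for each value of those bits; for example,
  // map_lp uses bits <23:22, 30> to pick the format for the long pairwise
  // operations (SADDLP etc.).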
4676   static const NEONFormatMap map_lp =
4677       {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
4678   VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
4679 
4680   static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
4681   VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
4682 
4683   static const NEONFormatMap map_fcvtn = {{22, 30},
4684                                           {NF_4H, NF_8H, NF_2S, NF_4S}};
4685   VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
4686 
4687   SimVRegister& rd = ReadVRegister(instr->GetRd());
4688   SimVRegister& rn = ReadVRegister(instr->GetRn());
4689 
4690   if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
4691     // These instructions all use a two bit size field, except NOT and RBIT,
4692     // which use the field to encode the operation.
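    // For example, in the NEON_RBIT_NOT case below, a field value of 0 means
    // NOT and 1 means RBIT.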
4693     switch (instr->Mask(NEON2RegMiscMask)) {
4694       case NEON_REV64:
4695         rev64(vf, rd, rn);
4696         break;
4697       case NEON_REV32:
4698         rev32(vf, rd, rn);
4699         break;
4700       case NEON_REV16:
4701         rev16(vf, rd, rn);
4702         break;
4703       case NEON_SUQADD:
4704         suqadd(vf, rd, rn);
4705         break;
4706       case NEON_USQADD:
4707         usqadd(vf, rd, rn);
4708         break;
4709       case NEON_CLS:
4710         cls(vf, rd, rn);
4711         break;
4712       case NEON_CLZ:
4713         clz(vf, rd, rn);
4714         break;
4715       case NEON_CNT:
4716         cnt(vf, rd, rn);
4717         break;
4718       case NEON_SQABS:
4719         abs(vf, rd, rn).SignedSaturate(vf);
4720         break;
4721       case NEON_SQNEG:
4722         neg(vf, rd, rn).SignedSaturate(vf);
4723         break;
4724       case NEON_CMGT_zero:
4725         cmp(vf, rd, rn, 0, gt);
4726         break;
4727       case NEON_CMGE_zero:
4728         cmp(vf, rd, rn, 0, ge);
4729         break;
4730       case NEON_CMEQ_zero:
4731         cmp(vf, rd, rn, 0, eq);
4732         break;
4733       case NEON_CMLE_zero:
4734         cmp(vf, rd, rn, 0, le);
4735         break;
4736       case NEON_CMLT_zero:
4737         cmp(vf, rd, rn, 0, lt);
4738         break;
4739       case NEON_ABS:
4740         abs(vf, rd, rn);
4741         break;
4742       case NEON_NEG:
4743         neg(vf, rd, rn);
4744         break;
4745       case NEON_SADDLP:
4746         saddlp(vf_lp, rd, rn);
4747         break;
4748       case NEON_UADDLP:
4749         uaddlp(vf_lp, rd, rn);
4750         break;
4751       case NEON_SADALP:
4752         sadalp(vf_lp, rd, rn);
4753         break;
4754       case NEON_UADALP:
4755         uadalp(vf_lp, rd, rn);
4756         break;
4757       case NEON_RBIT_NOT:
4758         vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
4759         switch (instr->GetFPType()) {
4760           case 0:
4761             not_(vf, rd, rn);
4762             break;
4763           case 1:
4764             rbit(vf, rd, rn);
4765             break;
4766           default:
4767             VIXL_UNIMPLEMENTED();
4768         }
4769         break;
4770     }
4771   } else {
4772     VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
4773     FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
4774     bool inexact_exception = false;
4775     FrintMode frint_mode = kFrintToInteger;
4776 
4777     // These instructions all use a one bit size field, except XTN, SQXTUN,
4778     // SHLL, SQXTN and UQXTN, which use a two bit size field.
4779     switch (instr->Mask(NEON2RegMiscFPMask)) {
4780       case NEON_FABS:
4781         fabs_(fpf, rd, rn);
4782         return;
4783       case NEON_FNEG:
4784         fneg(fpf, rd, rn);
4785         return;
4786       case NEON_FSQRT:
4787         fsqrt(fpf, rd, rn);
4788         return;
4789       case NEON_FCVTL:
4790         if (instr->Mask(NEON_Q)) {
4791           fcvtl2(vf_fcvtl, rd, rn);
4792         } else {
4793           fcvtl(vf_fcvtl, rd, rn);
4794         }
4795         return;
4796       case NEON_FCVTN:
4797         if (instr->Mask(NEON_Q)) {
4798           fcvtn2(vf_fcvtn, rd, rn);
4799         } else {
4800           fcvtn(vf_fcvtn, rd, rn);
4801         }
4802         return;
4803       case NEON_FCVTXN:
4804         if (instr->Mask(NEON_Q)) {
4805           fcvtxn2(vf_fcvtn, rd, rn);
4806         } else {
4807           fcvtxn(vf_fcvtn, rd, rn);
4808         }
4809         return;
4810 
4811       // The following instructions break from the switch statement, rather
4812       // than return.
4813       case NEON_FRINT32X:
4814         inexact_exception = true;
4815         frint_mode = kFrintToInt32;
4816         break;  // Use FPCR rounding mode.
4817       case NEON_FRINT32Z:
4818         inexact_exception = true;
4819         frint_mode = kFrintToInt32;
4820         fpcr_rounding = FPZero;
4821         break;
4822       case NEON_FRINT64X:
4823         inexact_exception = true;
4824         frint_mode = kFrintToInt64;
4825         break;  // Use FPCR rounding mode.
4826       case NEON_FRINT64Z:
4827         inexact_exception = true;
4828         frint_mode = kFrintToInt64;
4829         fpcr_rounding = FPZero;
4830         break;
4831       case NEON_FRINTI:
4832         break;  // Use FPCR rounding mode.
4833       case NEON_FRINTX:
4834         inexact_exception = true;
4835         break;
4836       case NEON_FRINTA:
4837         fpcr_rounding = FPTieAway;
4838         break;
4839       case NEON_FRINTM:
4840         fpcr_rounding = FPNegativeInfinity;
4841         break;
4842       case NEON_FRINTN:
4843         fpcr_rounding = FPTieEven;
4844         break;
4845       case NEON_FRINTP:
4846         fpcr_rounding = FPPositiveInfinity;
4847         break;
4848       case NEON_FRINTZ:
4849         fpcr_rounding = FPZero;
4850         break;
4851 
4852       case NEON_FCVTNS:
4853         fcvts(fpf, rd, rn, FPTieEven);
4854         return;
4855       case NEON_FCVTNU:
4856         fcvtu(fpf, rd, rn, FPTieEven);
4857         return;
4858       case NEON_FCVTPS:
4859         fcvts(fpf, rd, rn, FPPositiveInfinity);
4860         return;
4861       case NEON_FCVTPU:
4862         fcvtu(fpf, rd, rn, FPPositiveInfinity);
4863         return;
4864       case NEON_FCVTMS:
4865         fcvts(fpf, rd, rn, FPNegativeInfinity);
4866         return;
4867       case NEON_FCVTMU:
4868         fcvtu(fpf, rd, rn, FPNegativeInfinity);
4869         return;
4870       case NEON_FCVTZS:
4871         fcvts(fpf, rd, rn, FPZero);
4872         return;
4873       case NEON_FCVTZU:
4874         fcvtu(fpf, rd, rn, FPZero);
4875         return;
4876       case NEON_FCVTAS:
4877         fcvts(fpf, rd, rn, FPTieAway);
4878         return;
4879       case NEON_FCVTAU:
4880         fcvtu(fpf, rd, rn, FPTieAway);
4881         return;
4882       case NEON_SCVTF:
4883         scvtf(fpf, rd, rn, 0, fpcr_rounding);
4884         return;
4885       case NEON_UCVTF:
4886         ucvtf(fpf, rd, rn, 0, fpcr_rounding);
4887         return;
4888       case NEON_URSQRTE:
4889         ursqrte(fpf, rd, rn);
4890         return;
4891       case NEON_URECPE:
4892         urecpe(fpf, rd, rn);
4893         return;
4894       case NEON_FRSQRTE:
4895         frsqrte(fpf, rd, rn);
4896         return;
4897       case NEON_FRECPE:
4898         frecpe(fpf, rd, rn, fpcr_rounding);
4899         return;
4900       case NEON_FCMGT_zero:
4901         fcmp_zero(fpf, rd, rn, gt);
4902         return;
4903       case NEON_FCMGE_zero:
4904         fcmp_zero(fpf, rd, rn, ge);
4905         return;
4906       case NEON_FCMEQ_zero:
4907         fcmp_zero(fpf, rd, rn, eq);
4908         return;
4909       case NEON_FCMLE_zero:
4910         fcmp_zero(fpf, rd, rn, le);
4911         return;
4912       case NEON_FCMLT_zero:
4913         fcmp_zero(fpf, rd, rn, lt);
4914         return;
4915       default:
4916         if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
4917             (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
4918           switch (instr->Mask(NEON2RegMiscMask)) {
4919             case NEON_XTN:
4920               xtn(vf, rd, rn);
4921               return;
4922             case NEON_SQXTN:
4923               sqxtn(vf, rd, rn);
4924               return;
4925             case NEON_UQXTN:
4926               uqxtn(vf, rd, rn);
4927               return;
4928             case NEON_SQXTUN:
4929               sqxtun(vf, rd, rn);
4930               return;
4931             case NEON_SHLL:
4932               vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
4933               if (instr->Mask(NEON_Q)) {
4934                 shll2(vf, rd, rn);
4935               } else {
4936                 shll(vf, rd, rn);
4937               }
4938               return;
4939             default:
4940               VIXL_UNIMPLEMENTED();
4941           }
4942         } else {
4943           VIXL_UNIMPLEMENTED();
4944         }
4945     }
4946 
4947     // Only FRINT* instructions fall through the switch above.
4948     frint(fpf, rd, rn, fpcr_rounding, inexact_exception, frint_mode);
4949   }
4950 }
4951 
4952 
4953 void Simulator::VisitNEON2RegMiscFP16(const Instruction* instr) {
4954   static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
4955   NEONFormatDecoder nfd(instr);
4956   VectorFormat fpf = nfd.GetVectorFormat(&map_half);
4957 
4958   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
4959 
4960   SimVRegister& rd = ReadVRegister(instr->GetRd());
4961   SimVRegister& rn = ReadVRegister(instr->GetRn());
4962 
4963   switch (instr->Mask(NEON2RegMiscFP16Mask)) {
4964     case NEON_SCVTF_H:
4965       scvtf(fpf, rd, rn, 0, fpcr_rounding);
4966       return;
4967     case NEON_UCVTF_H:
4968       ucvtf(fpf, rd, rn, 0, fpcr_rounding);
4969       return;
4970     case NEON_FCVTNS_H:
4971       fcvts(fpf, rd, rn, FPTieEven);
4972       return;
4973     case NEON_FCVTNU_H:
4974       fcvtu(fpf, rd, rn, FPTieEven);
4975       return;
4976     case NEON_FCVTPS_H:
4977       fcvts(fpf, rd, rn, FPPositiveInfinity);
4978       return;
4979     case NEON_FCVTPU_H:
4980       fcvtu(fpf, rd, rn, FPPositiveInfinity);
4981       return;
4982     case NEON_FCVTMS_H:
4983       fcvts(fpf, rd, rn, FPNegativeInfinity);
4984       return;
4985     case NEON_FCVTMU_H:
4986       fcvtu(fpf, rd, rn, FPNegativeInfinity);
4987       return;
4988     case NEON_FCVTZS_H:
4989       fcvts(fpf, rd, rn, FPZero);
4990       return;
4991     case NEON_FCVTZU_H:
4992       fcvtu(fpf, rd, rn, FPZero);
4993       return;
4994     case NEON_FCVTAS_H:
4995       fcvts(fpf, rd, rn, FPTieAway);
4996       return;
4997     case NEON_FCVTAU_H:
4998       fcvtu(fpf, rd, rn, FPTieAway);
4999       return;
5000     case NEON_FRINTI_H:
5001       frint(fpf, rd, rn, fpcr_rounding, false);
5002       return;
5003     case NEON_FRINTX_H:
5004       frint(fpf, rd, rn, fpcr_rounding, true);
5005       return;
5006     case NEON_FRINTA_H:
5007       frint(fpf, rd, rn, FPTieAway, false);
5008       return;
5009     case NEON_FRINTM_H:
5010       frint(fpf, rd, rn, FPNegativeInfinity, false);
5011       return;
5012     case NEON_FRINTN_H:
5013       frint(fpf, rd, rn, FPTieEven, false);
5014       return;
5015     case NEON_FRINTP_H:
5016       frint(fpf, rd, rn, FPPositiveInfinity, false);
5017       return;
5018     case NEON_FRINTZ_H:
5019       frint(fpf, rd, rn, FPZero, false);
5020       return;
5021     case NEON_FABS_H:
5022       fabs_(fpf, rd, rn);
5023       return;
5024     case NEON_FNEG_H:
5025       fneg(fpf, rd, rn);
5026       return;
5027     case NEON_FSQRT_H:
5028       fsqrt(fpf, rd, rn);
5029       return;
5030     case NEON_FRSQRTE_H:
5031       frsqrte(fpf, rd, rn);
5032       return;
5033     case NEON_FRECPE_H:
5034       frecpe(fpf, rd, rn, fpcr_rounding);
5035       return;
5036     case NEON_FCMGT_H_zero:
5037       fcmp_zero(fpf, rd, rn, gt);
5038       return;
5039     case NEON_FCMGE_H_zero:
5040       fcmp_zero(fpf, rd, rn, ge);
5041       return;
5042     case NEON_FCMEQ_H_zero:
5043       fcmp_zero(fpf, rd, rn, eq);
5044       return;
5045     case NEON_FCMLE_H_zero:
5046       fcmp_zero(fpf, rd, rn, le);
5047       return;
5048     case NEON_FCMLT_H_zero:
5049       fcmp_zero(fpf, rd, rn, lt);
5050       return;
5051     default:
5052       VIXL_UNIMPLEMENTED();
5053       return;
5054   }
5055 }
5056 
5057 
5058 void Simulator::VisitNEON3Same(const Instruction* instr) {
5059   NEONFormatDecoder nfd(instr);
5060   SimVRegister& rd = ReadVRegister(instr->GetRd());
5061   SimVRegister& rn = ReadVRegister(instr->GetRn());
5062   SimVRegister& rm = ReadVRegister(instr->GetRm());
5063 
5064   if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
5065     VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
5066     switch (instr->Mask(NEON3SameLogicalMask)) {
5067       case NEON_AND:
5068         and_(vf, rd, rn, rm);
5069         break;
5070       case NEON_ORR:
5071         orr(vf, rd, rn, rm);
5072         break;
5073       case NEON_ORN:
5074         orn(vf, rd, rn, rm);
5075         break;
5076       case NEON_EOR:
5077         eor(vf, rd, rn, rm);
5078         break;
5079       case NEON_BIC:
5080         bic(vf, rd, rn, rm);
5081         break;
5082       case NEON_BIF:
5083         bif(vf, rd, rn, rm);
5084         break;
5085       case NEON_BIT:
5086         bit(vf, rd, rn, rm);
5087         break;
5088       case NEON_BSL:
5089         bsl(vf, rd, rn, rm);
5090         break;
5091       default:
5092         VIXL_UNIMPLEMENTED();
5093     }
5094   } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
5095     VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
5096     switch (instr->Mask(NEON3SameFPMask)) {
5097       case NEON_FADD:
5098         fadd(vf, rd, rn, rm);
5099         break;
5100       case NEON_FSUB:
5101         fsub(vf, rd, rn, rm);
5102         break;
5103       case NEON_FMUL:
5104         fmul(vf, rd, rn, rm);
5105         break;
5106       case NEON_FDIV:
5107         fdiv(vf, rd, rn, rm);
5108         break;
5109       case NEON_FMAX:
5110         fmax(vf, rd, rn, rm);
5111         break;
5112       case NEON_FMIN:
5113         fmin(vf, rd, rn, rm);
5114         break;
5115       case NEON_FMAXNM:
5116         fmaxnm(vf, rd, rn, rm);
5117         break;
5118       case NEON_FMINNM:
5119         fminnm(vf, rd, rn, rm);
5120         break;
5121       case NEON_FMLA:
5122         fmla(vf, rd, rd, rn, rm);
5123         break;
5124       case NEON_FMLS:
5125         fmls(vf, rd, rd, rn, rm);
5126         break;
5127       case NEON_FMULX:
5128         fmulx(vf, rd, rn, rm);
5129         break;
5130       case NEON_FACGE:
5131         fabscmp(vf, rd, rn, rm, ge);
5132         break;
5133       case NEON_FACGT:
5134         fabscmp(vf, rd, rn, rm, gt);
5135         break;
5136       case NEON_FCMEQ:
5137         fcmp(vf, rd, rn, rm, eq);
5138         break;
5139       case NEON_FCMGE:
5140         fcmp(vf, rd, rn, rm, ge);
5141         break;
5142       case NEON_FCMGT:
5143         fcmp(vf, rd, rn, rm, gt);
5144         break;
5145       case NEON_FRECPS:
5146         frecps(vf, rd, rn, rm);
5147         break;
5148       case NEON_FRSQRTS:
5149         frsqrts(vf, rd, rn, rm);
5150         break;
5151       case NEON_FABD:
5152         fabd(vf, rd, rn, rm);
5153         break;
5154       case NEON_FADDP:
5155         faddp(vf, rd, rn, rm);
5156         break;
5157       case NEON_FMAXP:
5158         fmaxp(vf, rd, rn, rm);
5159         break;
5160       case NEON_FMAXNMP:
5161         fmaxnmp(vf, rd, rn, rm);
5162         break;
5163       case NEON_FMINP:
5164         fminp(vf, rd, rn, rm);
5165         break;
5166       case NEON_FMINNMP:
5167         fminnmp(vf, rd, rn, rm);
5168         break;
5169       default:
5170         // FMLAL{2} and FMLSL{2} have special-case encodings.
5171         switch (instr->Mask(NEON3SameFHMMask)) {
5172           case NEON_FMLAL:
5173             fmlal(vf, rd, rn, rm);
5174             break;
5175           case NEON_FMLAL2:
5176             fmlal2(vf, rd, rn, rm);
5177             break;
5178           case NEON_FMLSL:
5179             fmlsl(vf, rd, rn, rm);
5180             break;
5181           case NEON_FMLSL2:
5182             fmlsl2(vf, rd, rn, rm);
5183             break;
5184           default:
5185             VIXL_UNIMPLEMENTED();
5186         }
5187     }
5188   } else {
5189     VectorFormat vf = nfd.GetVectorFormat();
5190     switch (instr->Mask(NEON3SameMask)) {
5191       case NEON_ADD:
5192         add(vf, rd, rn, rm);
5193         break;
5194       case NEON_ADDP:
5195         addp(vf, rd, rn, rm);
5196         break;
5197       case NEON_CMEQ:
5198         cmp(vf, rd, rn, rm, eq);
5199         break;
5200       case NEON_CMGE:
5201         cmp(vf, rd, rn, rm, ge);
5202         break;
5203       case NEON_CMGT:
5204         cmp(vf, rd, rn, rm, gt);
5205         break;
5206       case NEON_CMHI:
5207         cmp(vf, rd, rn, rm, hi);
5208         break;
5209       case NEON_CMHS:
5210         cmp(vf, rd, rn, rm, hs);
5211         break;
5212       case NEON_CMTST:
5213         cmptst(vf, rd, rn, rm);
5214         break;
5215       case NEON_MLS:
5216         mls(vf, rd, rd, rn, rm);
5217         break;
5218       case NEON_MLA:
5219         mla(vf, rd, rd, rn, rm);
5220         break;
5221       case NEON_MUL:
5222         mul(vf, rd, rn, rm);
5223         break;
5224       case NEON_PMUL:
5225         pmul(vf, rd, rn, rm);
5226         break;
5227       case NEON_SMAX:
5228         smax(vf, rd, rn, rm);
5229         break;
5230       case NEON_SMAXP:
5231         smaxp(vf, rd, rn, rm);
5232         break;
5233       case NEON_SMIN:
5234         smin(vf, rd, rn, rm);
5235         break;
5236       case NEON_SMINP:
5237         sminp(vf, rd, rn, rm);
5238         break;
5239       case NEON_SUB:
5240         sub(vf, rd, rn, rm);
5241         break;
5242       case NEON_UMAX:
5243         umax(vf, rd, rn, rm);
5244         break;
5245       case NEON_UMAXP:
5246         umaxp(vf, rd, rn, rm);
5247         break;
5248       case NEON_UMIN:
5249         umin(vf, rd, rn, rm);
5250         break;
5251       case NEON_UMINP:
5252         uminp(vf, rd, rn, rm);
5253         break;
5254       case NEON_SSHL:
5255         sshl(vf, rd, rn, rm);
5256         break;
5257       case NEON_USHL:
5258         ushl(vf, rd, rn, rm);
5259         break;
5260       case NEON_SABD:
5261         absdiff(vf, rd, rn, rm, true);
5262         break;
5263       case NEON_UABD:
5264         absdiff(vf, rd, rn, rm, false);
5265         break;
5266       case NEON_SABA:
5267         saba(vf, rd, rn, rm);
5268         break;
5269       case NEON_UABA:
5270         uaba(vf, rd, rn, rm);
5271         break;
5272       case NEON_UQADD:
5273         add(vf, rd, rn, rm).UnsignedSaturate(vf);
5274         break;
5275       case NEON_SQADD:
5276         add(vf, rd, rn, rm).SignedSaturate(vf);
5277         break;
5278       case NEON_UQSUB:
5279         sub(vf, rd, rn, rm).UnsignedSaturate(vf);
5280         break;
5281       case NEON_SQSUB:
5282         sub(vf, rd, rn, rm).SignedSaturate(vf);
5283         break;
5284       case NEON_SQDMULH:
5285         sqdmulh(vf, rd, rn, rm);
5286         break;
5287       case NEON_SQRDMULH:
5288         sqrdmulh(vf, rd, rn, rm);
5289         break;
5290       case NEON_UQSHL:
5291         ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
5292         break;
5293       case NEON_SQSHL:
5294         sshl(vf, rd, rn, rm).SignedSaturate(vf);
5295         break;
5296       case NEON_URSHL:
5297         ushl(vf, rd, rn, rm).Round(vf);
5298         break;
5299       case NEON_SRSHL:
5300         sshl(vf, rd, rn, rm).Round(vf);
5301         break;
5302       case NEON_UQRSHL:
5303         ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
5304         break;
5305       case NEON_SQRSHL:
5306         sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
5307         break;
5308       case NEON_UHADD:
5309         add(vf, rd, rn, rm).Uhalve(vf);
5310         break;
5311       case NEON_URHADD:
5312         add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
5313         break;
5314       case NEON_SHADD:
5315         add(vf, rd, rn, rm).Halve(vf);
5316         break;
5317       case NEON_SRHADD:
5318         add(vf, rd, rn, rm).Halve(vf).Round(vf);
5319         break;
5320       case NEON_UHSUB:
5321         sub(vf, rd, rn, rm).Uhalve(vf);
5322         break;
5323       case NEON_SHSUB:
5324         sub(vf, rd, rn, rm).Halve(vf);
5325         break;
5326       default:
5327         VIXL_UNIMPLEMENTED();
5328     }
5329   }
5330 }
5331 
5332 
5333 void Simulator::VisitNEON3SameFP16(const Instruction* instr) {
5334   NEONFormatDecoder nfd(instr);
5335   SimVRegister& rd = ReadVRegister(instr->GetRd());
5336   SimVRegister& rn = ReadVRegister(instr->GetRn());
5337   SimVRegister& rm = ReadVRegister(instr->GetRm());
5338 
5339   VectorFormat vf = nfd.GetVectorFormat(nfd.FP16FormatMap());
5340   switch (instr->Mask(NEON3SameFP16Mask)) {
5341 #define SIM_FUNC(A, B) \
5342   case NEON_##A##_H:   \
5343     B(vf, rd, rn, rm); \
5344     break;
5345     SIM_FUNC(FMAXNM, fmaxnm);
5346     SIM_FUNC(FADD, fadd);
5347     SIM_FUNC(FMULX, fmulx);
5348     SIM_FUNC(FMAX, fmax);
5349     SIM_FUNC(FRECPS, frecps);
5350     SIM_FUNC(FMINNM, fminnm);
5351     SIM_FUNC(FSUB, fsub);
5352     SIM_FUNC(FMIN, fmin);
5353     SIM_FUNC(FRSQRTS, frsqrts);
5354     SIM_FUNC(FMAXNMP, fmaxnmp);
5355     SIM_FUNC(FADDP, faddp);
5356     SIM_FUNC(FMUL, fmul);
5357     SIM_FUNC(FMAXP, fmaxp);
5358     SIM_FUNC(FDIV, fdiv);
5359     SIM_FUNC(FMINNMP, fminnmp);
5360     SIM_FUNC(FABD, fabd);
5361     SIM_FUNC(FMINP, fminp);
5362 #undef SIM_FUNC
5363     case NEON_FMLA_H:
5364       fmla(vf, rd, rd, rn, rm);
5365       break;
5366     case NEON_FMLS_H:
5367       fmls(vf, rd, rd, rn, rm);
5368       break;
5369     case NEON_FCMEQ_H:
5370       fcmp(vf, rd, rn, rm, eq);
5371       break;
5372     case NEON_FCMGE_H:
5373       fcmp(vf, rd, rn, rm, ge);
5374       break;
5375     case NEON_FACGE_H:
5376       fabscmp(vf, rd, rn, rm, ge);
5377       break;
5378     case NEON_FCMGT_H:
5379       fcmp(vf, rd, rn, rm, gt);
5380       break;
5381     case NEON_FACGT_H:
5382       fabscmp(vf, rd, rn, rm, gt);
5383       break;
5384     default:
5385       VIXL_UNIMPLEMENTED();
5386       break;
5387   }
5388 }
5389 
5390 void Simulator::VisitNEON3SameExtra(const Instruction* instr) {
5391   NEONFormatDecoder nfd(instr);
5392   SimVRegister& rd = ReadVRegister(instr->GetRd());
5393   SimVRegister& rn = ReadVRegister(instr->GetRn());
5394   SimVRegister& rm = ReadVRegister(instr->GetRm());
5395   int rot = 0;
5396   VectorFormat vf = nfd.GetVectorFormat();
5397   if (instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) {
5398     rot = instr->GetImmRotFcmlaVec();
5399     fcmla(vf, rd, rn, rm, rd, rot);
5400   } else if (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD) {
5401     rot = instr->GetImmRotFcadd();
5402     fcadd(vf, rd, rn, rm, rot);
5403   } else {
5404     switch (instr->Mask(NEON3SameExtraMask)) {
5405       case NEON_SDOT:
5406         sdot(vf, rd, rn, rm);
5407         break;
5408       case NEON_SQRDMLAH:
5409         sqrdmlah(vf, rd, rn, rm);
5410         break;
5411       case NEON_UDOT:
5412         udot(vf, rd, rn, rm);
5413         break;
5414       case NEON_SQRDMLSH:
5415         sqrdmlsh(vf, rd, rn, rm);
5416         break;
5417       default:
5418         VIXL_UNIMPLEMENTED();
5419         break;
5420     }
5421   }
5422 }
5423 
5424 
5425 void Simulator::VisitNEON3Different(const Instruction* instr) {
5426   NEONFormatDecoder nfd(instr);
5427   VectorFormat vf = nfd.GetVectorFormat();
5428   VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
5429 
5430   SimVRegister& rd = ReadVRegister(instr->GetRd());
5431   SimVRegister& rn = ReadVRegister(instr->GetRn());
5432   SimVRegister& rm = ReadVRegister(instr->GetRm());
5433 
5434   switch (instr->Mask(NEON3DifferentMask)) {
5435     case NEON_PMULL:
5436       pmull(vf_l, rd, rn, rm);
5437       break;
5438     case NEON_PMULL2:
5439       pmull2(vf_l, rd, rn, rm);
5440       break;
5441     case NEON_UADDL:
5442       uaddl(vf_l, rd, rn, rm);
5443       break;
5444     case NEON_UADDL2:
5445       uaddl2(vf_l, rd, rn, rm);
5446       break;
5447     case NEON_SADDL:
5448       saddl(vf_l, rd, rn, rm);
5449       break;
5450     case NEON_SADDL2:
5451       saddl2(vf_l, rd, rn, rm);
5452       break;
5453     case NEON_USUBL:
5454       usubl(vf_l, rd, rn, rm);
5455       break;
5456     case NEON_USUBL2:
5457       usubl2(vf_l, rd, rn, rm);
5458       break;
5459     case NEON_SSUBL:
5460       ssubl(vf_l, rd, rn, rm);
5461       break;
5462     case NEON_SSUBL2:
5463       ssubl2(vf_l, rd, rn, rm);
5464       break;
5465     case NEON_SABAL:
5466       sabal(vf_l, rd, rn, rm);
5467       break;
5468     case NEON_SABAL2:
5469       sabal2(vf_l, rd, rn, rm);
5470       break;
5471     case NEON_UABAL:
5472       uabal(vf_l, rd, rn, rm);
5473       break;
5474     case NEON_UABAL2:
5475       uabal2(vf_l, rd, rn, rm);
5476       break;
5477     case NEON_SABDL:
5478       sabdl(vf_l, rd, rn, rm);
5479       break;
5480     case NEON_SABDL2:
5481       sabdl2(vf_l, rd, rn, rm);
5482       break;
5483     case NEON_UABDL:
5484       uabdl(vf_l, rd, rn, rm);
5485       break;
5486     case NEON_UABDL2:
5487       uabdl2(vf_l, rd, rn, rm);
5488       break;
5489     case NEON_SMLAL:
5490       smlal(vf_l, rd, rn, rm);
5491       break;
5492     case NEON_SMLAL2:
5493       smlal2(vf_l, rd, rn, rm);
5494       break;
5495     case NEON_UMLAL:
5496       umlal(vf_l, rd, rn, rm);
5497       break;
5498     case NEON_UMLAL2:
5499       umlal2(vf_l, rd, rn, rm);
5500       break;
5501     case NEON_SMLSL:
5502       smlsl(vf_l, rd, rn, rm);
5503       break;
5504     case NEON_SMLSL2:
5505       smlsl2(vf_l, rd, rn, rm);
5506       break;
5507     case NEON_UMLSL:
5508       umlsl(vf_l, rd, rn, rm);
5509       break;
5510     case NEON_UMLSL2:
5511       umlsl2(vf_l, rd, rn, rm);
5512       break;
5513     case NEON_SMULL:
5514       smull(vf_l, rd, rn, rm);
5515       break;
5516     case NEON_SMULL2:
5517       smull2(vf_l, rd, rn, rm);
5518       break;
5519     case NEON_UMULL:
5520       umull(vf_l, rd, rn, rm);
5521       break;
5522     case NEON_UMULL2:
5523       umull2(vf_l, rd, rn, rm);
5524       break;
5525     case NEON_SQDMLAL:
5526       sqdmlal(vf_l, rd, rn, rm);
5527       break;
5528     case NEON_SQDMLAL2:
5529       sqdmlal2(vf_l, rd, rn, rm);
5530       break;
5531     case NEON_SQDMLSL:
5532       sqdmlsl(vf_l, rd, rn, rm);
5533       break;
5534     case NEON_SQDMLSL2:
5535       sqdmlsl2(vf_l, rd, rn, rm);
5536       break;
5537     case NEON_SQDMULL:
5538       sqdmull(vf_l, rd, rn, rm);
5539       break;
5540     case NEON_SQDMULL2:
5541       sqdmull2(vf_l, rd, rn, rm);
5542       break;
5543     case NEON_UADDW:
5544       uaddw(vf_l, rd, rn, rm);
5545       break;
5546     case NEON_UADDW2:
5547       uaddw2(vf_l, rd, rn, rm);
5548       break;
5549     case NEON_SADDW:
5550       saddw(vf_l, rd, rn, rm);
5551       break;
5552     case NEON_SADDW2:
5553       saddw2(vf_l, rd, rn, rm);
5554       break;
5555     case NEON_USUBW:
5556       usubw(vf_l, rd, rn, rm);
5557       break;
5558     case NEON_USUBW2:
5559       usubw2(vf_l, rd, rn, rm);
5560       break;
5561     case NEON_SSUBW:
5562       ssubw(vf_l, rd, rn, rm);
5563       break;
5564     case NEON_SSUBW2:
5565       ssubw2(vf_l, rd, rn, rm);
5566       break;
5567     case NEON_ADDHN:
5568       addhn(vf, rd, rn, rm);
5569       break;
5570     case NEON_ADDHN2:
5571       addhn2(vf, rd, rn, rm);
5572       break;
5573     case NEON_RADDHN:
5574       raddhn(vf, rd, rn, rm);
5575       break;
5576     case NEON_RADDHN2:
5577       raddhn2(vf, rd, rn, rm);
5578       break;
5579     case NEON_SUBHN:
5580       subhn(vf, rd, rn, rm);
5581       break;
5582     case NEON_SUBHN2:
5583       subhn2(vf, rd, rn, rm);
5584       break;
5585     case NEON_RSUBHN:
5586       rsubhn(vf, rd, rn, rm);
5587       break;
5588     case NEON_RSUBHN2:
5589       rsubhn2(vf, rd, rn, rm);
5590       break;
5591     default:
5592       VIXL_UNIMPLEMENTED();
5593   }
5594 }
5595 
5596 
5597 void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
5598   NEONFormatDecoder nfd(instr);
5599 
5600   static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
5601 
5602   SimVRegister& rd = ReadVRegister(instr->GetRd());
5603   SimVRegister& rn = ReadVRegister(instr->GetRn());
5604 
5605   if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) {
5606     VectorFormat vf = nfd.GetVectorFormat(&map_half);
5607     switch (instr->Mask(NEONAcrossLanesFP16Mask)) {
5608       case NEON_FMAXV_H:
5609         fmaxv(vf, rd, rn);
5610         break;
5611       case NEON_FMINV_H:
5612         fminv(vf, rd, rn);
5613         break;
5614       case NEON_FMAXNMV_H:
5615         fmaxnmv(vf, rd, rn);
5616         break;
5617       case NEON_FMINNMV_H:
5618         fminnmv(vf, rd, rn);
5619         break;
5620       default:
5621         VIXL_UNIMPLEMENTED();
5622     }
5623   } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
5624     // The input operand's VectorFormat is passed for these instructions.
5625     VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
5626 
5627     switch (instr->Mask(NEONAcrossLanesFPMask)) {
5628       case NEON_FMAXV:
5629         fmaxv(vf, rd, rn);
5630         break;
5631       case NEON_FMINV:
5632         fminv(vf, rd, rn);
5633         break;
5634       case NEON_FMAXNMV:
5635         fmaxnmv(vf, rd, rn);
5636         break;
5637       case NEON_FMINNMV:
5638         fminnmv(vf, rd, rn);
5639         break;
5640       default:
5641         VIXL_UNIMPLEMENTED();
5642     }
5643   } else {
5644     VectorFormat vf = nfd.GetVectorFormat();
5645 
5646     switch (instr->Mask(NEONAcrossLanesMask)) {
5647       case NEON_ADDV:
5648         addv(vf, rd, rn);
5649         break;
5650       case NEON_SMAXV:
5651         smaxv(vf, rd, rn);
5652         break;
5653       case NEON_SMINV:
5654         sminv(vf, rd, rn);
5655         break;
5656       case NEON_UMAXV:
5657         umaxv(vf, rd, rn);
5658         break;
5659       case NEON_UMINV:
5660         uminv(vf, rd, rn);
5661         break;
5662       case NEON_SADDLV:
5663         saddlv(vf, rd, rn);
5664         break;
5665       case NEON_UADDLV:
5666         uaddlv(vf, rd, rn);
5667         break;
5668       default:
5669         VIXL_UNIMPLEMENTED();
5670     }
5671   }
5672 }
5673 
5674 
5675 void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
5676   NEONFormatDecoder nfd(instr);
5677   static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
5678   VectorFormat vf_r = nfd.GetVectorFormat();
5679   VectorFormat vf_half = nfd.GetVectorFormat(&map_half);
5680   VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
5681 
5682   SimVRegister& rd = ReadVRegister(instr->GetRd());
5683   SimVRegister& rn = ReadVRegister(instr->GetRn());
5684 
5685   ByElementOp Op = NULL;
5686 
5687   int rm_reg = instr->GetRm();
5688   int rm_low_reg = instr->GetRmLow16();
5689   int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
5690   int index_hlm = (index << 1) | instr->GetNEONM();
5691 
5692   switch (instr->Mask(NEONByIndexedElementFPLongMask)) {
5693     // These are oddballs and are best handled as special cases.
5694     // - Rm is encoded with only 4 bits (and must be in the lower 16 registers).
5695     // - The index is always H:L:M.
5696     case NEON_FMLAL_H_byelement:
5697       fmlal(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
5698       return;
5699     case NEON_FMLAL2_H_byelement:
5700       fmlal2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
5701       return;
5702     case NEON_FMLSL_H_byelement:
5703       fmlsl(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
5704       return;
5705     case NEON_FMLSL2_H_byelement:
5706       fmlsl2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm);
5707       return;
5708   }
5709 
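  // For 16-bit elements the index needs three bits (H:L:M) and Rm is encoded
  // in only four bits, so the low-register variants computed above are used.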
5710   if (instr->GetNEONSize() == 1) {
5711     rm_reg = rm_low_reg;
5712     index = index_hlm;
5713   }
5714 
5715   switch (instr->Mask(NEONByIndexedElementMask)) {
5716     case NEON_MUL_byelement:
5717       Op = &Simulator::mul;
5718       vf = vf_r;
5719       break;
5720     case NEON_MLA_byelement:
5721       Op = &Simulator::mla;
5722       vf = vf_r;
5723       break;
5724     case NEON_MLS_byelement:
5725       Op = &Simulator::mls;
5726       vf = vf_r;
5727       break;
5728     case NEON_SQDMULH_byelement:
5729       Op = &Simulator::sqdmulh;
5730       vf = vf_r;
5731       break;
5732     case NEON_SQRDMULH_byelement:
5733       Op = &Simulator::sqrdmulh;
5734       vf = vf_r;
5735       break;
5736     case NEON_SDOT_byelement:
5737       Op = &Simulator::sdot;
5738       vf = vf_r;
5739       break;
5740     case NEON_SQRDMLAH_byelement:
5741       Op = &Simulator::sqrdmlah;
5742       vf = vf_r;
5743       break;
5744     case NEON_UDOT_byelement:
5745       Op = &Simulator::udot;
5746       vf = vf_r;
5747       break;
5748     case NEON_SQRDMLSH_byelement:
5749       Op = &Simulator::sqrdmlsh;
5750       vf = vf_r;
5751       break;
5752     case NEON_SMULL_byelement:
5753       if (instr->Mask(NEON_Q)) {
5754         Op = &Simulator::smull2;
5755       } else {
5756         Op = &Simulator::smull;
5757       }
5758       break;
5759     case NEON_UMULL_byelement:
5760       if (instr->Mask(NEON_Q)) {
5761         Op = &Simulator::umull2;
5762       } else {
5763         Op = &Simulator::umull;
5764       }
5765       break;
5766     case NEON_SMLAL_byelement:
5767       if (instr->Mask(NEON_Q)) {
5768         Op = &Simulator::smlal2;
5769       } else {
5770         Op = &Simulator::smlal;
5771       }
5772       break;
5773     case NEON_UMLAL_byelement:
5774       if (instr->Mask(NEON_Q)) {
5775         Op = &Simulator::umlal2;
5776       } else {
5777         Op = &Simulator::umlal;
5778       }
5779       break;
5780     case NEON_SMLSL_byelement:
5781       if (instr->Mask(NEON_Q)) {
5782         Op = &Simulator::smlsl2;
5783       } else {
5784         Op = &Simulator::smlsl;
5785       }
5786       break;
5787     case NEON_UMLSL_byelement:
5788       if (instr->Mask(NEON_Q)) {
5789         Op = &Simulator::umlsl2;
5790       } else {
5791         Op = &Simulator::umlsl;
5792       }
5793       break;
5794     case NEON_SQDMULL_byelement:
5795       if (instr->Mask(NEON_Q)) {
5796         Op = &Simulator::sqdmull2;
5797       } else {
5798         Op = &Simulator::sqdmull;
5799       }
5800       break;
5801     case NEON_SQDMLAL_byelement:
5802       if (instr->Mask(NEON_Q)) {
5803         Op = &Simulator::sqdmlal2;
5804       } else {
5805         Op = &Simulator::sqdmlal;
5806       }
5807       break;
5808     case NEON_SQDMLSL_byelement:
5809       if (instr->Mask(NEON_Q)) {
5810         Op = &Simulator::sqdmlsl2;
5811       } else {
5812         Op = &Simulator::sqdmlsl;
5813       }
5814       break;
5815     default:
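      // The remaining encodings are FP by-element operations. The index width
      // depends on the FP type: the narrowest elements use H:L:M (and
      // restrict Rm to the low 16 registers), wider ones use H:L or H alone.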
5816       index = instr->GetNEONH();
5817       if (instr->GetFPType() == 0) {
5818         rm_reg &= 0xf;
5819         index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM();
5820       } else if ((instr->GetFPType() & 1) == 0) {
5821         index = (index << 1) | instr->GetNEONL();
5822       }
5823 
5824       vf = nfd.GetVectorFormat(nfd.FPFormatMap());
5825 
5826       switch (instr->Mask(NEONByIndexedElementFPMask)) {
5827         case NEON_FMUL_H_byelement:
5828           vf = vf_half;
5829           VIXL_FALLTHROUGH();
5830         case NEON_FMUL_byelement:
5831           Op = &Simulator::fmul;
5832           break;
5833         case NEON_FMLA_H_byelement:
5834           vf = vf_half;
5835           VIXL_FALLTHROUGH();
5836         case NEON_FMLA_byelement:
5837           Op = &Simulator::fmla;
5838           break;
5839         case NEON_FMLS_H_byelement:
5840           vf = vf_half;
5841           VIXL_FALLTHROUGH();
5842         case NEON_FMLS_byelement:
5843           Op = &Simulator::fmls;
5844           break;
5845         case NEON_FMULX_H_byelement:
5846           vf = vf_half;
5847           VIXL_FALLTHROUGH();
5848         case NEON_FMULX_byelement:
5849           Op = &Simulator::fmulx;
5850           break;
5851         default:
5852           if (instr->GetNEONSize() == 2) {
5853             index = instr->GetNEONH();
5854           } else {
5855             index = (instr->GetNEONH() << 1) | instr->GetNEONL();
5856           }
5857           switch (instr->Mask(NEONByIndexedElementFPComplexMask)) {
5858             case NEON_FCMLA_byelement:
5859               vf = vf_r;
5860               fcmla(vf,
5861                     rd,
5862                     rn,
5863                     ReadVRegister(instr->GetRm()),
5864                     index,
5865                     instr->GetImmRotFcmlaSca());
5866               return;
5867             default:
5868               VIXL_UNIMPLEMENTED();
5869           }
5870       }
5871   }
5872 
5873   (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index);
5874 }
5875 
5876 
5877 void Simulator::VisitNEONCopy(const Instruction* instr) {
5878   NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
5879   VectorFormat vf = nfd.GetVectorFormat();
5880 
5881   SimVRegister& rd = ReadVRegister(instr->GetRd());
5882   SimVRegister& rn = ReadVRegister(instr->GetRn());
5883   int imm5 = instr->GetImmNEON5();
5884   int tz = CountTrailingZeros(imm5, 32);
5885   int reg_index = imm5 >> (tz + 1);
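  // The number of trailing zeros in imm5 encodes the element size and the
  // remaining bits encode the lane; e.g. imm5 = 0b10100 has two trailing
  // zeros (an S-sized element) and selects lane 0b10100 >> 3 = 2.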
5886 
5887   if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
5888     int imm4 = instr->GetImmNEON4();
5889     int rn_index = imm4 >> tz;
5890     ins_element(vf, rd, reg_index, rn, rn_index);
5891   } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
5892     ins_immediate(vf, rd, reg_index, ReadXRegister(instr->GetRn()));
5893   } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
5894     uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
5895     value &= MaxUintFromFormat(vf);
5896     WriteXRegister(instr->GetRd(), value);
5897   } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
5898     int64_t value = LogicVRegister(rn).Int(vf, reg_index);
5899     if (instr->GetNEONQ()) {
5900       WriteXRegister(instr->GetRd(), value);
5901     } else {
5902       WriteWRegister(instr->GetRd(), (int32_t)value);
5903     }
5904   } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
5905     dup_element(vf, rd, rn, reg_index);
5906   } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
5907     dup_immediate(vf, rd, ReadXRegister(instr->GetRn()));
5908   } else {
5909     VIXL_UNIMPLEMENTED();
5910   }
5911 }
5912 
5913 
5914 void Simulator::VisitNEONExtract(const Instruction* instr) {
5915   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
5916   VectorFormat vf = nfd.GetVectorFormat();
5917   SimVRegister& rd = ReadVRegister(instr->GetRd());
5918   SimVRegister& rn = ReadVRegister(instr->GetRn());
5919   SimVRegister& rm = ReadVRegister(instr->GetRm());
5920   if (instr->Mask(NEONExtractMask) == NEON_EXT) {
5921     int index = instr->GetImmNEONExt();
5922     ext(vf, rd, rn, rm, index);
5923   } else {
5924     VIXL_UNIMPLEMENTED();
5925   }
5926 }
5927 
5928 
5929 void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
5930                                                AddrMode addr_mode) {
5931   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
5932   VectorFormat vf = nfd.GetVectorFormat();
5933 
5934   uint64_t addr_base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
5935   int reg_size = RegisterSizeInBytesFromFormat(vf);
5936 
5937   int reg[4];
5938   uint64_t addr[4];
5939   for (int i = 0; i < 4; i++) {
5940     reg[i] = (instr->GetRt() + i) % kNumberOfVRegisters;
5941     addr[i] = addr_base + (i * reg_size);
5942   }
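  // For LD1/ST1 each register is transferred to or from its own consecutive
  // reg_size block (addr[i]); the interleaved LD2-LD4/ST2-ST4 forms below use
  // only addr[0] and let the ld2/st2 etc. helpers handle the element layout.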
5943   int struct_parts = 1;
5944   int reg_count = 1;
5945   bool log_read = true;
5946 
5947   // Bit 23 determines whether this is an offset or post-index addressing mode.
5948   // In offset mode, bits 20 to 16 should be zero; these bits encode the
5949   // register or immediate in post-index mode.
5950   if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
5951     VIXL_UNREACHABLE();
5952   }
5953 
5954   // We use the PostIndex mask here, as it works in this case for both Offset
5955   // and PostIndex addressing.
5956   switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
5957     case NEON_LD1_4v:
5958     case NEON_LD1_4v_post:
5959       ld1(vf, ReadVRegister(reg[3]), addr[3]);
5960       reg_count++;
5961       VIXL_FALLTHROUGH();
5962     case NEON_LD1_3v:
5963     case NEON_LD1_3v_post:
5964       ld1(vf, ReadVRegister(reg[2]), addr[2]);
5965       reg_count++;
5966       VIXL_FALLTHROUGH();
5967     case NEON_LD1_2v:
5968     case NEON_LD1_2v_post:
5969       ld1(vf, ReadVRegister(reg[1]), addr[1]);
5970       reg_count++;
5971       VIXL_FALLTHROUGH();
5972     case NEON_LD1_1v:
5973     case NEON_LD1_1v_post:
5974       ld1(vf, ReadVRegister(reg[0]), addr[0]);
5975       break;
5976     case NEON_ST1_4v:
5977     case NEON_ST1_4v_post:
5978       st1(vf, ReadVRegister(reg[3]), addr[3]);
5979       reg_count++;
5980       VIXL_FALLTHROUGH();
5981     case NEON_ST1_3v:
5982     case NEON_ST1_3v_post:
5983       st1(vf, ReadVRegister(reg[2]), addr[2]);
5984       reg_count++;
5985       VIXL_FALLTHROUGH();
5986     case NEON_ST1_2v:
5987     case NEON_ST1_2v_post:
5988       st1(vf, ReadVRegister(reg[1]), addr[1]);
5989       reg_count++;
5990       VIXL_FALLTHROUGH();
5991     case NEON_ST1_1v:
5992     case NEON_ST1_1v_post:
5993       st1(vf, ReadVRegister(reg[0]), addr[0]);
5994       log_read = false;
5995       break;
5996     case NEON_LD2_post:
5997     case NEON_LD2:
5998       ld2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]);
5999       struct_parts = 2;
6000       reg_count = 2;
6001       break;
6002     case NEON_ST2:
6003     case NEON_ST2_post:
6004       st2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]);
6005       struct_parts = 2;
6006       reg_count = 2;
6007       log_read = false;
6008       break;
6009     case NEON_LD3_post:
6010     case NEON_LD3:
6011       ld3(vf,
6012           ReadVRegister(reg[0]),
6013           ReadVRegister(reg[1]),
6014           ReadVRegister(reg[2]),
6015           addr[0]);
6016       struct_parts = 3;
6017       reg_count = 3;
6018       break;
6019     case NEON_ST3:
6020     case NEON_ST3_post:
6021       st3(vf,
6022           ReadVRegister(reg[0]),
6023           ReadVRegister(reg[1]),
6024           ReadVRegister(reg[2]),
6025           addr[0]);
6026       struct_parts = 3;
6027       reg_count = 3;
6028       log_read = false;
6029       break;
6030     case NEON_ST4:
6031     case NEON_ST4_post:
6032       st4(vf,
6033           ReadVRegister(reg[0]),
6034           ReadVRegister(reg[1]),
6035           ReadVRegister(reg[2]),
6036           ReadVRegister(reg[3]),
6037           addr[0]);
6038       struct_parts = 4;
6039       reg_count = 4;
6040       log_read = false;
6041       break;
6042     case NEON_LD4_post:
6043     case NEON_LD4:
6044       ld4(vf,
6045           ReadVRegister(reg[0]),
6046           ReadVRegister(reg[1]),
6047           ReadVRegister(reg[2]),
6048           ReadVRegister(reg[3]),
6049           addr[0]);
6050       struct_parts = 4;
6051       reg_count = 4;
6052       break;
6053     default:
6054       VIXL_UNIMPLEMENTED();
6055   }
6056 
6057   bool do_trace = log_read ? ShouldTraceVRegs() : ShouldTraceWrites();
6058   if (do_trace) {
6059     PrintRegisterFormat print_format =
6060         GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
6061     const char* op;
6062     if (log_read) {
6063       op = "<-";
6064     } else {
6065       op = "->";
6066       // Stores don't represent a change to the source register's value, so only
6067       // print the relevant part of the value.
6068       print_format = GetPrintRegPartial(print_format);
6069     }
6070 
6071     VIXL_ASSERT((struct_parts == reg_count) || (struct_parts == 1));
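    // e.g. LD1 with three registers prints three single-register accesses,
    // whereas LD3 prints one access covering three registers.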
6072     for (int s = reg_count - struct_parts; s >= 0; s -= struct_parts) {
6073       uintptr_t address = addr_base + (s * RegisterSizeInBytesFromFormat(vf));
6074       PrintVStructAccess(reg[s], struct_parts, print_format, op, address);
6075     }
6076   }
6077 
6078   if (addr_mode == PostIndex) {
6079     int rm = instr->GetRm();
6080     // The immediate post index addressing mode is indicated by rm = 31.
6081     // The immediate is implied by the number of vector registers used.
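    // e.g. LD1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #64 advances x0 by
    // 4 * 16 bytes.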
6082     addr_base += (rm == 31) ? (RegisterSizeInBytesFromFormat(vf) * reg_count)
6083                             : ReadXRegister(rm);
6084     WriteXRegister(instr->GetRn(), addr_base);
6085   } else {
6086     VIXL_ASSERT(addr_mode == Offset);
6087   }
6088 }
6089 
6090 
6091 void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
6092   NEONLoadStoreMultiStructHelper(instr, Offset);
6093 }
6094 
6095 
6096 void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
6097     const Instruction* instr) {
6098   NEONLoadStoreMultiStructHelper(instr, PostIndex);
6099 }
6100 
6101 
6102 void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
6103                                                 AddrMode addr_mode) {
6104   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
6105   int rt = instr->GetRt();
6106 
6107   // Bit 23 determines whether this is an offset or post-index addressing mode.
6108   // In offset mode, bits 20 to 16 should be zero; these bits encode the
6109   // register or immediate in post-index mode.
6110   if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
6111     VIXL_UNREACHABLE();
6112   }
6113 
6114   // We use the PostIndex mask here, as it works in this case for both Offset
6115   // and PostIndex addressing.
6116   bool do_load = false;
6117 
6118   bool replicating = false;
6119 
6120   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
6121   VectorFormat vf_t = nfd.GetVectorFormat();
6122 
6123   VectorFormat vf = kFormat16B;
6124   switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
6125     case NEON_LD1_b:
6126     case NEON_LD1_b_post:
6127     case NEON_LD2_b:
6128     case NEON_LD2_b_post:
6129     case NEON_LD3_b:
6130     case NEON_LD3_b_post:
6131     case NEON_LD4_b:
6132     case NEON_LD4_b_post:
6133       do_load = true;
6134       VIXL_FALLTHROUGH();
6135     case NEON_ST1_b:
6136     case NEON_ST1_b_post:
6137     case NEON_ST2_b:
6138     case NEON_ST2_b_post:
6139     case NEON_ST3_b:
6140     case NEON_ST3_b_post:
6141     case NEON_ST4_b:
6142     case NEON_ST4_b_post:
6143       break;
6144 
6145     case NEON_LD1_h:
6146     case NEON_LD1_h_post:
6147     case NEON_LD2_h:
6148     case NEON_LD2_h_post:
6149     case NEON_LD3_h:
6150     case NEON_LD3_h_post:
6151     case NEON_LD4_h:
6152     case NEON_LD4_h_post:
6153       do_load = true;
6154       VIXL_FALLTHROUGH();
6155     case NEON_ST1_h:
6156     case NEON_ST1_h_post:
6157     case NEON_ST2_h:
6158     case NEON_ST2_h_post:
6159     case NEON_ST3_h:
6160     case NEON_ST3_h_post:
6161     case NEON_ST4_h:
6162     case NEON_ST4_h_post:
6163       vf = kFormat8H;
6164       break;
6165     case NEON_LD1_s:
6166     case NEON_LD1_s_post:
6167     case NEON_LD2_s:
6168     case NEON_LD2_s_post:
6169     case NEON_LD3_s:
6170     case NEON_LD3_s_post:
6171     case NEON_LD4_s:
6172     case NEON_LD4_s_post:
6173       do_load = true;
6174       VIXL_FALLTHROUGH();
6175     case NEON_ST1_s:
6176     case NEON_ST1_s_post:
6177     case NEON_ST2_s:
6178     case NEON_ST2_s_post:
6179     case NEON_ST3_s:
6180     case NEON_ST3_s_post:
6181     case NEON_ST4_s:
6182     case NEON_ST4_s_post: {
6183       VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
6184       VIXL_STATIC_ASSERT((NEON_LD1_s_post | (1 << NEONLSSize_offset)) ==
6185                          NEON_LD1_d_post);
6186       VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
6187       VIXL_STATIC_ASSERT((NEON_ST1_s_post | (1 << NEONLSSize_offset)) ==
6188                          NEON_ST1_d_post);
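      // The _d variants differ from the _s variants only in size<0> (asserted
      // above), so they decode to these case labels; size<0> then selects
      // between the S and D lane formats below.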
6189       vf = ((instr->GetNEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
6190       break;
6191     }
6192 
6193     case NEON_LD1R:
6194     case NEON_LD1R_post:
6195     case NEON_LD2R:
6196     case NEON_LD2R_post:
6197     case NEON_LD3R:
6198     case NEON_LD3R_post:
6199     case NEON_LD4R:
6200     case NEON_LD4R_post:
6201       vf = vf_t;
6202       do_load = true;
6203       replicating = true;
6204       break;
6205 
6206     default:
6207       VIXL_UNIMPLEMENTED();
6208   }
6209 
6210   int index_shift = LaneSizeInBytesLog2FromFormat(vf);
6211   int lane = instr->GetNEONLSIndex(index_shift);
6212   int reg_count = 0;
6213   int rt2 = (rt + 1) % kNumberOfVRegisters;
6214   int rt3 = (rt2 + 1) % kNumberOfVRegisters;
6215   int rt4 = (rt3 + 1) % kNumberOfVRegisters;
6216   switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
6217     case NEONLoadStoreSingle1:
6218       reg_count = 1;
6219       if (replicating) {
6220         VIXL_ASSERT(do_load);
6221         ld1r(vf, ReadVRegister(rt), addr);
6222       } else if (do_load) {
6223         ld1(vf, ReadVRegister(rt), lane, addr);
6224       } else {
6225         st1(vf, ReadVRegister(rt), lane, addr);
6226       }
6227       break;
6228     case NEONLoadStoreSingle2:
6229       reg_count = 2;
6230       if (replicating) {
6231         VIXL_ASSERT(do_load);
6232         ld2r(vf, ReadVRegister(rt), ReadVRegister(rt2), addr);
6233       } else if (do_load) {
6234         ld2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
6235       } else {
6236         st2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
6237       }
6238       break;
6239     case NEONLoadStoreSingle3:
6240       reg_count = 3;
6241       if (replicating) {
6242         VIXL_ASSERT(do_load);
6243         ld3r(vf,
6244              ReadVRegister(rt),
6245              ReadVRegister(rt2),
6246              ReadVRegister(rt3),
6247              addr);
6248       } else if (do_load) {
6249         ld3(vf,
6250             ReadVRegister(rt),
6251             ReadVRegister(rt2),
6252             ReadVRegister(rt3),
6253             lane,
6254             addr);
6255       } else {
6256         st3(vf,
6257             ReadVRegister(rt),
6258             ReadVRegister(rt2),
6259             ReadVRegister(rt3),
6260             lane,
6261             addr);
6262       }
6263       break;
6264     case NEONLoadStoreSingle4:
6265       reg_count = 4;
6266       if (replicating) {
6267         VIXL_ASSERT(do_load);
6268         ld4r(vf,
6269              ReadVRegister(rt),
6270              ReadVRegister(rt2),
6271              ReadVRegister(rt3),
6272              ReadVRegister(rt4),
6273              addr);
6274       } else if (do_load) {
6275         ld4(vf,
6276             ReadVRegister(rt),
6277             ReadVRegister(rt2),
6278             ReadVRegister(rt3),
6279             ReadVRegister(rt4),
6280             lane,
6281             addr);
6282       } else {
6283         st4(vf,
6284             ReadVRegister(rt),
6285             ReadVRegister(rt2),
6286             ReadVRegister(rt3),
6287             ReadVRegister(rt4),
6288             lane,
6289             addr);
6290       }
6291       break;
6292     default:
6293       VIXL_UNIMPLEMENTED();
6294   }
6295 
6296   // Trace registers and/or memory writes.
6297   PrintRegisterFormat print_format =
6298       GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
6299   if (do_load) {
6300     if (ShouldTraceVRegs()) {
6301       if (replicating) {
6302         PrintVReplicatingStructAccess(rt, reg_count, print_format, "<-", addr);
6303       } else {
6304         PrintVSingleStructAccess(rt, reg_count, lane, print_format, "<-", addr);
6305       }
6306     }
6307   } else {
6308     if (ShouldTraceWrites()) {
6309       // Stores don't represent a change to the source register's value, so only
6310       // print the relevant part of the value.
6311       print_format = GetPrintRegPartial(print_format);
6312       PrintVSingleStructAccess(rt, reg_count, lane, print_format, "->", addr);
6313     }
6314   }
6315 
6316   if (addr_mode == PostIndex) {
6317     int rm = instr->GetRm();
6318     int lane_size = LaneSizeInBytesFromFormat(vf);
6319     WriteXRegister(instr->GetRn(),
6320                    addr + ((rm == 31) ? (reg_count * lane_size)
6321                                       : ReadXRegister(rm)));
6322   }
6323 }
6324 
6325 
6326 void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
6327   NEONLoadStoreSingleStructHelper(instr, Offset);
6328 }
6329 
6330 
6331 void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
6332     const Instruction* instr) {
6333   NEONLoadStoreSingleStructHelper(instr, PostIndex);
6334 }
6335 
6336 
6337 void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
6338   SimVRegister& rd = ReadVRegister(instr->GetRd());
6339   int cmode = instr->GetNEONCmode();
6340   int cmode_3_1 = (cmode >> 1) & 7;
6341   int cmode_3 = (cmode >> 3) & 1;
6342   int cmode_2 = (cmode >> 2) & 1;
6343   int cmode_1 = (cmode >> 1) & 1;
6344   int cmode_0 = cmode & 1;
6345   int half_enc = instr->ExtractBit(11);
6346   int q = instr->GetNEONQ();
6347   int op_bit = instr->GetNEONModImmOp();
6348   uint64_t imm8 = instr->GetImmNEONabcdefgh();
6349   // Find the format and immediate value
6350   uint64_t imm = 0;
6351   VectorFormat vform = kFormatUndefined;
6352   switch (cmode_3_1) {
6353     case 0x0:
6354     case 0x1:
6355     case 0x2:
6356     case 0x3:
6357       vform = (q == 1) ? kFormat4S : kFormat2S;
6358       imm = imm8 << (8 * cmode_3_1);
6359       break;
6360     case 0x4:
6361     case 0x5:
6362       vform = (q == 1) ? kFormat8H : kFormat4H;
6363       imm = imm8 << (8 * cmode_1);
6364       break;
6365     case 0x6:
6366       vform = (q == 1) ? kFormat4S : kFormat2S;
6367       if (cmode_0 == 0) {
6368         imm = imm8 << 8 | 0x000000ff;
6369       } else {
6370         imm = imm8 << 16 | 0x0000ffff;
6371       }
6372       break;
6373     case 0x7:
6374       if (cmode_0 == 0 && op_bit == 0) {
6375         vform = q ? kFormat16B : kFormat8B;
6376         imm = imm8;
6377       } else if (cmode_0 == 0 && op_bit == 1) {
6378         vform = q ? kFormat2D : kFormat1D;
6379         imm = 0;
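        // Replicate each bit of imm8 across the corresponding byte of the
        // 64-bit immediate (bit i set => byte i = 0xff).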
6380         for (int i = 0; i < 8; ++i) {
6381           if (imm8 & (1 << i)) {
6382             imm |= (UINT64_C(0xff) << (8 * i));
6383           }
6384         }
6385       } else {  // cmode_0 == 1, cmode == 0xf.
6386         if (half_enc == 1) {
6387           vform = q ? kFormat8H : kFormat4H;
6388           imm = Float16ToRawbits(instr->GetImmNEONFP16());
6389         } else if (op_bit == 0) {
6390           vform = q ? kFormat4S : kFormat2S;
6391           imm = FloatToRawbits(instr->GetImmNEONFP32());
6392         } else if (q == 1) {
6393           vform = kFormat2D;
6394           imm = DoubleToRawbits(instr->GetImmNEONFP64());
6395         } else {
6396           VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
6397           VisitUnallocated(instr);
6398         }
6399       }
6400       break;
6401     default:
6402       VIXL_UNREACHABLE();
6403       break;
6404   }
6405 
6406   // Find the operation
6407   NEONModifiedImmediateOp op;
6408   if (cmode_3 == 0) {
6409     if (cmode_0 == 0) {
6410       op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
6411     } else {  // cmode<0> == '1'
6412       op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
6413     }
6414   } else {  // cmode<3> == '1'
6415     if (cmode_2 == 0) {
6416       if (cmode_0 == 0) {
6417         op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
6418       } else {  // cmode<0> == '1'
6419         op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
6420       }
6421     } else {  // cmode<2> == '1'
6422       if (cmode_1 == 0) {
6423         op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
6424       } else {  // cmode<1> == '1'
6425         if (cmode_0 == 0) {
6426           op = NEONModifiedImmediate_MOVI;
6427         } else {  // cmode<0> == '1'
6428           op = NEONModifiedImmediate_MOVI;
6429         }
6430       }
6431     }
6432   }
6433 
6434   // Call the logic function
6435   if (op == NEONModifiedImmediate_ORR) {
6436     orr(vform, rd, rd, imm);
6437   } else if (op == NEONModifiedImmediate_BIC) {
6438     bic(vform, rd, rd, imm);
6439   } else if (op == NEONModifiedImmediate_MOVI) {
6440     movi(vform, rd, imm);
6441   } else if (op == NEONModifiedImmediate_MVNI) {
6442     mvni(vform, rd, imm);
6443   } else {
6444     VisitUnimplemented(instr);
6445   }
6446 }
6447 
6448 
6449 void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
6450   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
6451   VectorFormat vf = nfd.GetVectorFormat();
6452 
6453   SimVRegister& rd = ReadVRegister(instr->GetRd());
6454   SimVRegister& rn = ReadVRegister(instr->GetRn());
6455 
6456   if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
6457     // These instructions all use a two bit size field, except NOT and RBIT,
6458     // which use the field to encode the operation.
6459     switch (instr->Mask(NEONScalar2RegMiscMask)) {
6460       case NEON_CMEQ_zero_scalar:
6461         cmp(vf, rd, rn, 0, eq);
6462         break;
6463       case NEON_CMGE_zero_scalar:
6464         cmp(vf, rd, rn, 0, ge);
6465         break;
6466       case NEON_CMGT_zero_scalar:
6467         cmp(vf, rd, rn, 0, gt);
6468         break;
6469       case NEON_CMLT_zero_scalar:
6470         cmp(vf, rd, rn, 0, lt);
6471         break;
6472       case NEON_CMLE_zero_scalar:
6473         cmp(vf, rd, rn, 0, le);
6474         break;
6475       case NEON_ABS_scalar:
6476         abs(vf, rd, rn);
6477         break;
6478       case NEON_SQABS_scalar:
6479         abs(vf, rd, rn).SignedSaturate(vf);
6480         break;
6481       case NEON_NEG_scalar:
6482         neg(vf, rd, rn);
6483         break;
6484       case NEON_SQNEG_scalar:
6485         neg(vf, rd, rn).SignedSaturate(vf);
6486         break;
6487       case NEON_SUQADD_scalar:
6488         suqadd(vf, rd, rn);
6489         break;
6490       case NEON_USQADD_scalar:
6491         usqadd(vf, rd, rn);
6492         break;
6493       default:
6494         VIXL_UNIMPLEMENTED();
6495         break;
6496     }
6497   } else {
6498     VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
6499     FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
6500 
6501     // These instructions all use a one bit size field, except SQXTUN, SQXTN
6502     // and UQXTN, which use a two bit size field.
6503     switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
6504       case NEON_FRECPE_scalar:
6505         frecpe(fpf, rd, rn, fpcr_rounding);
6506         break;
6507       case NEON_FRECPX_scalar:
6508         frecpx(fpf, rd, rn);
6509         break;
6510       case NEON_FRSQRTE_scalar:
6511         frsqrte(fpf, rd, rn);
6512         break;
6513       case NEON_FCMGT_zero_scalar:
6514         fcmp_zero(fpf, rd, rn, gt);
6515         break;
6516       case NEON_FCMGE_zero_scalar:
6517         fcmp_zero(fpf, rd, rn, ge);
6518         break;
6519       case NEON_FCMEQ_zero_scalar:
6520         fcmp_zero(fpf, rd, rn, eq);
6521         break;
6522       case NEON_FCMLE_zero_scalar:
6523         fcmp_zero(fpf, rd, rn, le);
6524         break;
6525       case NEON_FCMLT_zero_scalar:
6526         fcmp_zero(fpf, rd, rn, lt);
6527         break;
6528       case NEON_SCVTF_scalar:
6529         scvtf(fpf, rd, rn, 0, fpcr_rounding);
6530         break;
6531       case NEON_UCVTF_scalar:
6532         ucvtf(fpf, rd, rn, 0, fpcr_rounding);
6533         break;
6534       case NEON_FCVTNS_scalar:
6535         fcvts(fpf, rd, rn, FPTieEven);
6536         break;
6537       case NEON_FCVTNU_scalar:
6538         fcvtu(fpf, rd, rn, FPTieEven);
6539         break;
6540       case NEON_FCVTPS_scalar:
6541         fcvts(fpf, rd, rn, FPPositiveInfinity);
6542         break;
6543       case NEON_FCVTPU_scalar:
6544         fcvtu(fpf, rd, rn, FPPositiveInfinity);
6545         break;
6546       case NEON_FCVTMS_scalar:
6547         fcvts(fpf, rd, rn, FPNegativeInfinity);
6548         break;
6549       case NEON_FCVTMU_scalar:
6550         fcvtu(fpf, rd, rn, FPNegativeInfinity);
6551         break;
6552       case NEON_FCVTZS_scalar:
6553         fcvts(fpf, rd, rn, FPZero);
6554         break;
6555       case NEON_FCVTZU_scalar:
6556         fcvtu(fpf, rd, rn, FPZero);
6557         break;
6558       case NEON_FCVTAS_scalar:
6559         fcvts(fpf, rd, rn, FPTieAway);
6560         break;
6561       case NEON_FCVTAU_scalar:
6562         fcvtu(fpf, rd, rn, FPTieAway);
6563         break;
6564       case NEON_FCVTXN_scalar:
6565         // Unlike all of the other FP instructions above, fcvtxn encodes dest
6566         // size S as size<0>=1. There's only one case, so we ignore the form.
6567         VIXL_ASSERT(instr->ExtractBit(22) == 1);
6568         fcvtxn(kFormatS, rd, rn);
6569         break;
6570       default:
6571         switch (instr->Mask(NEONScalar2RegMiscMask)) {
6572           case NEON_SQXTN_scalar:
6573             sqxtn(vf, rd, rn);
6574             break;
6575           case NEON_UQXTN_scalar:
6576             uqxtn(vf, rd, rn);
6577             break;
6578           case NEON_SQXTUN_scalar:
6579             sqxtun(vf, rd, rn);
6580             break;
6581           default:
6582             VIXL_UNIMPLEMENTED();
6583         }
6584     }
6585   }
6586 }
6587 
6588 
6589 void Simulator::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
6590   VectorFormat fpf = kFormatH;
6591   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
6592 
6593   SimVRegister& rd = ReadVRegister(instr->GetRd());
6594   SimVRegister& rn = ReadVRegister(instr->GetRn());
6595 
6596   switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) {
6597     case NEON_FRECPE_H_scalar:
6598       frecpe(fpf, rd, rn, fpcr_rounding);
6599       break;
6600     case NEON_FRECPX_H_scalar:
6601       frecpx(fpf, rd, rn);
6602       break;
6603     case NEON_FRSQRTE_H_scalar:
6604       frsqrte(fpf, rd, rn);
6605       break;
6606     case NEON_FCMGT_H_zero_scalar:
6607       fcmp_zero(fpf, rd, rn, gt);
6608       break;
6609     case NEON_FCMGE_H_zero_scalar:
6610       fcmp_zero(fpf, rd, rn, ge);
6611       break;
6612     case NEON_FCMEQ_H_zero_scalar:
6613       fcmp_zero(fpf, rd, rn, eq);
6614       break;
6615     case NEON_FCMLE_H_zero_scalar:
6616       fcmp_zero(fpf, rd, rn, le);
6617       break;
6618     case NEON_FCMLT_H_zero_scalar:
6619       fcmp_zero(fpf, rd, rn, lt);
6620       break;
6621     case NEON_SCVTF_H_scalar:
6622       scvtf(fpf, rd, rn, 0, fpcr_rounding);
6623       break;
6624     case NEON_UCVTF_H_scalar:
6625       ucvtf(fpf, rd, rn, 0, fpcr_rounding);
6626       break;
6627     case NEON_FCVTNS_H_scalar:
6628       fcvts(fpf, rd, rn, FPTieEven);
6629       break;
6630     case NEON_FCVTNU_H_scalar:
6631       fcvtu(fpf, rd, rn, FPTieEven);
6632       break;
6633     case NEON_FCVTPS_H_scalar:
6634       fcvts(fpf, rd, rn, FPPositiveInfinity);
6635       break;
6636     case NEON_FCVTPU_H_scalar:
6637       fcvtu(fpf, rd, rn, FPPositiveInfinity);
6638       break;
6639     case NEON_FCVTMS_H_scalar:
6640       fcvts(fpf, rd, rn, FPNegativeInfinity);
6641       break;
6642     case NEON_FCVTMU_H_scalar:
6643       fcvtu(fpf, rd, rn, FPNegativeInfinity);
6644       break;
6645     case NEON_FCVTZS_H_scalar:
6646       fcvts(fpf, rd, rn, FPZero);
6647       break;
6648     case NEON_FCVTZU_H_scalar:
6649       fcvtu(fpf, rd, rn, FPZero);
6650       break;
6651     case NEON_FCVTAS_H_scalar:
6652       fcvts(fpf, rd, rn, FPTieAway);
6653       break;
6654     case NEON_FCVTAU_H_scalar:
6655       fcvtu(fpf, rd, rn, FPTieAway);
6656       break;
6657   }
6658 }
6659 
6660 
6661 void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
6662   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
6663   VectorFormat vf = nfd.GetVectorFormat();
6664 
6665   SimVRegister& rd = ReadVRegister(instr->GetRd());
6666   SimVRegister& rn = ReadVRegister(instr->GetRn());
6667   SimVRegister& rm = ReadVRegister(instr->GetRm());
6668   switch (instr->Mask(NEONScalar3DiffMask)) {
6669     case NEON_SQDMLAL_scalar:
6670       sqdmlal(vf, rd, rn, rm);
6671       break;
6672     case NEON_SQDMLSL_scalar:
6673       sqdmlsl(vf, rd, rn, rm);
6674       break;
6675     case NEON_SQDMULL_scalar:
6676       sqdmull(vf, rd, rn, rm);
6677       break;
6678     default:
6679       VIXL_UNIMPLEMENTED();
6680   }
6681 }
6682 
6683 
6684 void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
6685   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
6686   VectorFormat vf = nfd.GetVectorFormat();
6687 
6688   SimVRegister& rd = ReadVRegister(instr->GetRd());
6689   SimVRegister& rn = ReadVRegister(instr->GetRn());
6690   SimVRegister& rm = ReadVRegister(instr->GetRm());
6691 
6692   if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
6693     vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
6694     switch (instr->Mask(NEONScalar3SameFPMask)) {
6695       case NEON_FMULX_scalar:
6696         fmulx(vf, rd, rn, rm);
6697         break;
6698       case NEON_FACGE_scalar:
6699         fabscmp(vf, rd, rn, rm, ge);
6700         break;
6701       case NEON_FACGT_scalar:
6702         fabscmp(vf, rd, rn, rm, gt);
6703         break;
6704       case NEON_FCMEQ_scalar:
6705         fcmp(vf, rd, rn, rm, eq);
6706         break;
6707       case NEON_FCMGE_scalar:
6708         fcmp(vf, rd, rn, rm, ge);
6709         break;
6710       case NEON_FCMGT_scalar:
6711         fcmp(vf, rd, rn, rm, gt);
6712         break;
6713       case NEON_FRECPS_scalar:
6714         frecps(vf, rd, rn, rm);
6715         break;
6716       case NEON_FRSQRTS_scalar:
6717         frsqrts(vf, rd, rn, rm);
6718         break;
6719       case NEON_FABD_scalar:
6720         fabd(vf, rd, rn, rm);
6721         break;
6722       default:
6723         VIXL_UNIMPLEMENTED();
6724     }
6725   } else {
6726     switch (instr->Mask(NEONScalar3SameMask)) {
6727       case NEON_ADD_scalar:
6728         add(vf, rd, rn, rm);
6729         break;
6730       case NEON_SUB_scalar:
6731         sub(vf, rd, rn, rm);
6732         break;
6733       case NEON_CMEQ_scalar:
6734         cmp(vf, rd, rn, rm, eq);
6735         break;
6736       case NEON_CMGE_scalar:
6737         cmp(vf, rd, rn, rm, ge);
6738         break;
6739       case NEON_CMGT_scalar:
6740         cmp(vf, rd, rn, rm, gt);
6741         break;
6742       case NEON_CMHI_scalar:
6743         cmp(vf, rd, rn, rm, hi);
6744         break;
6745       case NEON_CMHS_scalar:
6746         cmp(vf, rd, rn, rm, hs);
6747         break;
6748       case NEON_CMTST_scalar:
6749         cmptst(vf, rd, rn, rm);
6750         break;
6751       case NEON_USHL_scalar:
6752         ushl(vf, rd, rn, rm);
6753         break;
6754       case NEON_SSHL_scalar:
6755         sshl(vf, rd, rn, rm);
6756         break;
6757       case NEON_SQDMULH_scalar:
6758         sqdmulh(vf, rd, rn, rm);
6759         break;
6760       case NEON_SQRDMULH_scalar:
6761         sqrdmulh(vf, rd, rn, rm);
6762         break;
6763       case NEON_UQADD_scalar:
6764         add(vf, rd, rn, rm).UnsignedSaturate(vf);
6765         break;
6766       case NEON_SQADD_scalar:
6767         add(vf, rd, rn, rm).SignedSaturate(vf);
6768         break;
6769       case NEON_UQSUB_scalar:
6770         sub(vf, rd, rn, rm).UnsignedSaturate(vf);
6771         break;
6772       case NEON_SQSUB_scalar:
6773         sub(vf, rd, rn, rm).SignedSaturate(vf);
6774         break;
6775       case NEON_UQSHL_scalar:
6776         ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
6777         break;
6778       case NEON_SQSHL_scalar:
6779         sshl(vf, rd, rn, rm).SignedSaturate(vf);
6780         break;
6781       case NEON_URSHL_scalar:
6782         ushl(vf, rd, rn, rm).Round(vf);
6783         break;
6784       case NEON_SRSHL_scalar:
6785         sshl(vf, rd, rn, rm).Round(vf);
6786         break;
6787       case NEON_UQRSHL_scalar:
6788         ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
6789         break;
6790       case NEON_SQRSHL_scalar:
6791         sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
6792         break;
6793       default:
6794         VIXL_UNIMPLEMENTED();
6795     }
6796   }
6797 }
6798 
6799 void Simulator::VisitNEONScalar3SameFP16(const Instruction* instr) {
6800   SimVRegister& rd = ReadVRegister(instr->GetRd());
6801   SimVRegister& rn = ReadVRegister(instr->GetRn());
6802   SimVRegister& rm = ReadVRegister(instr->GetRm());
6803 
6804   switch (instr->Mask(NEONScalar3SameFP16Mask)) {
6805     case NEON_FABD_H_scalar:
6806       fabd(kFormatH, rd, rn, rm);
6807       break;
6808     case NEON_FMULX_H_scalar:
6809       fmulx(kFormatH, rd, rn, rm);
6810       break;
6811     case NEON_FCMEQ_H_scalar:
6812       fcmp(kFormatH, rd, rn, rm, eq);
6813       break;
6814     case NEON_FCMGE_H_scalar:
6815       fcmp(kFormatH, rd, rn, rm, ge);
6816       break;
6817     case NEON_FCMGT_H_scalar:
6818       fcmp(kFormatH, rd, rn, rm, gt);
6819       break;
6820     case NEON_FACGE_H_scalar:
6821       fabscmp(kFormatH, rd, rn, rm, ge);
6822       break;
6823     case NEON_FACGT_H_scalar:
6824       fabscmp(kFormatH, rd, rn, rm, gt);
6825       break;
6826     case NEON_FRECPS_H_scalar:
6827       frecps(kFormatH, rd, rn, rm);
6828       break;
6829     case NEON_FRSQRTS_H_scalar:
6830       frsqrts(kFormatH, rd, rn, rm);
6831       break;
6832     default:
6833       VIXL_UNREACHABLE();
6834   }
6835 }
6836 
6837 
6838 void Simulator::VisitNEONScalar3SameExtra(const Instruction* instr) {
6839   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
6840   VectorFormat vf = nfd.GetVectorFormat();
6841 
6842   SimVRegister& rd = ReadVRegister(instr->GetRd());
6843   SimVRegister& rn = ReadVRegister(instr->GetRn());
6844   SimVRegister& rm = ReadVRegister(instr->GetRm());
6845 
6846   switch (instr->Mask(NEONScalar3SameExtraMask)) {
6847     case NEON_SQRDMLAH_scalar:
6848       sqrdmlah(vf, rd, rn, rm);
6849       break;
6850     case NEON_SQRDMLSH_scalar:
6851       sqrdmlsh(vf, rd, rn, rm);
6852       break;
6853     default:
6854       VIXL_UNIMPLEMENTED();
6855   }
6856 }
6857 
6858 void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
6859   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
6860   VectorFormat vf = nfd.GetVectorFormat();
6861   VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
6862 
6863   SimVRegister& rd = ReadVRegister(instr->GetRd());
6864   SimVRegister& rn = ReadVRegister(instr->GetRn());
6865   ByElementOp Op = NULL;
6866 
6867   int rm_reg = instr->GetRm();
6868   int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
6869   if (instr->GetNEONSize() == 1) {
6870     rm_reg &= 0xf;
6871     index = (index << 1) | instr->GetNEONM();
6872   }
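  // For 16-bit source lanes the index is H:L:M (0-7) and only Vm<3:0> selects
  // the register; otherwise the index is H:L.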
6873 
6874   switch (instr->Mask(NEONScalarByIndexedElementMask)) {
6875     case NEON_SQDMULL_byelement_scalar:
6876       Op = &Simulator::sqdmull;
6877       break;
6878     case NEON_SQDMLAL_byelement_scalar:
6879       Op = &Simulator::sqdmlal;
6880       break;
6881     case NEON_SQDMLSL_byelement_scalar:
6882       Op = &Simulator::sqdmlsl;
6883       break;
6884     case NEON_SQDMULH_byelement_scalar:
6885       Op = &Simulator::sqdmulh;
6886       vf = vf_r;
6887       break;
6888     case NEON_SQRDMULH_byelement_scalar:
6889       Op = &Simulator::sqrdmulh;
6890       vf = vf_r;
6891       break;
6892     case NEON_SQRDMLAH_byelement_scalar:
6893       Op = &Simulator::sqrdmlah;
6894       vf = vf_r;
6895       break;
6896     case NEON_SQRDMLSH_byelement_scalar:
6897       Op = &Simulator::sqrdmlsh;
6898       vf = vf_r;
6899       break;
6900     default:
6901       vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
6902       index = instr->GetNEONH();
6903       if (instr->GetFPType() == 0) {
6904         index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM();
6905         rm_reg &= 0xf;
6906         vf = kFormatH;
6907       } else if ((instr->GetFPType() & 1) == 0) {
6908         index = (index << 1) | instr->GetNEONL();
6909       }
6910       switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
6911         case NEON_FMUL_H_byelement_scalar:
6912         case NEON_FMUL_byelement_scalar:
6913           Op = &Simulator::fmul;
6914           break;
6915         case NEON_FMLA_H_byelement_scalar:
6916         case NEON_FMLA_byelement_scalar:
6917           Op = &Simulator::fmla;
6918           break;
6919         case NEON_FMLS_H_byelement_scalar:
6920         case NEON_FMLS_byelement_scalar:
6921           Op = &Simulator::fmls;
6922           break;
6923         case NEON_FMULX_H_byelement_scalar:
6924         case NEON_FMULX_byelement_scalar:
6925           Op = &Simulator::fmulx;
6926           break;
6927         default:
6928           VIXL_UNIMPLEMENTED();
6929       }
6930   }
6931 
6932   (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index);
6933 }
6934 
6935 
6936 void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
6937   NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
6938   VectorFormat vf = nfd.GetVectorFormat();
6939 
6940   SimVRegister& rd = ReadVRegister(instr->GetRd());
6941   SimVRegister& rn = ReadVRegister(instr->GetRn());
6942 
6943   if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
6944     int imm5 = instr->GetImmNEON5();
6945     int tz = CountTrailingZeros(imm5, 32);
6946     int rn_index = imm5 >> (tz + 1);
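    // The lowest set bit of imm5 encodes the lane size; the bits above it give
    // the lane index (e.g. imm5 = "xxx10" selects an H lane with index xxx).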
6947     dup_element(vf, rd, rn, rn_index);
6948   } else {
6949     VIXL_UNIMPLEMENTED();
6950   }
6951 }
6952 
6953 
6954 void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
6955   NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarPairwiseFormatMap());
6956   VectorFormat vf = nfd.GetVectorFormat();
6957 
6958   SimVRegister& rd = ReadVRegister(instr->GetRd());
6959   SimVRegister& rn = ReadVRegister(instr->GetRn());
6960   switch (instr->Mask(NEONScalarPairwiseMask)) {
6961     case NEON_ADDP_scalar: {
6962       // All pairwise operations except ADDP use bit U to differentiate FP16
6963       // from FP32/FP64 variations.
6964       NEONFormatDecoder nfd_addp(instr, NEONFormatDecoder::FPScalarFormatMap());
6965       addp(nfd_addp.GetVectorFormat(), rd, rn);
6966       break;
6967     }
6968     case NEON_FADDP_h_scalar:
6969     case NEON_FADDP_scalar:
6970       faddp(vf, rd, rn);
6971       break;
6972     case NEON_FMAXP_h_scalar:
6973     case NEON_FMAXP_scalar:
6974       fmaxp(vf, rd, rn);
6975       break;
6976     case NEON_FMAXNMP_h_scalar:
6977     case NEON_FMAXNMP_scalar:
6978       fmaxnmp(vf, rd, rn);
6979       break;
6980     case NEON_FMINP_h_scalar:
6981     case NEON_FMINP_scalar:
6982       fminp(vf, rd, rn);
6983       break;
6984     case NEON_FMINNMP_h_scalar:
6985     case NEON_FMINNMP_scalar:
6986       fminnmp(vf, rd, rn);
6987       break;
6988     default:
6989       VIXL_UNIMPLEMENTED();
6990   }
6991 }
6992 
6993 
6994 void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
6995   SimVRegister& rd = ReadVRegister(instr->GetRd());
6996   SimVRegister& rn = ReadVRegister(instr->GetRn());
6997   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
6998 
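  // immh = 0001->B, 001x->H, 01xx->S, 1xxx->D; immh = 0000 is undefined.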
6999   static const NEONFormatMap map = {{22, 21, 20, 19},
7000                                     {NF_UNDEF,
7001                                      NF_B,
7002                                      NF_H,
7003                                      NF_H,
7004                                      NF_S,
7005                                      NF_S,
7006                                      NF_S,
7007                                      NF_S,
7008                                      NF_D,
7009                                      NF_D,
7010                                      NF_D,
7011                                      NF_D,
7012                                      NF_D,
7013                                      NF_D,
7014                                      NF_D,
7015                                      NF_D}};
7016   NEONFormatDecoder nfd(instr, &map);
7017   VectorFormat vf = nfd.GetVectorFormat();
7018 
7019   int highest_set_bit = HighestSetBitPosition(instr->GetImmNEONImmh());
7020   int immh_immb = instr->GetImmNEONImmhImmb();
7021   int right_shift = (16 << highest_set_bit) - immh_immb;
7022   int left_shift = immh_immb - (8 << highest_set_bit);
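  // For example, with D lanes immh<3> is set (highest_set_bit == 3), so
  // right_shift = 128 - immh:immb (1-64) and left_shift = immh:immb - 64
  // (0-63).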
7023   switch (instr->Mask(NEONScalarShiftImmediateMask)) {
7024     case NEON_SHL_scalar:
7025       shl(vf, rd, rn, left_shift);
7026       break;
7027     case NEON_SLI_scalar:
7028       sli(vf, rd, rn, left_shift);
7029       break;
7030     case NEON_SQSHL_imm_scalar:
7031       sqshl(vf, rd, rn, left_shift);
7032       break;
7033     case NEON_UQSHL_imm_scalar:
7034       uqshl(vf, rd, rn, left_shift);
7035       break;
7036     case NEON_SQSHLU_scalar:
7037       sqshlu(vf, rd, rn, left_shift);
7038       break;
7039     case NEON_SRI_scalar:
7040       sri(vf, rd, rn, right_shift);
7041       break;
7042     case NEON_SSHR_scalar:
7043       sshr(vf, rd, rn, right_shift);
7044       break;
7045     case NEON_USHR_scalar:
7046       ushr(vf, rd, rn, right_shift);
7047       break;
7048     case NEON_SRSHR_scalar:
7049       sshr(vf, rd, rn, right_shift).Round(vf);
7050       break;
7051     case NEON_URSHR_scalar:
7052       ushr(vf, rd, rn, right_shift).Round(vf);
7053       break;
7054     case NEON_SSRA_scalar:
7055       ssra(vf, rd, rn, right_shift);
7056       break;
7057     case NEON_USRA_scalar:
7058       usra(vf, rd, rn, right_shift);
7059       break;
7060     case NEON_SRSRA_scalar:
7061       srsra(vf, rd, rn, right_shift);
7062       break;
7063     case NEON_URSRA_scalar:
7064       ursra(vf, rd, rn, right_shift);
7065       break;
7066     case NEON_UQSHRN_scalar:
7067       uqshrn(vf, rd, rn, right_shift);
7068       break;
7069     case NEON_UQRSHRN_scalar:
7070       uqrshrn(vf, rd, rn, right_shift);
7071       break;
7072     case NEON_SQSHRN_scalar:
7073       sqshrn(vf, rd, rn, right_shift);
7074       break;
7075     case NEON_SQRSHRN_scalar:
7076       sqrshrn(vf, rd, rn, right_shift);
7077       break;
7078     case NEON_SQSHRUN_scalar:
7079       sqshrun(vf, rd, rn, right_shift);
7080       break;
7081     case NEON_SQRSHRUN_scalar:
7082       sqrshrun(vf, rd, rn, right_shift);
7083       break;
7084     case NEON_FCVTZS_imm_scalar:
7085       fcvts(vf, rd, rn, FPZero, right_shift);
7086       break;
7087     case NEON_FCVTZU_imm_scalar:
7088       fcvtu(vf, rd, rn, FPZero, right_shift);
7089       break;
7090     case NEON_SCVTF_imm_scalar:
7091       scvtf(vf, rd, rn, right_shift, fpcr_rounding);
7092       break;
7093     case NEON_UCVTF_imm_scalar:
7094       ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
7095       break;
7096     default:
7097       VIXL_UNIMPLEMENTED();
7098   }
7099 }
7100 
7101 
7102 void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
7103   SimVRegister& rd = ReadVRegister(instr->GetRd());
7104   SimVRegister& rn = ReadVRegister(instr->GetRn());
7105   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
7106 
7107   // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
7108   // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
7109   static const NEONFormatMap map = {{22, 21, 20, 19, 30},
7110                                     {NF_UNDEF, NF_UNDEF, NF_8B,    NF_16B,
7111                                      NF_4H,    NF_8H,    NF_4H,    NF_8H,
7112                                      NF_2S,    NF_4S,    NF_2S,    NF_4S,
7113                                      NF_2S,    NF_4S,    NF_2S,    NF_4S,
7114                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
7115                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
7116                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
7117                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D}};
7118   NEONFormatDecoder nfd(instr, &map);
7119   VectorFormat vf = nfd.GetVectorFormat();
7120 
7121   // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
7122   static const NEONFormatMap map_l =
7123       {{22, 21, 20, 19},
7124        {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
7125   VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
7126 
7127   int highest_set_bit = HighestSetBitPosition(instr->GetImmNEONImmh());
7128   int immh_immb = instr->GetImmNEONImmhImmb();
7129   int right_shift = (16 << highest_set_bit) - immh_immb;
7130   int left_shift = immh_immb - (8 << highest_set_bit);
7131 
7132   switch (instr->Mask(NEONShiftImmediateMask)) {
7133     case NEON_SHL:
7134       shl(vf, rd, rn, left_shift);
7135       break;
7136     case NEON_SLI:
7137       sli(vf, rd, rn, left_shift);
7138       break;
7139     case NEON_SQSHLU:
7140       sqshlu(vf, rd, rn, left_shift);
7141       break;
7142     case NEON_SRI:
7143       sri(vf, rd, rn, right_shift);
7144       break;
7145     case NEON_SSHR:
7146       sshr(vf, rd, rn, right_shift);
7147       break;
7148     case NEON_USHR:
7149       ushr(vf, rd, rn, right_shift);
7150       break;
7151     case NEON_SRSHR:
7152       sshr(vf, rd, rn, right_shift).Round(vf);
7153       break;
7154     case NEON_URSHR:
7155       ushr(vf, rd, rn, right_shift).Round(vf);
7156       break;
7157     case NEON_SSRA:
7158       ssra(vf, rd, rn, right_shift);
7159       break;
7160     case NEON_USRA:
7161       usra(vf, rd, rn, right_shift);
7162       break;
7163     case NEON_SRSRA:
7164       srsra(vf, rd, rn, right_shift);
7165       break;
7166     case NEON_URSRA:
7167       ursra(vf, rd, rn, right_shift);
7168       break;
7169     case NEON_SQSHL_imm:
7170       sqshl(vf, rd, rn, left_shift);
7171       break;
7172     case NEON_UQSHL_imm:
7173       uqshl(vf, rd, rn, left_shift);
7174       break;
7175     case NEON_SCVTF_imm:
7176       scvtf(vf, rd, rn, right_shift, fpcr_rounding);
7177       break;
7178     case NEON_UCVTF_imm:
7179       ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
7180       break;
7181     case NEON_FCVTZS_imm:
7182       fcvts(vf, rd, rn, FPZero, right_shift);
7183       break;
7184     case NEON_FCVTZU_imm:
7185       fcvtu(vf, rd, rn, FPZero, right_shift);
7186       break;
7187     case NEON_SSHLL:
7188       vf = vf_l;
7189       if (instr->Mask(NEON_Q)) {
7190         sshll2(vf, rd, rn, left_shift);
7191       } else {
7192         sshll(vf, rd, rn, left_shift);
7193       }
7194       break;
7195     case NEON_USHLL:
7196       vf = vf_l;
7197       if (instr->Mask(NEON_Q)) {
7198         ushll2(vf, rd, rn, left_shift);
7199       } else {
7200         ushll(vf, rd, rn, left_shift);
7201       }
7202       break;
7203     case NEON_SHRN:
7204       if (instr->Mask(NEON_Q)) {
7205         shrn2(vf, rd, rn, right_shift);
7206       } else {
7207         shrn(vf, rd, rn, right_shift);
7208       }
7209       break;
7210     case NEON_RSHRN:
7211       if (instr->Mask(NEON_Q)) {
7212         rshrn2(vf, rd, rn, right_shift);
7213       } else {
7214         rshrn(vf, rd, rn, right_shift);
7215       }
7216       break;
7217     case NEON_UQSHRN:
7218       if (instr->Mask(NEON_Q)) {
7219         uqshrn2(vf, rd, rn, right_shift);
7220       } else {
7221         uqshrn(vf, rd, rn, right_shift);
7222       }
7223       break;
7224     case NEON_UQRSHRN:
7225       if (instr->Mask(NEON_Q)) {
7226         uqrshrn2(vf, rd, rn, right_shift);
7227       } else {
7228         uqrshrn(vf, rd, rn, right_shift);
7229       }
7230       break;
7231     case NEON_SQSHRN:
7232       if (instr->Mask(NEON_Q)) {
7233         sqshrn2(vf, rd, rn, right_shift);
7234       } else {
7235         sqshrn(vf, rd, rn, right_shift);
7236       }
7237       break;
7238     case NEON_SQRSHRN:
7239       if (instr->Mask(NEON_Q)) {
7240         sqrshrn2(vf, rd, rn, right_shift);
7241       } else {
7242         sqrshrn(vf, rd, rn, right_shift);
7243       }
7244       break;
7245     case NEON_SQSHRUN:
7246       if (instr->Mask(NEON_Q)) {
7247         sqshrun2(vf, rd, rn, right_shift);
7248       } else {
7249         sqshrun(vf, rd, rn, right_shift);
7250       }
7251       break;
7252     case NEON_SQRSHRUN:
7253       if (instr->Mask(NEON_Q)) {
7254         sqrshrun2(vf, rd, rn, right_shift);
7255       } else {
7256         sqrshrun(vf, rd, rn, right_shift);
7257       }
7258       break;
7259     default:
7260       VIXL_UNIMPLEMENTED();
7261   }
7262 }
7263 
7264 
7265 void Simulator::VisitNEONTable(const Instruction* instr) {
7266   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
7267   VectorFormat vf = nfd.GetVectorFormat();
7268 
7269   SimVRegister& rd = ReadVRegister(instr->GetRd());
7270   SimVRegister& rn = ReadVRegister(instr->GetRn());
7271   SimVRegister& rn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfVRegisters);
7272   SimVRegister& rn3 = ReadVRegister((instr->GetRn() + 2) % kNumberOfVRegisters);
7273   SimVRegister& rn4 = ReadVRegister((instr->GetRn() + 3) % kNumberOfVRegisters);
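  // The table is formed from one to four consecutive registers, wrapping from
  // v31 back to v0.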
7274   SimVRegister& rm = ReadVRegister(instr->GetRm());
7275 
7276   switch (instr->Mask(NEONTableMask)) {
7277     case NEON_TBL_1v:
7278       tbl(vf, rd, rn, rm);
7279       break;
7280     case NEON_TBL_2v:
7281       tbl(vf, rd, rn, rn2, rm);
7282       break;
7283     case NEON_TBL_3v:
7284       tbl(vf, rd, rn, rn2, rn3, rm);
7285       break;
7286     case NEON_TBL_4v:
7287       tbl(vf, rd, rn, rn2, rn3, rn4, rm);
7288       break;
7289     case NEON_TBX_1v:
7290       tbx(vf, rd, rn, rm);
7291       break;
7292     case NEON_TBX_2v:
7293       tbx(vf, rd, rn, rn2, rm);
7294       break;
7295     case NEON_TBX_3v:
7296       tbx(vf, rd, rn, rn2, rn3, rm);
7297       break;
7298     case NEON_TBX_4v:
7299       tbx(vf, rd, rn, rn2, rn3, rn4, rm);
7300       break;
7301     default:
7302       VIXL_UNIMPLEMENTED();
7303   }
7304 }
7305 
7306 
7307 void Simulator::VisitNEONPerm(const Instruction* instr) {
7308   NEONFormatDecoder nfd(instr);
7309   VectorFormat vf = nfd.GetVectorFormat();
7310 
7311   SimVRegister& rd = ReadVRegister(instr->GetRd());
7312   SimVRegister& rn = ReadVRegister(instr->GetRn());
7313   SimVRegister& rm = ReadVRegister(instr->GetRm());
7314 
7315   switch (instr->Mask(NEONPermMask)) {
7316     case NEON_TRN1:
7317       trn1(vf, rd, rn, rm);
7318       break;
7319     case NEON_TRN2:
7320       trn2(vf, rd, rn, rm);
7321       break;
7322     case NEON_UZP1:
7323       uzp1(vf, rd, rn, rm);
7324       break;
7325     case NEON_UZP2:
7326       uzp2(vf, rd, rn, rm);
7327       break;
7328     case NEON_ZIP1:
7329       zip1(vf, rd, rn, rm);
7330       break;
7331     case NEON_ZIP2:
7332       zip2(vf, rd, rn, rm);
7333       break;
7334     default:
7335       VIXL_UNIMPLEMENTED();
7336   }
7337 }
7338 
7339 void Simulator::VisitSVEAddressGeneration(const Instruction* instr) {
7340   SimVRegister& zd = ReadVRegister(instr->GetRd());
7341   SimVRegister& zn = ReadVRegister(instr->GetRn());
7342   SimVRegister& zm = ReadVRegister(instr->GetRm());
7343   SimVRegister temp;
7344 
7345   VectorFormat vform = kFormatVnD;
7346   mov(vform, temp, zm);
7347 
7348   switch (instr->Mask(SVEAddressGenerationMask)) {
7349     case ADR_z_az_d_s32_scaled:
7350       sxt(vform, temp, temp, kSRegSize);
7351       break;
7352     case ADR_z_az_d_u32_scaled:
7353       uxt(vform, temp, temp, kSRegSize);
7354       break;
7355     case ADR_z_az_s_same_scaled:
7356       vform = kFormatVnS;
7357       break;
7358     case ADR_z_az_d_same_scaled:
7359       // Nothing to do.
7360       break;
7361     default:
7362       VIXL_UNIMPLEMENTED();
7363       break;
7364   }
7365 
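  // Compute zd = zn + (extended zm << shift_amount), where the scaling amount
  // (0-3) comes from bits <11:10>.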
7366   int shift_amount = instr->ExtractBits(11, 10);
7367   shl(vform, temp, temp, shift_amount);
7368   add(vform, zd, zn, temp);
7369 }
7370 
7371 void Simulator::VisitSVEBitwiseLogicalWithImm_Unpredicated(
7372     const Instruction* instr) {
7373   Instr op = instr->Mask(SVEBitwiseLogicalWithImm_UnpredicatedMask);
7374   switch (op) {
7375     case AND_z_zi:
7376     case EOR_z_zi:
7377     case ORR_z_zi: {
7378       int lane_size = instr->GetSVEBitwiseImmLaneSizeInBytesLog2();
7379       uint64_t imm = instr->GetSVEImmLogical();
7380       // A valid logical immediate always has at least one bit set.
7381       VIXL_ASSERT(imm != 0);
7382       SVEBitwiseImmHelper(static_cast<SVEBitwiseLogicalWithImm_UnpredicatedOp>(
7383                               op),
7384                           SVEFormatFromLaneSizeInBytesLog2(lane_size),
7385                           ReadVRegister(instr->GetRd()),
7386                           imm);
7387       break;
7388     }
7389     default:
7390       VIXL_UNIMPLEMENTED();
7391       break;
7392   }
7393 }
7394 
7395 void Simulator::VisitSVEBroadcastBitmaskImm(const Instruction* instr) {
7396   switch (instr->Mask(SVEBroadcastBitmaskImmMask)) {
7397     case DUPM_z_i: {
7398       /* DUPM uses the same lane size and immediate encoding as bitwise logical
7399        * immediate instructions. */
7400       int lane_size = instr->GetSVEBitwiseImmLaneSizeInBytesLog2();
7401       uint64_t imm = instr->GetSVEImmLogical();
7402       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
7403       dup_immediate(vform, ReadVRegister(instr->GetRd()), imm);
7404       break;
7405     }
7406     default:
7407       VIXL_UNIMPLEMENTED();
7408       break;
7409   }
7410 }
7411 
7412 void Simulator::VisitSVEBitwiseLogicalUnpredicated(const Instruction* instr) {
7413   SimVRegister& zd = ReadVRegister(instr->GetRd());
7414   SimVRegister& zn = ReadVRegister(instr->GetRn());
7415   SimVRegister& zm = ReadVRegister(instr->GetRm());
7416   Instr op = instr->Mask(SVEBitwiseLogicalUnpredicatedMask);
7417 
7418   LogicalOp logical_op;
7419   switch (op) {
7420     case AND_z_zz:
7421       logical_op = AND;
7422       break;
7423     case BIC_z_zz:
7424       logical_op = BIC;
7425       break;
7426     case EOR_z_zz:
7427       logical_op = EOR;
7428       break;
7429     case ORR_z_zz:
7430       logical_op = ORR;
7431       break;
7432     default:
7433       logical_op = LogicalOpMask;
7434       VIXL_UNIMPLEMENTED();
7435       break;
7436   }
7437   // Lane size of registers is irrelevant to the bitwise operations, so perform
7438   // the operation on D-sized lanes.
7439   SVEBitwiseLogicalUnpredicatedHelper(logical_op, kFormatVnD, zd, zn, zm);
7440 }
7441 
7442 void Simulator::VisitSVEBitwiseShiftByImm_Predicated(const Instruction* instr) {
7443   SimVRegister& zdn = ReadVRegister(instr->GetRd());
7444   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7445 
7446   SimVRegister scratch;
7447   SimVRegister result;
7448 
7449   bool for_division = false;
7450   Shift shift_op = NO_SHIFT;
7451   switch (instr->Mask(SVEBitwiseShiftByImm_PredicatedMask)) {
7452     case ASRD_z_p_zi:
7453       shift_op = ASR;
7454       for_division = true;
7455       break;
7456     case ASR_z_p_zi:
7457       shift_op = ASR;
7458       break;
7459     case LSL_z_p_zi:
7460       shift_op = LSL;
7461       break;
7462     case LSR_z_p_zi:
7463       shift_op = LSR;
7464       break;
7465     default:
7466       VIXL_UNIMPLEMENTED();
7467       break;
7468   }
7469 
7470   std::pair<int, int> shift_and_lane_size =
7471       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ true);
7472   unsigned lane_size = shift_and_lane_size.second;
7473   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
7474   int shift_dist = shift_and_lane_size.first;
7475 
7476   if ((shift_op == ASR) && for_division) {
7477     asrd(vform, result, zdn, shift_dist);
7478   } else {
7479     if (shift_op == LSL) {
7480       // Shift distance is computed differently for LSL. Convert the result.
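      // (A left-shift distance d is recovered as esize - decoded distance,
      // where esize = 8 << lane_size.)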
7481       shift_dist = (8 << lane_size) - shift_dist;
7482     }
7483     dup_immediate(vform, scratch, shift_dist);
7484     SVEBitwiseShiftHelper(shift_op, vform, result, zdn, scratch, false);
7485   }
7486   mov_merging(vform, zdn, pg, result);
7487 }
7488 
7489 void Simulator::VisitSVEBitwiseShiftByVector_Predicated(
7490     const Instruction* instr) {
7491   VectorFormat vform = instr->GetSVEVectorFormat();
7492   SimVRegister& zdn = ReadVRegister(instr->GetRd());
7493   SimVRegister& zm = ReadVRegister(instr->GetRn());
7494   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7495 
7496   SimVRegister result;
7497   SimVRegister shiftand;  // Vector to be shifted.
7498   SimVRegister shiftor;   // Vector shift amount.
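  // The reversed forms (ASRR, LSLR, LSRR) shift zm by zdn rather than zdn by
  // zm, so their operands are swapped below.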
7499 
7500   Shift shift_op = ASR;
7501   mov(vform, shiftand, zdn);
7502   mov(vform, shiftor, zm);
7503 
7504   switch (instr->Mask(SVEBitwiseShiftByVector_PredicatedMask)) {
7505     case ASRR_z_p_zz:
7506       mov(vform, shiftand, zm);
7507       mov(vform, shiftor, zdn);
7508       VIXL_FALLTHROUGH();
7509     case ASR_z_p_zz:
7510       break;
7511     case LSLR_z_p_zz:
7512       mov(vform, shiftand, zm);
7513       mov(vform, shiftor, zdn);
7514       VIXL_FALLTHROUGH();
7515     case LSL_z_p_zz:
7516       shift_op = LSL;
7517       break;
7518     case LSRR_z_p_zz:
7519       mov(vform, shiftand, zm);
7520       mov(vform, shiftor, zdn);
7521       VIXL_FALLTHROUGH();
7522     case LSR_z_p_zz:
7523       shift_op = LSR;
7524       break;
7525     default:
7526       VIXL_UNIMPLEMENTED();
7527       break;
7528   }
7529   SVEBitwiseShiftHelper(shift_op,
7530                         vform,
7531                         result,
7532                         shiftand,
7533                         shiftor,
7534                         /* is_wide_elements = */ false);
7535   mov_merging(vform, zdn, pg, result);
7536 }
7537 
7538 void Simulator::VisitSVEBitwiseShiftByWideElements_Predicated(
7539     const Instruction* instr) {
7540   VectorFormat vform = instr->GetSVEVectorFormat();
7541   SimVRegister& zdn = ReadVRegister(instr->GetRd());
7542   SimVRegister& zm = ReadVRegister(instr->GetRn());
7543   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7544 
7545   SimVRegister result;
7546   Shift shift_op = ASR;
7547 
7548   switch (instr->Mask(SVEBitwiseShiftByWideElements_PredicatedMask)) {
7549     case ASR_z_p_zw:
7550       break;
7551     case LSL_z_p_zw:
7552       shift_op = LSL;
7553       break;
7554     case LSR_z_p_zw:
7555       shift_op = LSR;
7556       break;
7557     default:
7558       VIXL_UNIMPLEMENTED();
7559       break;
7560   }
7561   SVEBitwiseShiftHelper(shift_op,
7562                         vform,
7563                         result,
7564                         zdn,
7565                         zm,
7566                         /* is_wide_elements = */ true);
7567   mov_merging(vform, zdn, pg, result);
7568 }
7569 
7570 void Simulator::VisitSVEBitwiseShiftUnpredicated(const Instruction* instr) {
7571   SimVRegister& zd = ReadVRegister(instr->GetRd());
7572   SimVRegister& zn = ReadVRegister(instr->GetRn());
7573 
7574   Shift shift_op;
7575   switch (instr->Mask(SVEBitwiseShiftUnpredicatedMask)) {
7576     case ASR_z_zi:
7577     case ASR_z_zw:
7578       shift_op = ASR;
7579       break;
7580     case LSL_z_zi:
7581     case LSL_z_zw:
7582       shift_op = LSL;
7583       break;
7584     case LSR_z_zi:
7585     case LSR_z_zw:
7586       shift_op = LSR;
7587       break;
7588     default:
7589       shift_op = NO_SHIFT;
7590       VIXL_UNIMPLEMENTED();
7591       break;
7592   }
7593 
7594   switch (instr->Mask(SVEBitwiseShiftUnpredicatedMask)) {
7595     case ASR_z_zi:
7596     case LSL_z_zi:
7597     case LSR_z_zi: {
7598       SimVRegister scratch;
7599       std::pair<int, int> shift_and_lane_size =
7600           instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
7601       unsigned lane_size = shift_and_lane_size.second;
7602       VIXL_ASSERT(lane_size <= kDRegSizeInBytesLog2);
7603       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
7604       int shift_dist = shift_and_lane_size.first;
7605       if (shift_op == LSL) {
7606         // Shift distance is computed differently for LSL. Convert the result.
7607         shift_dist = (8 << lane_size) - shift_dist;
7608       }
7609       dup_immediate(vform, scratch, shift_dist);
7610       SVEBitwiseShiftHelper(shift_op, vform, zd, zn, scratch, false);
7611       break;
7612     }
7613     case ASR_z_zw:
7614     case LSL_z_zw:
7615     case LSR_z_zw:
7616       SVEBitwiseShiftHelper(shift_op,
7617                             instr->GetSVEVectorFormat(),
7618                             zd,
7619                             zn,
7620                             ReadVRegister(instr->GetRm()),
7621                             true);
7622       break;
7623     default:
7624       VIXL_UNIMPLEMENTED();
7625       break;
7626   }
7627 }
7628 
7629 void Simulator::VisitSVEIncDecRegisterByElementCount(const Instruction* instr) {
7630   // Although the instructions have a separate encoding class, the lane size is
7631   // encoded in the same way as most other SVE instructions.
7632   VectorFormat vform = instr->GetSVEVectorFormat();
7633 
7634   int pattern = instr->GetImmSVEPredicateConstraint();
7635   int count = GetPredicateConstraintLaneCount(vform, pattern);
7636   int multiplier = instr->ExtractBits(19, 16) + 1;
7637 
7638   switch (instr->Mask(SVEIncDecRegisterByElementCountMask)) {
7639     case DECB_r_rs:
7640     case DECD_r_rs:
7641     case DECH_r_rs:
7642     case DECW_r_rs:
7643       count = -count;
7644       break;
7645     case INCB_r_rs:
7646     case INCD_r_rs:
7647     case INCH_r_rs:
7648     case INCW_r_rs:
7649       // Nothing to do.
7650       break;
7651     default:
7652       VIXL_UNIMPLEMENTED();
7653       return;
7654   }
7655 
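  // For example, "incd x0, all, mul #2" adds 2 * (the number of D-sized lanes
  // in a vector) to x0.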
7656   WriteXRegister(instr->GetRd(),
7657                  IncDecN(ReadXRegister(instr->GetRd()),
7658                          count * multiplier,
7659                          kXRegSize));
7660 }
7661 
7662 void Simulator::VisitSVEIncDecVectorByElementCount(const Instruction* instr) {
7663   VectorFormat vform = instr->GetSVEVectorFormat();
7664   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
7665     VIXL_UNIMPLEMENTED();
7666   }
7667 
7668   int pattern = instr->GetImmSVEPredicateConstraint();
7669   int count = GetPredicateConstraintLaneCount(vform, pattern);
7670   int multiplier = instr->ExtractBits(19, 16) + 1;
7671 
7672   switch (instr->Mask(SVEIncDecVectorByElementCountMask)) {
7673     case DECD_z_zs:
7674     case DECH_z_zs:
7675     case DECW_z_zs:
7676       count = -count;
7677       break;
7678     case INCD_z_zs:
7679     case INCH_z_zs:
7680     case INCW_z_zs:
7681       // Nothing to do.
7682       break;
7683     default:
7684       VIXL_UNIMPLEMENTED();
7685       break;
7686   }
7687 
7688   SimVRegister& zd = ReadVRegister(instr->GetRd());
7689   SimVRegister scratch;
7690   dup_immediate(vform,
7691                 scratch,
7692                 IncDecN(0,
7693                         count * multiplier,
7694                         LaneSizeInBitsFromFormat(vform)));
7695   add(vform, zd, zd, scratch);
7696 }
7697 
7698 void Simulator::VisitSVESaturatingIncDecRegisterByElementCount(
7699     const Instruction* instr) {
7700   // Although the instructions have a separate encoding class, the lane size is
7701   // encoded in the same way as most other SVE instructions.
7702   VectorFormat vform = instr->GetSVEVectorFormat();
7703 
7704   int pattern = instr->GetImmSVEPredicateConstraint();
7705   int count = GetPredicateConstraintLaneCount(vform, pattern);
7706   int multiplier = instr->ExtractBits(19, 16) + 1;
7707 
7708   unsigned width = kXRegSize;
7709   bool is_signed = false;
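  // The _sx and _uw forms saturate to the signed/unsigned 32-bit (W) range;
  // the _x forms use the full 64-bit range.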
7710 
7711   switch (instr->Mask(SVESaturatingIncDecRegisterByElementCountMask)) {
7712     case SQDECB_r_rs_sx:
7713     case SQDECD_r_rs_sx:
7714     case SQDECH_r_rs_sx:
7715     case SQDECW_r_rs_sx:
7716       width = kWRegSize;
7717       VIXL_FALLTHROUGH();
7718     case SQDECB_r_rs_x:
7719     case SQDECD_r_rs_x:
7720     case SQDECH_r_rs_x:
7721     case SQDECW_r_rs_x:
7722       is_signed = true;
7723       count = -count;
7724       break;
7725     case SQINCB_r_rs_sx:
7726     case SQINCD_r_rs_sx:
7727     case SQINCH_r_rs_sx:
7728     case SQINCW_r_rs_sx:
7729       width = kWRegSize;
7730       VIXL_FALLTHROUGH();
7731     case SQINCB_r_rs_x:
7732     case SQINCD_r_rs_x:
7733     case SQINCH_r_rs_x:
7734     case SQINCW_r_rs_x:
7735       is_signed = true;
7736       break;
7737     case UQDECB_r_rs_uw:
7738     case UQDECD_r_rs_uw:
7739     case UQDECH_r_rs_uw:
7740     case UQDECW_r_rs_uw:
7741       width = kWRegSize;
7742       VIXL_FALLTHROUGH();
7743     case UQDECB_r_rs_x:
7744     case UQDECD_r_rs_x:
7745     case UQDECH_r_rs_x:
7746     case UQDECW_r_rs_x:
7747       count = -count;
7748       break;
7749     case UQINCB_r_rs_uw:
7750     case UQINCD_r_rs_uw:
7751     case UQINCH_r_rs_uw:
7752     case UQINCW_r_rs_uw:
7753       width = kWRegSize;
7754       VIXL_FALLTHROUGH();
7755     case UQINCB_r_rs_x:
7756     case UQINCD_r_rs_x:
7757     case UQINCH_r_rs_x:
7758     case UQINCW_r_rs_x:
7759       // Nothing to do.
7760       break;
7761     default:
7762       VIXL_UNIMPLEMENTED();
7763       break;
7764   }
7765 
7766   WriteXRegister(instr->GetRd(),
7767                  IncDecN(ReadXRegister(instr->GetRd()),
7768                          count * multiplier,
7769                          width,
7770                          true,
7771                          is_signed));
7772 }
7773 
7774 void Simulator::VisitSVESaturatingIncDecVectorByElementCount(
7775     const Instruction* instr) {
7776   VectorFormat vform = instr->GetSVEVectorFormat();
7777   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
7778     VIXL_UNIMPLEMENTED();
7779   }
7780 
7781   int pattern = instr->GetImmSVEPredicateConstraint();
7782   int count = GetPredicateConstraintLaneCount(vform, pattern);
7783   int multiplier = instr->ExtractBits(19, 16) + 1;
7784 
7785   SimVRegister& zd = ReadVRegister(instr->GetRd());
7786   SimVRegister scratch;
7787   dup_immediate(vform,
7788                 scratch,
7789                 IncDecN(0,
7790                         count * multiplier,
7791                         LaneSizeInBitsFromFormat(vform)));
7792 
7793   switch (instr->Mask(SVESaturatingIncDecVectorByElementCountMask)) {
7794     case SQDECD_z_zs:
7795     case SQDECH_z_zs:
7796     case SQDECW_z_zs:
7797       sub(vform, zd, zd, scratch).SignedSaturate(vform);
7798       break;
7799     case SQINCD_z_zs:
7800     case SQINCH_z_zs:
7801     case SQINCW_z_zs:
7802       add(vform, zd, zd, scratch).SignedSaturate(vform);
7803       break;
7804     case UQDECD_z_zs:
7805     case UQDECH_z_zs:
7806     case UQDECW_z_zs:
7807       sub(vform, zd, zd, scratch).UnsignedSaturate(vform);
7808       break;
7809     case UQINCD_z_zs:
7810     case UQINCH_z_zs:
7811     case UQINCW_z_zs:
7812       add(vform, zd, zd, scratch).UnsignedSaturate(vform);
7813       break;
7814     default:
7815       VIXL_UNIMPLEMENTED();
7816       break;
7817   }
7818 }
7819 
7820 void Simulator::VisitSVEElementCount(const Instruction* instr) {
7821   switch (instr->Mask(SVEElementCountMask)) {
7822     case CNTB_r_s:
7823     case CNTD_r_s:
7824     case CNTH_r_s:
7825     case CNTW_r_s:
7826       // All handled below.
7827       break;
7828     default:
7829       VIXL_UNIMPLEMENTED();
7830       break;
7831   }
7832 
7833   // Although these instructions have a separate encoding class, the lane size
7834   // is encoded in the same way as most other SVE instructions.
7835   VectorFormat vform = instr->GetSVEVectorFormat();
7836 
7837   int pattern = instr->GetImmSVEPredicateConstraint();
7838   int count = GetPredicateConstraintLaneCount(vform, pattern);
7839   int multiplier = instr->ExtractBits(19, 16) + 1;
7840   WriteXRegister(instr->GetRd(), count * multiplier);
7841 }
7842 
7843 void Simulator::VisitSVEFPAccumulatingReduction(const Instruction* instr) {
7844   VectorFormat vform = instr->GetSVEVectorFormat();
7845   SimVRegister& vdn = ReadVRegister(instr->GetRd());
7846   SimVRegister& zm = ReadVRegister(instr->GetRn());
7847   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7848 
7849   switch (instr->Mask(SVEFPAccumulatingReductionMask)) {
7850     case FADDA_v_p_z:
7851       fadda(vform, vdn, pg, zm);
7852       break;
7853     default:
7854       VIXL_UNIMPLEMENTED();
7855       break;
7856   }
7857 }
7858 
7859 void Simulator::VisitSVEFPArithmetic_Predicated(const Instruction* instr) {
7860   VectorFormat vform = instr->GetSVEVectorFormat();
7861   SimVRegister& zdn = ReadVRegister(instr->GetRd());
7862   SimVRegister& zm = ReadVRegister(instr->GetRn());
7863   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7864 
7865   SimVRegister result;
7866 
7867   switch (instr->Mask(SVEFPArithmetic_PredicatedMask)) {
7868     case FABD_z_p_zz:
7869       fabd(vform, result, zdn, zm);
7870       break;
7871     case FADD_z_p_zz:
7872       fadd(vform, result, zdn, zm);
7873       break;
7874     case FDIVR_z_p_zz:
7875       fdiv(vform, result, zm, zdn);
7876       break;
7877     case FDIV_z_p_zz:
7878       fdiv(vform, result, zdn, zm);
7879       break;
7880     case FMAXNM_z_p_zz:
7881       fmaxnm(vform, result, zdn, zm);
7882       break;
7883     case FMAX_z_p_zz:
7884       fmax(vform, result, zdn, zm);
7885       break;
7886     case FMINNM_z_p_zz:
7887       fminnm(vform, result, zdn, zm);
7888       break;
7889     case FMIN_z_p_zz:
7890       fmin(vform, result, zdn, zm);
7891       break;
7892     case FMULX_z_p_zz:
7893       fmulx(vform, result, zdn, zm);
7894       break;
7895     case FMUL_z_p_zz:
7896       fmul(vform, result, zdn, zm);
7897       break;
7898     case FSCALE_z_p_zz:
7899       fscale(vform, result, zdn, zm);
7900       break;
7901     case FSUBR_z_p_zz:
7902       fsub(vform, result, zm, zdn);
7903       break;
7904     case FSUB_z_p_zz:
7905       fsub(vform, result, zdn, zm);
7906       break;
7907     default:
7908       VIXL_UNIMPLEMENTED();
7909       break;
7910   }
7911   mov_merging(vform, zdn, pg, result);
7912 }
7913 
7914 void Simulator::VisitSVEFPArithmeticWithImm_Predicated(
7915     const Instruction* instr) {
7916   VectorFormat vform = instr->GetSVEVectorFormat();
7917   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
7918     VIXL_UNIMPLEMENTED();
7919   }
7920 
7921   SimVRegister& zdn = ReadVRegister(instr->GetRd());
7922   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
7923   SimVRegister result;
7924 
7925   int i1 = instr->ExtractBit(5);
7926   SimVRegister add_sub_imm, min_max_imm, mul_imm;
7927   uint64_t half = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 0.5);
7928   uint64_t one = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 1.0);
7929   uint64_t two = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 2.0);
7930   dup_immediate(vform, add_sub_imm, i1 ? one : half);
7931   dup_immediate(vform, min_max_imm, i1 ? one : 0);
7932   dup_immediate(vform, mul_imm, i1 ? two : half);
7933 
7934   switch (instr->Mask(SVEFPArithmeticWithImm_PredicatedMask)) {
7935     case FADD_z_p_zs:
7936       fadd(vform, result, zdn, add_sub_imm);
7937       break;
7938     case FMAXNM_z_p_zs:
7939       fmaxnm(vform, result, zdn, min_max_imm);
7940       break;
7941     case FMAX_z_p_zs:
7942       fmax(vform, result, zdn, min_max_imm);
7943       break;
7944     case FMINNM_z_p_zs:
7945       fminnm(vform, result, zdn, min_max_imm);
7946       break;
7947     case FMIN_z_p_zs:
7948       fmin(vform, result, zdn, min_max_imm);
7949       break;
7950     case FMUL_z_p_zs:
7951       fmul(vform, result, zdn, mul_imm);
7952       break;
7953     case FSUBR_z_p_zs:
7954       fsub(vform, result, add_sub_imm, zdn);
7955       break;
7956     case FSUB_z_p_zs:
7957       fsub(vform, result, zdn, add_sub_imm);
7958       break;
7959     default:
7960       VIXL_UNIMPLEMENTED();
7961       break;
7962   }
7963   mov_merging(vform, zdn, pg, result);
7964 }
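
// The immediate operand above is implicit in the opcode: bit 5 (i1) selects
// between two fixed values per instruction class, as prepared before the
// switch: 0.5 or 1.0 for FADD/FSUB/FSUBR, 0.0 or 1.0 for the FMAX/FMIN
// family, and 0.5 or 2.0 for FMUL. For example, FMUL with i1 == 1 multiplies
// every active lane by 2.0.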
7965 
7966 void Simulator::VisitSVEFPTrigMulAddCoefficient(const Instruction* instr) {
7967   VectorFormat vform = instr->GetSVEVectorFormat();
7968   SimVRegister& zd = ReadVRegister(instr->GetRd());
7969   SimVRegister& zm = ReadVRegister(instr->GetRn());
7970 
7971   switch (instr->Mask(SVEFPTrigMulAddCoefficientMask)) {
7972     case FTMAD_z_zzi:
7973       ftmad(vform, zd, zd, zm, instr->ExtractBits(18, 16));
7974       break;
7975     default:
7976       VIXL_UNIMPLEMENTED();
7977       break;
7978   }
7979 }
7980 
7981 void Simulator::VisitSVEFPArithmeticUnpredicated(const Instruction* instr) {
7982   VectorFormat vform = instr->GetSVEVectorFormat();
7983   SimVRegister& zd = ReadVRegister(instr->GetRd());
7984   SimVRegister& zn = ReadVRegister(instr->GetRn());
7985   SimVRegister& zm = ReadVRegister(instr->GetRm());
7986 
7987   switch (instr->Mask(SVEFPArithmeticUnpredicatedMask)) {
7988     case FADD_z_zz:
7989       fadd(vform, zd, zn, zm);
7990       break;
7991     case FMUL_z_zz:
7992       fmul(vform, zd, zn, zm);
7993       break;
7994     case FRECPS_z_zz:
7995       frecps(vform, zd, zn, zm);
7996       break;
7997     case FRSQRTS_z_zz:
7998       frsqrts(vform, zd, zn, zm);
7999       break;
8000     case FSUB_z_zz:
8001       fsub(vform, zd, zn, zm);
8002       break;
8003     case FTSMUL_z_zz:
8004       ftsmul(vform, zd, zn, zm);
8005       break;
8006     default:
8007       VIXL_UNIMPLEMENTED();
8008       break;
8009   }
8010 }
8011 
8012 void Simulator::VisitSVEFPCompareVectors(const Instruction* instr) {
8013   SimPRegister& pd = ReadPRegister(instr->GetPd());
8014   SimVRegister& zn = ReadVRegister(instr->GetRn());
8015   SimVRegister& zm = ReadVRegister(instr->GetRm());
8016   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8017   VectorFormat vform = instr->GetSVEVectorFormat();
8018   SimVRegister result;
8019 
8020   switch (instr->Mask(SVEFPCompareVectorsMask)) {
8021     case FACGE_p_p_zz:
8022       fabscmp(vform, result, zn, zm, ge);
8023       break;
8024     case FACGT_p_p_zz:
8025       fabscmp(vform, result, zn, zm, gt);
8026       break;
8027     case FCMEQ_p_p_zz:
8028       fcmp(vform, result, zn, zm, eq);
8029       break;
8030     case FCMGE_p_p_zz:
8031       fcmp(vform, result, zn, zm, ge);
8032       break;
8033     case FCMGT_p_p_zz:
8034       fcmp(vform, result, zn, zm, gt);
8035       break;
8036     case FCMNE_p_p_zz:
8037       fcmp(vform, result, zn, zm, ne);
8038       break;
8039     case FCMUO_p_p_zz:
8040       fcmp(vform, result, zn, zm, uo);
8041       break;
8042     default:
8043       VIXL_UNIMPLEMENTED();
8044       break;
8045   }
8046 
8047   ExtractFromSimVRegister(vform, pd, result);
8048   mov_zeroing(pd, pg, pd);
8049 }
8050 
8051 void Simulator::VisitSVEFPCompareWithZero(const Instruction* instr) {
8052   SimPRegister& pd = ReadPRegister(instr->GetPd());
8053   SimVRegister& zn = ReadVRegister(instr->GetRn());
8054   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8055   VectorFormat vform = instr->GetSVEVectorFormat();
8056   SimVRegister result;
8057 
8058   SimVRegister zeros;
8059   dup_immediate(kFormatVnD, zeros, 0);
8060 
8061   switch (instr->Mask(SVEFPCompareWithZeroMask)) {
8062     case FCMEQ_p_p_z0:
8063       fcmp(vform, result, zn, zeros, eq);
8064       break;
8065     case FCMGE_p_p_z0:
8066       fcmp(vform, result, zn, zeros, ge);
8067       break;
8068     case FCMGT_p_p_z0:
8069       fcmp(vform, result, zn, zeros, gt);
8070       break;
8071     case FCMLE_p_p_z0:
8072       fcmp(vform, result, zn, zeros, le);
8073       break;
8074     case FCMLT_p_p_z0:
8075       fcmp(vform, result, zn, zeros, lt);
8076       break;
8077     case FCMNE_p_p_z0:
8078       fcmp(vform, result, zn, zeros, ne);
8079       break;
8080     default:
8081       VIXL_UNIMPLEMENTED();
8082       break;
8083   }
8084 
8085   ExtractFromSimVRegister(vform, pd, result);
8086   mov_zeroing(pd, pg, pd);
8087 }
8088 
8089 void Simulator::VisitSVEFPComplexAddition(const Instruction* instr) {
8090   VectorFormat vform = instr->GetSVEVectorFormat();
8091 
8092   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
8093     VIXL_UNIMPLEMENTED();
8094   }
8095 
8096   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8097   SimVRegister& zm = ReadVRegister(instr->GetRn());
8098   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8099   int rot = instr->ExtractBit(16);
8100 
8101   SimVRegister result;
8102 
8103   switch (instr->Mask(SVEFPComplexAdditionMask)) {
8104     case FCADD_z_p_zz:
8105       fcadd(vform, result, zdn, zm, rot);
8106       break;
8107     default:
8108       VIXL_UNIMPLEMENTED();
8109       break;
8110   }
8111   mov_merging(vform, zdn, pg, result);
8112 }
8113 
8114 void Simulator::VisitSVEFPComplexMulAdd(const Instruction* instr) {
8115   VectorFormat vform = instr->GetSVEVectorFormat();
8116 
8117   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
8118     VIXL_UNIMPLEMENTED();
8119   }
8120 
8121   SimVRegister& zda = ReadVRegister(instr->GetRd());
8122   SimVRegister& zn = ReadVRegister(instr->GetRn());
8123   SimVRegister& zm = ReadVRegister(instr->GetRm());
8124   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8125   int rot = instr->ExtractBits(14, 13);
8126 
8127   SimVRegister result;
8128 
8129   switch (instr->Mask(SVEFPComplexMulAddMask)) {
8130     case FCMLA_z_p_zzz:
8131       fcmla(vform, result, zn, zm, zda, rot);
8132       break;
8133     default:
8134       VIXL_UNIMPLEMENTED();
8135       break;
8136   }
8137   mov_merging(vform, zda, pg, result);
8138 }
8139 
8140 void Simulator::VisitSVEFPComplexMulAddIndex(const Instruction* instr) {
8141   SimVRegister& zda = ReadVRegister(instr->GetRd());
8142   SimVRegister& zn = ReadVRegister(instr->GetRn());
8143   int rot = instr->ExtractBits(11, 10);
8144   unsigned zm_code = instr->GetRm();
8145   int index = -1;
8146   VectorFormat vform, vform_dup;
8147 
8148   switch (instr->Mask(SVEFPComplexMulAddIndexMask)) {
8149     case FCMLA_z_zzzi_h:
8150       vform = kFormatVnH;
8151       vform_dup = kFormatVnS;
8152       index = zm_code >> 3;
8153       zm_code &= 0x7;
8154       break;
8155     case FCMLA_z_zzzi_s:
8156       vform = kFormatVnS;
8157       vform_dup = kFormatVnD;
8158       index = zm_code >> 4;
8159       zm_code &= 0xf;
8160       break;
8161     default:
8162       VIXL_UNIMPLEMENTED();
8163       break;
8164   }
8165 
8166   if (index >= 0) {
8167     SimVRegister temp;
8168     dup_elements_to_segments(vform_dup, temp, ReadVRegister(zm_code), index);
8169     fcmla(vform, zda, zn, temp, zda, rot);
8170   }
8171 }
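
// In the indexed FCMLA forms above, the Rm field is shared between the vector
// register and the index: for H lanes, Zm is Rm<2:0> and the index is
// Rm<4:3>; for S lanes, Zm is Rm<3:0> and the index is Rm<4>. The selected
// element is duplicated to each segment using the double-width format
// (vform_dup), so each real/imaginary pair moves as one unit before the
// unindexed fcmla helper is reused.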
8172 
8173 typedef LogicVRegister (Simulator::*FastReduceFn)(VectorFormat vform,
8174                                                   LogicVRegister dst,
8175                                                   const LogicVRegister& src);
8176 
8177 void Simulator::VisitSVEFPFastReduction(const Instruction* instr) {
8178   VectorFormat vform = instr->GetSVEVectorFormat();
8179   SimVRegister& vd = ReadVRegister(instr->GetRd());
8180   SimVRegister& zn = ReadVRegister(instr->GetRn());
8181   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8182   int lane_size = LaneSizeInBitsFromFormat(vform);
8183 
8184   uint64_t inactive_value = 0;
8185   FastReduceFn fn = nullptr;
8186 
8187   switch (instr->Mask(SVEFPFastReductionMask)) {
8188     case FADDV_v_p_z:
8189       fn = &Simulator::faddv;
8190       break;
8191     case FMAXNMV_v_p_z:
8192       inactive_value = FPToRawbitsWithSize(lane_size, kFP64DefaultNaN);
8193       fn = &Simulator::fmaxnmv;
8194       break;
8195     case FMAXV_v_p_z:
8196       inactive_value = FPToRawbitsWithSize(lane_size, kFP64NegativeInfinity);
8197       fn = &Simulator::fmaxv;
8198       break;
8199     case FMINNMV_v_p_z:
8200       inactive_value = FPToRawbitsWithSize(lane_size, kFP64DefaultNaN);
8201       fn = &Simulator::fminnmv;
8202       break;
8203     case FMINV_v_p_z:
8204       inactive_value = FPToRawbitsWithSize(lane_size, kFP64PositiveInfinity);
8205       fn = &Simulator::fminv;
8206       break;
8207     default:
8208       VIXL_UNIMPLEMENTED();
8209       break;
8210   }
8211 
8212   SimVRegister scratch;
8213   dup_immediate(vform, scratch, inactive_value);
8214   mov_merging(vform, scratch, pg, zn);
8215   if (fn != nullptr) (this->*fn)(vform, vd, scratch);
8216 }
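
// Inactive lanes cannot simply be skipped by the unpredicated reduction
// helpers, so they are first replaced with an identity value for the
// operation: 0.0 for FADDV, the default NaN for FMAXNMV/FMINNMV (maxnm/minnm
// treat a quiet NaN operand as absent), -infinity for FMAXV and +infinity
// for FMINV. The merging mov then overlays the active lanes of zn before the
// reduction runs.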
8217 
8218 void Simulator::VisitSVEFPMulIndex(const Instruction* instr) {
8219   VectorFormat vform = kFormatUndefined;
8220   unsigned zm_code = instr->GetRm() & 0xf;
8221   unsigned index = instr->ExtractBits(20, 19);
8222 
8223   switch (instr->Mask(SVEFPMulIndexMask)) {
8224     case FMUL_z_zzi_d:
8225       vform = kFormatVnD;
8226       index >>= 1;  // Only bit 20 is the index for D lanes.
8227       break;
8228     case FMUL_z_zzi_h_i3h:
8229       index += 4;  // Bit 22 (i3h) is the top bit of index.
8230       VIXL_FALLTHROUGH();
8231     case FMUL_z_zzi_h:
8232       vform = kFormatVnH;
8233       zm_code &= 7;  // Three bits used for zm.
8234       break;
8235     case FMUL_z_zzi_s:
8236       vform = kFormatVnS;
8237       zm_code &= 7;  // Three bits used for zm.
8238       break;
8239     default:
8240       VIXL_UNIMPLEMENTED();
8241       break;
8242   }
8243 
8244   SimVRegister& zd = ReadVRegister(instr->GetRd());
8245   SimVRegister& zn = ReadVRegister(instr->GetRn());
8246   SimVRegister temp;
8247 
8248   dup_elements_to_segments(vform, temp, ReadVRegister(zm_code), index);
8249   fmul(vform, zd, zn, temp);
8250 }
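
// As decoded above, the index and Zm share the Rm field for indexed FMUL:
// H lanes use a three-bit index (bit 22 on top of bits 20:19) with Zm in
// three bits, S lanes use bits 20:19 with Zm in three bits, and D lanes use
// bit 20 only with Zm in four bits. For example, FMUL_z_zzi_h_i3h with
// bits {22, 20, 19} = {1, 0, 1} selects index 4 + 1 = 5. The indexed element
// is duplicated across each segment before an ordinary lane-wise fmul.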
8251 
8252 void Simulator::VisitSVEFPMulAdd(const Instruction* instr) {
8253   VectorFormat vform = instr->GetSVEVectorFormat();
8254 
8255   SimVRegister& zd = ReadVRegister(instr->GetRd());
8256   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8257   SimVRegister result;
8258 
8259   if (instr->ExtractBit(15) == 0) {
8260     // Floating-point multiply-accumulate writing addend.
8261     SimVRegister& zm = ReadVRegister(instr->GetRm());
8262     SimVRegister& zn = ReadVRegister(instr->GetRn());
8263 
8264     switch (instr->Mask(SVEFPMulAddMask)) {
8265       // zda = zda + zn * zm
8266       case FMLA_z_p_zzz:
8267         fmla(vform, result, zd, zn, zm);
8268         break;
8269       // zda = -zda + -zn * zm
8270       case FNMLA_z_p_zzz:
8271         fneg(vform, result, zd);
8272         fmls(vform, result, result, zn, zm);
8273         break;
8274       // zda = zda + -zn * zm
8275       case FMLS_z_p_zzz:
8276         fmls(vform, result, zd, zn, zm);
8277         break;
8278       // zda = -zda + zn * zm
8279       case FNMLS_z_p_zzz:
8280         fneg(vform, result, zd);
8281         fmla(vform, result, result, zn, zm);
8282         break;
8283       default:
8284         VIXL_UNIMPLEMENTED();
8285         break;
8286     }
8287   } else {
8288     // Floating-point multiply-accumulate writing multiplicand.
8289     SimVRegister& za = ReadVRegister(instr->GetRm());
8290     SimVRegister& zm = ReadVRegister(instr->GetRn());
8291 
8292     switch (instr->Mask(SVEFPMulAddMask)) {
8293       // zdn = za + zdn * zm
8294       case FMAD_z_p_zzz:
8295         fmla(vform, result, za, zd, zm);
8296         break;
8297       // zdn = -za + -zdn * zm
8298       case FNMAD_z_p_zzz:
8299         fneg(vform, result, za);
8300         fmls(vform, result, result, zd, zm);
8301         break;
8302       // zdn = za + -zdn * zm
8303       case FMSB_z_p_zzz:
8304         fmls(vform, result, za, zd, zm);
8305         break;
8306       // zdn = -za + zdn * zm
8307       case FNMSB_z_p_zzz:
8308         fneg(vform, result, za);
8309         fmla(vform, result, result, zd, zm);
8310         break;
8311       default:
8312         VIXL_UNIMPLEMENTED();
8313         break;
8314     }
8315   }
8316 
8317   mov_merging(vform, zd, pg, result);
8318 }
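
// The negated multiply-accumulate forms have no dedicated helper, so they are
// modeled by negating the accumulator first: for example FNMLA
// (zda = -zda + -zn * zm) is computed as fmls(fneg(zda), zn, zm), since
// -zda - (zn * zm) is the same value. FNMAD and FNMSB apply the same
// rewriting to the 'za' operand.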
8319 
8320 void Simulator::VisitSVEFPMulAddIndex(const Instruction* instr) {
8321   VectorFormat vform = kFormatUndefined;
8322   unsigned zm_code = 0xffffffff;
8323   unsigned index = 0xffffffff;
8324 
8325   switch (instr->Mask(SVEFPMulAddIndexMask)) {
8326     case FMLA_z_zzzi_d:
8327     case FMLS_z_zzzi_d:
8328       vform = kFormatVnD;
8329       zm_code = instr->GetRmLow16();
8330       // Only bit 20 is the index for D lanes.
8331       index = instr->ExtractBit(20);
8332       break;
8333     case FMLA_z_zzzi_s:
8334     case FMLS_z_zzzi_s:
8335       vform = kFormatVnS;
8336       zm_code = instr->GetRm() & 0x7;  // Three bits used for zm.
8337       index = instr->ExtractBits(20, 19);
8338       break;
8339     case FMLA_z_zzzi_h:
8340     case FMLS_z_zzzi_h:
8341     case FMLA_z_zzzi_h_i3h:
8342     case FMLS_z_zzzi_h_i3h:
8343       vform = kFormatVnH;
8344       zm_code = instr->GetRm() & 0x7;  // Three bits used for zm.
8345       index = (instr->ExtractBit(22) << 2) | instr->ExtractBits(20, 19);
8346       break;
8347     default:
8348       VIXL_UNIMPLEMENTED();
8349       break;
8350   }
8351 
8352   SimVRegister& zd = ReadVRegister(instr->GetRd());
8353   SimVRegister& zn = ReadVRegister(instr->GetRn());
8354   SimVRegister temp;
8355 
8356   dup_elements_to_segments(vform, temp, ReadVRegister(zm_code), index);
8357   if (instr->ExtractBit(10) == 1) {
8358     fmls(vform, zd, zd, zn, temp);
8359   } else {
8360     fmla(vform, zd, zd, zn, temp);
8361   }
8362 }
8363 
8364 void Simulator::VisitSVEFPConvertToInt(const Instruction* instr) {
8365   SimVRegister& zd = ReadVRegister(instr->GetRd());
8366   SimVRegister& zn = ReadVRegister(instr->GetRn());
8367   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8368   int dst_data_size;
8369   int src_data_size;
8370 
8371   switch (instr->Mask(SVEFPConvertToIntMask)) {
8372     case FCVTZS_z_p_z_d2w:
8373     case FCVTZU_z_p_z_d2w:
8374       dst_data_size = kSRegSize;
8375       src_data_size = kDRegSize;
8376       break;
8377     case FCVTZS_z_p_z_d2x:
8378     case FCVTZU_z_p_z_d2x:
8379       dst_data_size = kDRegSize;
8380       src_data_size = kDRegSize;
8381       break;
8382     case FCVTZS_z_p_z_fp162h:
8383     case FCVTZU_z_p_z_fp162h:
8384       dst_data_size = kHRegSize;
8385       src_data_size = kHRegSize;
8386       break;
8387     case FCVTZS_z_p_z_fp162w:
8388     case FCVTZU_z_p_z_fp162w:
8389       dst_data_size = kSRegSize;
8390       src_data_size = kHRegSize;
8391       break;
8392     case FCVTZS_z_p_z_fp162x:
8393     case FCVTZU_z_p_z_fp162x:
8394       dst_data_size = kDRegSize;
8395       src_data_size = kHRegSize;
8396       break;
8397     case FCVTZS_z_p_z_s2w:
8398     case FCVTZU_z_p_z_s2w:
8399       dst_data_size = kSRegSize;
8400       src_data_size = kSRegSize;
8401       break;
8402     case FCVTZS_z_p_z_s2x:
8403     case FCVTZU_z_p_z_s2x:
8404       dst_data_size = kDRegSize;
8405       src_data_size = kSRegSize;
8406       break;
8407     default:
8408       VIXL_UNIMPLEMENTED();
8409       dst_data_size = 0;
8410       src_data_size = 0;
8411       break;
8412   }
8413 
8414   VectorFormat vform =
8415       SVEFormatFromLaneSizeInBits(std::max(dst_data_size, src_data_size));
8416 
8417   if (instr->ExtractBit(16) == 0) {
8418     fcvts(vform, dst_data_size, src_data_size, zd, pg, zn, FPZero);
8419   } else {
8420     fcvtu(vform, dst_data_size, src_data_size, zd, pg, zn, FPZero);
8421   }
8422 }
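
// The enclosing vector format is chosen as the wider of the source and
// destination data sizes, so every affected element is covered by a single
// lane. Bit 16 then selects between the signed (fcvts) and unsigned (fcvtu)
// helpers, and FPZero forces the round-towards-zero behaviour of FCVTZ*.
// The same max(dst, src) scheme is used by FCVT and SCVTF/UCVTF below.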
8423 
8424 void Simulator::VisitSVEFPConvertPrecision(const Instruction* instr) {
8425   SimVRegister& zd = ReadVRegister(instr->GetRd());
8426   SimVRegister& zn = ReadVRegister(instr->GetRn());
8427   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8428   int dst_data_size;
8429   int src_data_size;
8430 
8431   switch (instr->Mask(SVEFPConvertPrecisionMask)) {
8432     case FCVT_z_p_z_d2h:
8433       dst_data_size = kHRegSize;
8434       src_data_size = kDRegSize;
8435       break;
8436     case FCVT_z_p_z_d2s:
8437       dst_data_size = kSRegSize;
8438       src_data_size = kDRegSize;
8439       break;
8440     case FCVT_z_p_z_h2d:
8441       dst_data_size = kDRegSize;
8442       src_data_size = kHRegSize;
8443       break;
8444     case FCVT_z_p_z_h2s:
8445       dst_data_size = kSRegSize;
8446       src_data_size = kHRegSize;
8447       break;
8448     case FCVT_z_p_z_s2d:
8449       dst_data_size = kDRegSize;
8450       src_data_size = kSRegSize;
8451       break;
8452     case FCVT_z_p_z_s2h:
8453       dst_data_size = kHRegSize;
8454       src_data_size = kSRegSize;
8455       break;
8456     default:
8457       VIXL_UNIMPLEMENTED();
8458       dst_data_size = 0;
8459       src_data_size = 0;
8460       break;
8461   }
8462   VectorFormat vform =
8463       SVEFormatFromLaneSizeInBits(std::max(dst_data_size, src_data_size));
8464 
8465   fcvt(vform, dst_data_size, src_data_size, zd, pg, zn);
8466 }
8467 
8468 void Simulator::VisitSVEFPUnaryOp(const Instruction* instr) {
8469   SimVRegister& zd = ReadVRegister(instr->GetRd());
8470   SimVRegister& zn = ReadVRegister(instr->GetRn());
8471   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8472   VectorFormat vform = instr->GetSVEVectorFormat();
8473   SimVRegister result;
8474 
8475   switch (instr->Mask(SVEFPUnaryOpMask)) {
8476     case FRECPX_z_p_z:
8477       frecpx(vform, result, zn);
8478       break;
8479     case FSQRT_z_p_z:
8480       fsqrt(vform, result, zn);
8481       break;
8482     default:
8483       VIXL_UNIMPLEMENTED();
8484       break;
8485   }
8486   mov_merging(vform, zd, pg, result);
8487 }
8488 
8489 void Simulator::VisitSVEFPRoundToIntegralValue(const Instruction* instr) {
8490   SimVRegister& zd = ReadVRegister(instr->GetRd());
8491   SimVRegister& zn = ReadVRegister(instr->GetRn());
8492   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8493   VectorFormat vform = instr->GetSVEVectorFormat();
8494   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
8495   bool exact_exception = false;
8496 
8497   switch (instr->Mask(SVEFPRoundToIntegralValueMask)) {
8498     case FRINTA_z_p_z:
8499       fpcr_rounding = FPTieAway;
8500       break;
8501     case FRINTI_z_p_z:
8502       break;  // Use FPCR rounding mode.
8503     case FRINTM_z_p_z:
8504       fpcr_rounding = FPNegativeInfinity;
8505       break;
8506     case FRINTN_z_p_z:
8507       fpcr_rounding = FPTieEven;
8508       break;
8509     case FRINTP_z_p_z:
8510       fpcr_rounding = FPPositiveInfinity;
8511       break;
8512     case FRINTX_z_p_z:
8513       exact_exception = true;
8514       break;
8515     case FRINTZ_z_p_z:
8516       fpcr_rounding = FPZero;
8517       break;
8518     default:
8519       VIXL_UNIMPLEMENTED();
8520       break;
8521   }
8522 
8523   SimVRegister result;
8524   frint(vform, result, zn, fpcr_rounding, exact_exception, kFrintToInteger);
8525   mov_merging(vform, zd, pg, result);
8526 }
8527 
8528 void Simulator::VisitSVEIntConvertToFP(const Instruction* instr) {
8529   SimVRegister& zd = ReadVRegister(instr->GetRd());
8530   SimVRegister& zn = ReadVRegister(instr->GetRn());
8531   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8532   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
8533   int dst_data_size;
8534   int src_data_size;
8535 
8536   switch (instr->Mask(SVEIntConvertToFPMask)) {
8537     case SCVTF_z_p_z_h2fp16:
8538     case UCVTF_z_p_z_h2fp16:
8539       dst_data_size = kHRegSize;
8540       src_data_size = kHRegSize;
8541       break;
8542     case SCVTF_z_p_z_w2d:
8543     case UCVTF_z_p_z_w2d:
8544       dst_data_size = kDRegSize;
8545       src_data_size = kSRegSize;
8546       break;
8547     case SCVTF_z_p_z_w2fp16:
8548     case UCVTF_z_p_z_w2fp16:
8549       dst_data_size = kHRegSize;
8550       src_data_size = kSRegSize;
8551       break;
8552     case SCVTF_z_p_z_w2s:
8553     case UCVTF_z_p_z_w2s:
8554       dst_data_size = kSRegSize;
8555       src_data_size = kSRegSize;
8556       break;
8557     case SCVTF_z_p_z_x2d:
8558     case UCVTF_z_p_z_x2d:
8559       dst_data_size = kDRegSize;
8560       src_data_size = kDRegSize;
8561       break;
8562     case SCVTF_z_p_z_x2fp16:
8563     case UCVTF_z_p_z_x2fp16:
8564       dst_data_size = kHRegSize;
8565       src_data_size = kDRegSize;
8566       break;
8567     case SCVTF_z_p_z_x2s:
8568     case UCVTF_z_p_z_x2s:
8569       dst_data_size = kSRegSize;
8570       src_data_size = kDRegSize;
8571       break;
8572     default:
8573       VIXL_UNIMPLEMENTED();
8574       dst_data_size = 0;
8575       src_data_size = 0;
8576       break;
8577   }
8578 
8579   VectorFormat vform =
8580       SVEFormatFromLaneSizeInBits(std::max(dst_data_size, src_data_size));
8581 
8582   if (instr->ExtractBit(16) == 0) {
8583     scvtf(vform, dst_data_size, src_data_size, zd, pg, zn, fpcr_rounding);
8584   } else {
8585     ucvtf(vform, dst_data_size, src_data_size, zd, pg, zn, fpcr_rounding);
8586   }
8587 }
8588 
8589 void Simulator::VisitSVEFPUnaryOpUnpredicated(const Instruction* instr) {
8590   VectorFormat vform = instr->GetSVEVectorFormat();
8591   SimVRegister& zd = ReadVRegister(instr->GetRd());
8592   SimVRegister& zn = ReadVRegister(instr->GetRn());
8593   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
8594 
8595   switch (instr->Mask(SVEFPUnaryOpUnpredicatedMask)) {
8596     case FRECPE_z_z:
8597       frecpe(vform, zd, zn, fpcr_rounding);
8598       break;
8599     case FRSQRTE_z_z:
8600       frsqrte(vform, zd, zn);
8601       break;
8602     default:
8603       VIXL_UNIMPLEMENTED();
8604       break;
8605   }
8606 }
8607 
8608 void Simulator::VisitSVEIncDecByPredicateCount(const Instruction* instr) {
8609   VectorFormat vform = instr->GetSVEVectorFormat();
8610   SimPRegister& pg = ReadPRegister(instr->ExtractBits(8, 5));
8611 
8612   int count = CountActiveLanes(vform, pg);
8613 
8614   if (instr->ExtractBit(11) == 0) {
8615     SimVRegister& zdn = ReadVRegister(instr->GetRd());
8616     switch (instr->Mask(SVEIncDecByPredicateCountMask)) {
8617       case DECP_z_p_z:
8618         sub_uint(vform, zdn, zdn, count);
8619         break;
8620       case INCP_z_p_z:
8621         add_uint(vform, zdn, zdn, count);
8622         break;
8623       case SQDECP_z_p_z:
8624         sub_uint(vform, zdn, zdn, count).SignedSaturate(vform);
8625         break;
8626       case SQINCP_z_p_z:
8627         add_uint(vform, zdn, zdn, count).SignedSaturate(vform);
8628         break;
8629       case UQDECP_z_p_z:
8630         sub_uint(vform, zdn, zdn, count).UnsignedSaturate(vform);
8631         break;
8632       case UQINCP_z_p_z:
8633         add_uint(vform, zdn, zdn, count).UnsignedSaturate(vform);
8634         break;
8635       default:
8636         VIXL_UNIMPLEMENTED();
8637         break;
8638     }
8639   } else {
8640     bool is_saturating = (instr->ExtractBit(18) == 0);
8641     bool decrement =
8642         is_saturating ? instr->ExtractBit(17) : instr->ExtractBit(16);
8643     bool is_signed = (instr->ExtractBit(16) == 0);
8644     bool sf = is_saturating ? (instr->ExtractBit(10) != 0) : true;
8645     unsigned width = sf ? kXRegSize : kWRegSize;
8646 
8647     switch (instr->Mask(SVEIncDecByPredicateCountMask)) {
8648       case DECP_r_p_r:
8649       case INCP_r_p_r:
8650       case SQDECP_r_p_r_sx:
8651       case SQDECP_r_p_r_x:
8652       case SQINCP_r_p_r_sx:
8653       case SQINCP_r_p_r_x:
8654       case UQDECP_r_p_r_uw:
8655       case UQDECP_r_p_r_x:
8656       case UQINCP_r_p_r_uw:
8657       case UQINCP_r_p_r_x:
8658         WriteXRegister(instr->GetRd(),
8659                        IncDecN(ReadXRegister(instr->GetRd()),
8660                                decrement ? -count : count,
8661                                width,
8662                                is_saturating,
8663                                is_signed));
8664         break;
8665       default:
8666         VIXL_UNIMPLEMENTED();
8667         break;
8668     }
8669   }
8670 }
8671 
8672 uint64_t Simulator::IncDecN(uint64_t acc,
8673                             int64_t delta,
8674                             unsigned n,
8675                             bool is_saturating,
8676                             bool is_signed) {
8677   VIXL_ASSERT(n <= 64);
8678   VIXL_ASSERT(IsIntN(n, delta));
8679 
8680   uint64_t sign_mask = UINT64_C(1) << (n - 1);
8681   uint64_t mask = GetUintMask(n);
8682 
8683   acc &= mask;  // Ignore initial accumulator high bits.
8684   uint64_t result = (acc + delta) & mask;
8685 
8686   bool result_negative = ((result & sign_mask) != 0);
8687 
8688   if (is_saturating) {
8689     if (is_signed) {
8690       bool acc_negative = ((acc & sign_mask) != 0);
8691       bool delta_negative = delta < 0;
8692 
8693       // If the signs of the operands are the same, but different from the
8694       // result, there was an overflow.
8695       if ((acc_negative == delta_negative) &&
8696           (acc_negative != result_negative)) {
8697         if (result_negative) {
8698           // Saturate to [..., INT<n>_MAX].
8699           result_negative = false;
8700           result = mask & ~sign_mask;  // E.g. 0x000000007fffffff
8701         } else {
8702           // Saturate to [INT<n>_MIN, ...].
8703           result_negative = true;
8704           result = ~mask | sign_mask;  // E.g. 0xffffffff80000000
8705         }
8706       }
8707     } else {
8708       if ((delta < 0) && (result > acc)) {
8709         // Saturate to [0, ...].
8710         result = 0;
8711       } else if ((delta > 0) && (result < acc)) {
8712         // Saturate to [..., UINT<n>_MAX].
8713         result = mask;
8714       }
8715     }
8716   }
8717 
8718   // Sign-extend if necessary.
8719   if (result_negative && is_signed) result |= ~mask;
8720 
8721   return result;
8722 }
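
// Worked examples of the saturating paths in IncDecN above (n = 8 for
// brevity):
//  - Signed: acc = 0x78 (+120), delta = +16. The raw result is 0x88, which is
//    negative while both operands were non-negative, so the value saturates
//    to 0x7f (INT8_MAX).
//  - Unsigned: acc = 0x10 (16), delta = -32. The masked result 0xf0 is
//    greater than acc even though delta is negative, so the value saturates
//    to 0.
// Without saturation the result simply wraps within the low n bits, and is
// sign-extended to 64 bits when is_signed is set.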
8723 
8724 void Simulator::VisitSVEIndexGeneration(const Instruction* instr) {
8725   VectorFormat vform = instr->GetSVEVectorFormat();
8726   SimVRegister& zd = ReadVRegister(instr->GetRd());
8727   switch (instr->Mask(SVEIndexGenerationMask)) {
8728     case INDEX_z_ii:
8729     case INDEX_z_ir:
8730     case INDEX_z_ri:
8731     case INDEX_z_rr: {
8732       uint64_t start = instr->ExtractBit(10) ? ReadXRegister(instr->GetRn())
8733                                              : instr->ExtractSignedBits(9, 5);
8734       uint64_t step = instr->ExtractBit(11) ? ReadXRegister(instr->GetRm())
8735                                             : instr->ExtractSignedBits(20, 16);
8736       index(vform, zd, start, step);
8737       break;
8738     }
8739     default:
8740       VIXL_UNIMPLEMENTED();
8741       break;
8742   }
8743 }
8744 
8745 void Simulator::VisitSVEIntArithmeticUnpredicated(const Instruction* instr) {
8746   VectorFormat vform = instr->GetSVEVectorFormat();
8747   SimVRegister& zd = ReadVRegister(instr->GetRd());
8748   SimVRegister& zn = ReadVRegister(instr->GetRn());
8749   SimVRegister& zm = ReadVRegister(instr->GetRm());
8750   switch (instr->Mask(SVEIntArithmeticUnpredicatedMask)) {
8751     case ADD_z_zz:
8752       add(vform, zd, zn, zm);
8753       break;
8754     case SQADD_z_zz:
8755       add(vform, zd, zn, zm).SignedSaturate(vform);
8756       break;
8757     case SQSUB_z_zz:
8758       sub(vform, zd, zn, zm).SignedSaturate(vform);
8759       break;
8760     case SUB_z_zz:
8761       sub(vform, zd, zn, zm);
8762       break;
8763     case UQADD_z_zz:
8764       add(vform, zd, zn, zm).UnsignedSaturate(vform);
8765       break;
8766     case UQSUB_z_zz:
8767       sub(vform, zd, zn, zm).UnsignedSaturate(vform);
8768       break;
8769     default:
8770       VIXL_UNIMPLEMENTED();
8771       break;
8772   }
8773 }
8774 
8775 void Simulator::VisitSVEIntAddSubtractVectors_Predicated(
8776     const Instruction* instr) {
8777   VectorFormat vform = instr->GetSVEVectorFormat();
8778   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8779   SimVRegister& zm = ReadVRegister(instr->GetRn());
8780   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8781   SimVRegister result;
8782 
8783   switch (instr->Mask(SVEIntAddSubtractVectors_PredicatedMask)) {
8784     case ADD_z_p_zz:
8785       add(vform, result, zdn, zm);
8786       break;
8787     case SUBR_z_p_zz:
8788       sub(vform, result, zm, zdn);
8789       break;
8790     case SUB_z_p_zz:
8791       sub(vform, result, zdn, zm);
8792       break;
8793     default:
8794       VIXL_UNIMPLEMENTED();
8795       break;
8796   }
8797   mov_merging(vform, zdn, pg, result);
8798 }
8799 
8800 void Simulator::VisitSVEBitwiseLogical_Predicated(const Instruction* instr) {
8801   VectorFormat vform = instr->GetSVEVectorFormat();
8802   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8803   SimVRegister& zm = ReadVRegister(instr->GetRn());
8804   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8805   SimVRegister result;
8806 
8807   switch (instr->Mask(SVEBitwiseLogical_PredicatedMask)) {
8808     case AND_z_p_zz:
8809       SVEBitwiseLogicalUnpredicatedHelper(AND, vform, result, zdn, zm);
8810       break;
8811     case BIC_z_p_zz:
8812       SVEBitwiseLogicalUnpredicatedHelper(BIC, vform, result, zdn, zm);
8813       break;
8814     case EOR_z_p_zz:
8815       SVEBitwiseLogicalUnpredicatedHelper(EOR, vform, result, zdn, zm);
8816       break;
8817     case ORR_z_p_zz:
8818       SVEBitwiseLogicalUnpredicatedHelper(ORR, vform, result, zdn, zm);
8819       break;
8820     default:
8821       VIXL_UNIMPLEMENTED();
8822       break;
8823   }
8824   mov_merging(vform, zdn, pg, result);
8825 }
8826 
8827 void Simulator::VisitSVEIntMulVectors_Predicated(const Instruction* instr) {
8828   VectorFormat vform = instr->GetSVEVectorFormat();
8829   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8830   SimVRegister& zm = ReadVRegister(instr->GetRn());
8831   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8832   SimVRegister result;
8833 
8834   switch (instr->Mask(SVEIntMulVectors_PredicatedMask)) {
8835     case MUL_z_p_zz:
8836       mul(vform, result, zdn, zm);
8837       break;
8838     case SMULH_z_p_zz:
8839       smulh(vform, result, zdn, zm);
8840       break;
8841     case UMULH_z_p_zz:
8842       umulh(vform, result, zdn, zm);
8843       break;
8844     default:
8845       VIXL_UNIMPLEMENTED();
8846       break;
8847   }
8848   mov_merging(vform, zdn, pg, result);
8849 }
8850 
8851 void Simulator::VisitSVEIntMinMaxDifference_Predicated(
8852     const Instruction* instr) {
8853   VectorFormat vform = instr->GetSVEVectorFormat();
8854   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8855   SimVRegister& zm = ReadVRegister(instr->GetRn());
8856   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8857   SimVRegister result;
8858 
8859   switch (instr->Mask(SVEIntMinMaxDifference_PredicatedMask)) {
8860     case SABD_z_p_zz:
8861       absdiff(vform, result, zdn, zm, true);
8862       break;
8863     case SMAX_z_p_zz:
8864       smax(vform, result, zdn, zm);
8865       break;
8866     case SMIN_z_p_zz:
8867       smin(vform, result, zdn, zm);
8868       break;
8869     case UABD_z_p_zz:
8870       absdiff(vform, result, zdn, zm, false);
8871       break;
8872     case UMAX_z_p_zz:
8873       umax(vform, result, zdn, zm);
8874       break;
8875     case UMIN_z_p_zz:
8876       umin(vform, result, zdn, zm);
8877       break;
8878     default:
8879       VIXL_UNIMPLEMENTED();
8880       break;
8881   }
8882   mov_merging(vform, zdn, pg, result);
8883 }
8884 
8885 void Simulator::VisitSVEIntMulImm_Unpredicated(const Instruction* instr) {
8886   VectorFormat vform = instr->GetSVEVectorFormat();
8887   SimVRegister& zd = ReadVRegister(instr->GetRd());
8888   SimVRegister scratch;
8889 
8890   switch (instr->Mask(SVEIntMulImm_UnpredicatedMask)) {
8891     case MUL_z_zi:
8892       dup_immediate(vform, scratch, instr->GetImmSVEIntWideSigned());
8893       mul(vform, zd, zd, scratch);
8894       break;
8895     default:
8896       VIXL_UNIMPLEMENTED();
8897       break;
8898   }
8899 }
8900 
8901 void Simulator::VisitSVEIntDivideVectors_Predicated(const Instruction* instr) {
8902   VectorFormat vform = instr->GetSVEVectorFormat();
8903   SimVRegister& zdn = ReadVRegister(instr->GetRd());
8904   SimVRegister& zm = ReadVRegister(instr->GetRn());
8905   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
8906   SimVRegister result;
8907 
8908   VIXL_ASSERT((vform == kFormatVnS) || (vform == kFormatVnD));
8909 
8910   switch (instr->Mask(SVEIntDivideVectors_PredicatedMask)) {
8911     case SDIVR_z_p_zz:
8912       sdiv(vform, result, zm, zdn);
8913       break;
8914     case SDIV_z_p_zz:
8915       sdiv(vform, result, zdn, zm);
8916       break;
8917     case UDIVR_z_p_zz:
8918       udiv(vform, result, zm, zdn);
8919       break;
8920     case UDIV_z_p_zz:
8921       udiv(vform, result, zdn, zm);
8922       break;
8923     default:
8924       VIXL_UNIMPLEMENTED();
8925       break;
8926   }
8927   mov_merging(vform, zdn, pg, result);
8928 }
8929 
8930 void Simulator::VisitSVEIntMinMaxImm_Unpredicated(const Instruction* instr) {
8931   VectorFormat vform = instr->GetSVEVectorFormat();
8932   SimVRegister& zd = ReadVRegister(instr->GetRd());
8933   SimVRegister scratch;
8934 
8935   uint64_t unsigned_imm = instr->GetImmSVEIntWideUnsigned();
8936   int64_t signed_imm = instr->GetImmSVEIntWideSigned();
8937 
8938   switch (instr->Mask(SVEIntMinMaxImm_UnpredicatedMask)) {
8939     case SMAX_z_zi:
8940       dup_immediate(vform, scratch, signed_imm);
8941       smax(vform, zd, zd, scratch);
8942       break;
8943     case SMIN_z_zi:
8944       dup_immediate(vform, scratch, signed_imm);
8945       smin(vform, zd, zd, scratch);
8946       break;
8947     case UMAX_z_zi:
8948       dup_immediate(vform, scratch, unsigned_imm);
8949       umax(vform, zd, zd, scratch);
8950       break;
8951     case UMIN_z_zi:
8952       dup_immediate(vform, scratch, unsigned_imm);
8953       umin(vform, zd, zd, scratch);
8954       break;
8955     default:
8956       VIXL_UNIMPLEMENTED();
8957       break;
8958   }
8959 }
8960 
8961 void Simulator::VisitSVEIntCompareScalarCountAndLimit(
8962     const Instruction* instr) {
8963   unsigned rn_code = instr->GetRn();
8964   unsigned rm_code = instr->GetRm();
8965   SimPRegister& pd = ReadPRegister(instr->GetPd());
8966   VectorFormat vform = instr->GetSVEVectorFormat();
8967 
8968   bool is_64_bit = instr->ExtractBit(12) == 1;
8969   int rsize = is_64_bit ? kXRegSize : kWRegSize;
8970   uint64_t mask = is_64_bit ? kXRegMask : kWRegMask;
8971 
8972   uint64_t usrc1 = ReadXRegister(rn_code);
8973   int64_t ssrc2 = is_64_bit ? ReadXRegister(rm_code) : ReadWRegister(rm_code);
8974   uint64_t usrc2 = ssrc2 & mask;
8975 
8976   bool last = true;
8977   for (int lane = 0; lane < LaneCountFromFormat(vform); lane++) {
8978     usrc1 &= mask;
8979     int64_t ssrc1 = ExtractSignedBitfield64(rsize - 1, 0, usrc1);
8980 
8981     bool cond = false;
8982     switch (instr->Mask(SVEIntCompareScalarCountAndLimitMask)) {
8983       case WHILELE_p_p_rr:
8984         cond = ssrc1 <= ssrc2;
8985         break;
8986       case WHILELO_p_p_rr:
8987         cond = usrc1 < usrc2;
8988         break;
8989       case WHILELS_p_p_rr:
8990         cond = usrc1 <= usrc2;
8991         break;
8992       case WHILELT_p_p_rr:
8993         cond = ssrc1 < ssrc2;
8994         break;
8995       default:
8996         VIXL_UNIMPLEMENTED();
8997         break;
8998     }
8999     last = last && cond;
9000     LogicPRegister dst(pd);
9001     dst.SetActive(vform, lane, last);
9002     usrc1++;
9003   }
9004 
9005   PredTest(vform, GetPTrue(), pd);
9006   LogSystemRegister(NZCV);
9007 }
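
// The loop above implements the WHILE* semantics: usrc1 is incremented once
// per lane and compared against the fixed limit, and because `last` is
// and-ed with each comparison, the first failing lane also deactivates every
// following lane. For example, WHILELT with src1 = 5 and src2 = 8 on S lanes
// activates exactly the first three lanes (for 5, 6 and 7), assuming the
// vector has at least three S lanes.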
9008 
9009 void Simulator::VisitSVEConditionallyTerminateScalars(
9010     const Instruction* instr) {
9011   unsigned rn_code = instr->GetRn();
9012   unsigned rm_code = instr->GetRm();
9013   bool is_64_bit = instr->ExtractBit(22) == 1;
9014   uint64_t src1 = is_64_bit ? ReadXRegister(rn_code) : ReadWRegister(rn_code);
9015   uint64_t src2 = is_64_bit ? ReadXRegister(rm_code) : ReadWRegister(rm_code);
9016   bool term;
9017   switch (instr->Mask(SVEConditionallyTerminateScalarsMask)) {
9018     case CTERMEQ_rr:
9019       term = src1 == src2;
9020       break;
9021     case CTERMNE_rr:
9022       term = src1 != src2;
9023       break;
9024     default:
9025       term = false;
9026       VIXL_UNIMPLEMENTED();
9027       break;
9028   }
9029   ReadNzcv().SetN(term ? 1 : 0);
9030   ReadNzcv().SetV(term ? 0 : !ReadC());
9031   LogSystemRegister(NZCV);
9032 }
9033 
9034 void Simulator::VisitSVEIntCompareSignedImm(const Instruction* instr) {
9035   bool commute_inputs = false;
9036   Condition cond;
9037   switch (instr->Mask(SVEIntCompareSignedImmMask)) {
9038     case CMPEQ_p_p_zi:
9039       cond = eq;
9040       break;
9041     case CMPGE_p_p_zi:
9042       cond = ge;
9043       break;
9044     case CMPGT_p_p_zi:
9045       cond = gt;
9046       break;
9047     case CMPLE_p_p_zi:
9048       cond = ge;
9049       commute_inputs = true;
9050       break;
9051     case CMPLT_p_p_zi:
9052       cond = gt;
9053       commute_inputs = true;
9054       break;
9055     case CMPNE_p_p_zi:
9056       cond = ne;
9057       break;
9058     default:
9059       cond = al;
9060       VIXL_UNIMPLEMENTED();
9061       break;
9062   }
9063 
9064   VectorFormat vform = instr->GetSVEVectorFormat();
9065   SimVRegister src2;
9066   dup_immediate(vform,
9067                 src2,
9068                 ExtractSignedBitfield64(4, 0, instr->ExtractBits(20, 16)));
9069   SVEIntCompareVectorsHelper(cond,
9070                              vform,
9071                              ReadPRegister(instr->GetPd()),
9072                              ReadPRegister(instr->GetPgLow8()),
9073                              commute_inputs ? src2
9074                                             : ReadVRegister(instr->GetRn()),
9075                              commute_inputs ? ReadVRegister(instr->GetRn())
9076                                             : src2);
9077 }
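
// CMPLE/CMPLT have no comparison helper of their own: the immediate is
// duplicated into a vector and the operands are commuted so that the
// existing GE/GT path can be reused (zn <= imm is evaluated as imm >= zn).
// The unsigned immediate compares below use the same trick for CMPLO/CMPLS
// via HI/HS.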
9078 
9079 void Simulator::VisitSVEIntCompareUnsignedImm(const Instruction* instr) {
9080   bool commute_inputs = false;
9081   Condition cond;
9082   switch (instr->Mask(SVEIntCompareUnsignedImmMask)) {
9083     case CMPHI_p_p_zi:
9084       cond = hi;
9085       break;
9086     case CMPHS_p_p_zi:
9087       cond = hs;
9088       break;
9089     case CMPLO_p_p_zi:
9090       cond = hi;
9091       commute_inputs = true;
9092       break;
9093     case CMPLS_p_p_zi:
9094       cond = hs;
9095       commute_inputs = true;
9096       break;
9097     default:
9098       cond = al;
9099       VIXL_UNIMPLEMENTED();
9100       break;
9101   }
9102 
9103   VectorFormat vform = instr->GetSVEVectorFormat();
9104   SimVRegister src2;
9105   dup_immediate(vform, src2, instr->ExtractBits(20, 14));
9106   SVEIntCompareVectorsHelper(cond,
9107                              vform,
9108                              ReadPRegister(instr->GetPd()),
9109                              ReadPRegister(instr->GetPgLow8()),
9110                              commute_inputs ? src2
9111                                             : ReadVRegister(instr->GetRn()),
9112                              commute_inputs ? ReadVRegister(instr->GetRn())
9113                                             : src2);
9114 }
9115 
9116 void Simulator::VisitSVEIntCompareVectors(const Instruction* instr) {
9117   Instr op = instr->Mask(SVEIntCompareVectorsMask);
9118   bool is_wide_elements = false;
9119   switch (op) {
9120     case CMPEQ_p_p_zw:
9121     case CMPGE_p_p_zw:
9122     case CMPGT_p_p_zw:
9123     case CMPHI_p_p_zw:
9124     case CMPHS_p_p_zw:
9125     case CMPLE_p_p_zw:
9126     case CMPLO_p_p_zw:
9127     case CMPLS_p_p_zw:
9128     case CMPLT_p_p_zw:
9129     case CMPNE_p_p_zw:
9130       is_wide_elements = true;
9131       break;
9132   }
9133 
9134   Condition cond;
9135   switch (op) {
9136     case CMPEQ_p_p_zw:
9137     case CMPEQ_p_p_zz:
9138       cond = eq;
9139       break;
9140     case CMPGE_p_p_zw:
9141     case CMPGE_p_p_zz:
9142       cond = ge;
9143       break;
9144     case CMPGT_p_p_zw:
9145     case CMPGT_p_p_zz:
9146       cond = gt;
9147       break;
9148     case CMPHI_p_p_zw:
9149     case CMPHI_p_p_zz:
9150       cond = hi;
9151       break;
9152     case CMPHS_p_p_zw:
9153     case CMPHS_p_p_zz:
9154       cond = hs;
9155       break;
9156     case CMPNE_p_p_zw:
9157     case CMPNE_p_p_zz:
9158       cond = ne;
9159       break;
9160     case CMPLE_p_p_zw:
9161       cond = le;
9162       break;
9163     case CMPLO_p_p_zw:
9164       cond = lo;
9165       break;
9166     case CMPLS_p_p_zw:
9167       cond = ls;
9168       break;
9169     case CMPLT_p_p_zw:
9170       cond = lt;
9171       break;
9172     default:
9173       VIXL_UNIMPLEMENTED();
9174       cond = al;
9175       break;
9176   }
9177 
9178   SVEIntCompareVectorsHelper(cond,
9179                              instr->GetSVEVectorFormat(),
9180                              ReadPRegister(instr->GetPd()),
9181                              ReadPRegister(instr->GetPgLow8()),
9182                              ReadVRegister(instr->GetRn()),
9183                              ReadVRegister(instr->GetRm()),
9184                              is_wide_elements);
9185 }
9186 
9187 void Simulator::VisitSVEFPExponentialAccelerator(const Instruction* instr) {
9188   VectorFormat vform = instr->GetSVEVectorFormat();
9189   SimVRegister& zd = ReadVRegister(instr->GetRd());
9190   SimVRegister& zn = ReadVRegister(instr->GetRn());
9191 
9192   VIXL_ASSERT((vform == kFormatVnH) || (vform == kFormatVnS) ||
9193               (vform == kFormatVnD));
9194 
9195   switch (instr->Mask(SVEFPExponentialAcceleratorMask)) {
9196     case FEXPA_z_z:
9197       fexpa(vform, zd, zn);
9198       break;
9199     default:
9200       VIXL_UNIMPLEMENTED();
9201       break;
9202   }
9203 }
9204 
9205 void Simulator::VisitSVEFPTrigSelectCoefficient(const Instruction* instr) {
9206   VectorFormat vform = instr->GetSVEVectorFormat();
9207   SimVRegister& zd = ReadVRegister(instr->GetRd());
9208   SimVRegister& zn = ReadVRegister(instr->GetRn());
9209   SimVRegister& zm = ReadVRegister(instr->GetRm());
9210 
9211   VIXL_ASSERT((vform == kFormatVnH) || (vform == kFormatVnS) ||
9212               (vform == kFormatVnD));
9213 
9214   switch (instr->Mask(SVEFPTrigSelectCoefficientMask)) {
9215     case FTSSEL_z_zz:
9216       ftssel(vform, zd, zn, zm);
9217       break;
9218     default:
9219       VIXL_UNIMPLEMENTED();
9220       break;
9221   }
9222 }
9223 
9224 void Simulator::VisitSVEConstructivePrefix_Unpredicated(
9225     const Instruction* instr) {
9226   SimVRegister& zd = ReadVRegister(instr->GetRd());
9227   SimVRegister& zn = ReadVRegister(instr->GetRn());
9228 
9229   switch (instr->Mask(SVEConstructivePrefix_UnpredicatedMask)) {
9230     case MOVPRFX_z_z:
9231       mov(kFormatVnD, zd, zn);  // The lane size is arbitrary.
9232       // Record the movprfx, so the next ExecuteInstruction() can check it.
9233       movprfx_ = instr;
9234       break;
9235     default:
9236       VIXL_UNIMPLEMENTED();
9237       break;
9238   }
9239 }
9240 
9241 void Simulator::VisitSVEIntMulAddPredicated(const Instruction* instr) {
9242   VectorFormat vform = instr->GetSVEVectorFormat();
9243 
9244   SimVRegister& zd = ReadVRegister(instr->GetRd());
9245   SimVRegister& zm = ReadVRegister(instr->GetRm());
9246 
9247   SimVRegister result;
9248   switch (instr->Mask(SVEIntMulAddPredicatedMask)) {
9249     case MLA_z_p_zzz:
9250       mla(vform, result, zd, ReadVRegister(instr->GetRn()), zm);
9251       break;
9252     case MLS_z_p_zzz:
9253       mls(vform, result, zd, ReadVRegister(instr->GetRn()), zm);
9254       break;
9255     case MAD_z_p_zzz:
9256       // 'za' is encoded in 'Rn'.
9257       mla(vform, result, ReadVRegister(instr->GetRn()), zd, zm);
9258       break;
9259     case MSB_z_p_zzz: {
9260       // 'za' is encoded in 'Rn'.
9261       mls(vform, result, ReadVRegister(instr->GetRn()), zd, zm);
9262       break;
9263     }
9264     default:
9265       VIXL_UNIMPLEMENTED();
9266       break;
9267   }
9268   mov_merging(vform, zd, ReadPRegister(instr->GetPgLow8()), result);
9269 }
9270 
9271 void Simulator::VisitSVEIntMulAddUnpredicated(const Instruction* instr) {
9272   VectorFormat vform = instr->GetSVEVectorFormat();
9273   SimVRegister& zda = ReadVRegister(instr->GetRd());
9274   SimVRegister& zn = ReadVRegister(instr->GetRn());
9275   SimVRegister& zm = ReadVRegister(instr->GetRm());
9276 
9277   switch (instr->Mask(SVEIntMulAddUnpredicatedMask)) {
9278     case SDOT_z_zzz:
9279       sdot(vform, zda, zn, zm);
9280       break;
9281     case UDOT_z_zzz:
9282       udot(vform, zda, zn, zm);
9283       break;
9284     default:
9285       VIXL_UNIMPLEMENTED();
9286       break;
9287   }
9288 }
9289 
9290 void Simulator::VisitSVEMovprfx(const Instruction* instr) {
9291   VectorFormat vform = instr->GetSVEVectorFormat();
9292   SimVRegister& zn = ReadVRegister(instr->GetRn());
9293   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
9294   SimVRegister& zd = ReadVRegister(instr->GetRd());
9295 
9296   switch (instr->Mask(SVEMovprfxMask)) {
9297     case MOVPRFX_z_p_z:
9298       if (instr->ExtractBit(16)) {
9299         mov_merging(vform, zd, pg, zn);
9300       } else {
9301         mov_zeroing(vform, zd, pg, zn);
9302       }
9303 
9304       // Record the movprfx, so the next ExecuteInstruction() can check it.
9305       movprfx_ = instr;
9306       break;
9307     default:
9308       VIXL_UNIMPLEMENTED();
9309       break;
9310   }
9311 }
9312 
9313 void Simulator::VisitSVEIntReduction(const Instruction* instr) {
9314   VectorFormat vform = instr->GetSVEVectorFormat();
9315   SimVRegister& vd = ReadVRegister(instr->GetRd());
9316   SimVRegister& zn = ReadVRegister(instr->GetRn());
9317   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
9318 
9319   if (instr->Mask(SVEIntReductionLogicalFMask) == SVEIntReductionLogicalFixed) {
9320     switch (instr->Mask(SVEIntReductionLogicalMask)) {
9321       case ANDV_r_p_z:
9322         andv(vform, vd, pg, zn);
9323         break;
9324       case EORV_r_p_z:
9325         eorv(vform, vd, pg, zn);
9326         break;
9327       case ORV_r_p_z:
9328         orv(vform, vd, pg, zn);
9329         break;
9330       default:
9331         VIXL_UNIMPLEMENTED();
9332         break;
9333     }
9334   } else {
9335     switch (instr->Mask(SVEIntReductionMask)) {
9336       case SADDV_r_p_z:
9337         saddv(vform, vd, pg, zn);
9338         break;
9339       case SMAXV_r_p_z:
9340         smaxv(vform, vd, pg, zn);
9341         break;
9342       case SMINV_r_p_z:
9343         sminv(vform, vd, pg, zn);
9344         break;
9345       case UADDV_r_p_z:
9346         uaddv(vform, vd, pg, zn);
9347         break;
9348       case UMAXV_r_p_z:
9349         umaxv(vform, vd, pg, zn);
9350         break;
9351       case UMINV_r_p_z:
9352         uminv(vform, vd, pg, zn);
9353         break;
9354       default:
9355         VIXL_UNIMPLEMENTED();
9356         break;
9357     }
9358   }
9359 }
9360 
9361 void Simulator::VisitSVEIntUnaryArithmeticPredicated(const Instruction* instr) {
9362   VectorFormat vform = instr->GetSVEVectorFormat();
9363   SimVRegister& zn = ReadVRegister(instr->GetRn());
9364 
9365   SimVRegister result;
9366   switch (instr->Mask(SVEIntUnaryArithmeticPredicatedMask)) {
9367     case ABS_z_p_z:
9368       abs(vform, result, zn);
9369       break;
9370     case CLS_z_p_z:
9371       cls(vform, result, zn);
9372       break;
9373     case CLZ_z_p_z:
9374       clz(vform, result, zn);
9375       break;
9376     case CNOT_z_p_z:
9377       cnot(vform, result, zn);
9378       break;
9379     case CNT_z_p_z:
9380       cnt(vform, result, zn);
9381       break;
9382     case FABS_z_p_z:
9383       fabs_(vform, result, zn);
9384       break;
9385     case FNEG_z_p_z:
9386       fneg(vform, result, zn);
9387       break;
9388     case NEG_z_p_z:
9389       neg(vform, result, zn);
9390       break;
9391     case NOT_z_p_z:
9392       not_(vform, result, zn);
9393       break;
9394     case SXTB_z_p_z:
9395     case SXTH_z_p_z:
9396     case SXTW_z_p_z:
9397       sxt(vform, result, zn, (kBitsPerByte << instr->ExtractBits(18, 17)));
9398       break;
9399     case UXTB_z_p_z:
9400     case UXTH_z_p_z:
9401     case UXTW_z_p_z:
9402       uxt(vform, result, zn, (kBitsPerByte << instr->ExtractBits(18, 17)));
9403       break;
9404     default:
9405       VIXL_UNIMPLEMENTED();
9406       break;
9407   }
9408 
9409   SimVRegister& zd = ReadVRegister(instr->GetRd());
9410   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
9411   mov_merging(vform, zd, pg, result);
9412 }
9413 
9414 void Simulator::VisitSVECopyFPImm_Predicated(const Instruction* instr) {
9415   // There is only one instruction in this group.
9416   VIXL_ASSERT(instr->Mask(SVECopyFPImm_PredicatedMask) == FCPY_z_p_i);
9417 
9418   VectorFormat vform = instr->GetSVEVectorFormat();
9419   SimPRegister& pg = ReadPRegister(instr->ExtractBits(19, 16));
9420   SimVRegister& zd = ReadVRegister(instr->GetRd());
9421 
9422   SimVRegister result;
9423   switch (instr->Mask(SVECopyFPImm_PredicatedMask)) {
9424     case FCPY_z_p_i: {
9425       int imm8 = instr->ExtractBits(12, 5);
9426       uint64_t value = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform),
9427                                            Instruction::Imm8ToFP64(imm8));
9428       dup_immediate(vform, result, value);
9429       break;
9430     }
9431     default:
9432       VIXL_UNIMPLEMENTED();
9433       break;
9434   }
9435   mov_merging(vform, zd, pg, result);
9436 }
9437 
9438 void Simulator::VisitSVEIntAddSubtractImm_Unpredicated(
9439     const Instruction* instr) {
9440   VectorFormat vform = instr->GetSVEVectorFormat();
9441   SimVRegister& zd = ReadVRegister(instr->GetRd());
9442   SimVRegister scratch;
9443 
9444   uint64_t imm = instr->GetImmSVEIntWideUnsigned();
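  // Bit 13 selects an optional left shift of the 8-bit immediate by 8 ("lsl #8").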
9445   imm <<= instr->ExtractBit(13) * 8;
9446 
9447   switch (instr->Mask(SVEIntAddSubtractImm_UnpredicatedMask)) {
9448     case ADD_z_zi:
9449       add_uint(vform, zd, zd, imm);
9450       break;
9451     case SQADD_z_zi:
9452       add_uint(vform, zd, zd, imm).SignedSaturate(vform);
9453       break;
9454     case SQSUB_z_zi:
9455       sub_uint(vform, zd, zd, imm).SignedSaturate(vform);
9456       break;
9457     case SUBR_z_zi:
9458       dup_immediate(vform, scratch, imm);
9459       sub(vform, zd, scratch, zd);
9460       break;
9461     case SUB_z_zi:
9462       sub_uint(vform, zd, zd, imm);
9463       break;
9464     case UQADD_z_zi:
9465       add_uint(vform, zd, zd, imm).UnsignedSaturate(vform);
9466       break;
9467     case UQSUB_z_zi:
9468       sub_uint(vform, zd, zd, imm).UnsignedSaturate(vform);
9469       break;
9470     default:
9471       break;
9472   }
9473 }
9474 
9475 void Simulator::VisitSVEBroadcastIntImm_Unpredicated(const Instruction* instr) {
9476   SimVRegister& zd = ReadVRegister(instr->GetRd());
9477 
9478   VectorFormat format = instr->GetSVEVectorFormat();
9479   int64_t imm = instr->GetImmSVEIntWideSigned();
9480   int shift = instr->ExtractBit(13) * 8;
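  // Multiply rather than shift, so the scaling of a negative immediate stays well defined.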
9481   imm *= 1 << shift;
9482 
9483   switch (instr->Mask(SVEBroadcastIntImm_UnpredicatedMask)) {
9484     case DUP_z_i:
9485       // The encoding of byte-sized lanes with lsl #8 is undefined.
9486       if ((format == kFormatVnB) && (shift == 8)) {
9487         VIXL_UNIMPLEMENTED();
9488       } else {
9489         dup_immediate(format, zd, imm);
9490       }
9491       break;
9492     default:
9493       VIXL_UNIMPLEMENTED();
9494       break;
9495   }
9496 }
9497 
9498 void Simulator::VisitSVEBroadcastFPImm_Unpredicated(const Instruction* instr) {
9499   VectorFormat vform = instr->GetSVEVectorFormat();
9500   SimVRegister& zd = ReadVRegister(instr->GetRd());
9501 
9502   switch (instr->Mask(SVEBroadcastFPImm_UnpredicatedMask)) {
9503     case FDUP_z_i:
9504       switch (vform) {
9505         case kFormatVnH:
9506           dup_immediate(vform, zd, Float16ToRawbits(instr->GetSVEImmFP16()));
9507           break;
9508         case kFormatVnS:
9509           dup_immediate(vform, zd, FloatToRawbits(instr->GetSVEImmFP32()));
9510           break;
9511         case kFormatVnD:
9512           dup_immediate(vform, zd, DoubleToRawbits(instr->GetSVEImmFP64()));
9513           break;
9514         default:
9515           VIXL_UNIMPLEMENTED();
9516       }
9517       break;
9518     default:
9519       VIXL_UNIMPLEMENTED();
9520       break;
9521   }
9522 }
9523 
9524 void Simulator::VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets(
9525     const Instruction* instr) {
9526   switch (instr->Mask(
9527       SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsMask)) {
9528     case LD1H_z_p_bz_s_x32_scaled:
9529     case LD1SH_z_p_bz_s_x32_scaled:
9530     case LDFF1H_z_p_bz_s_x32_scaled:
9531     case LDFF1SH_z_p_bz_s_x32_scaled:
9532       break;
9533     default:
9534       VIXL_UNIMPLEMENTED();
9535       break;
9536   }
9537 
9538   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
9539   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
9540 }
9541 
9542 void Simulator::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets(
9543     const Instruction* instr) {
9544   switch (instr->Mask(SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsMask)) {
9545     case LD1B_z_p_bz_s_x32_unscaled:
9546     case LD1H_z_p_bz_s_x32_unscaled:
9547     case LD1SB_z_p_bz_s_x32_unscaled:
9548     case LD1SH_z_p_bz_s_x32_unscaled:
9549     case LD1W_z_p_bz_s_x32_unscaled:
9550     case LDFF1B_z_p_bz_s_x32_unscaled:
9551     case LDFF1H_z_p_bz_s_x32_unscaled:
9552     case LDFF1SB_z_p_bz_s_x32_unscaled:
9553     case LDFF1SH_z_p_bz_s_x32_unscaled:
9554     case LDFF1W_z_p_bz_s_x32_unscaled:
9555       break;
9556     default:
9557       VIXL_UNIMPLEMENTED();
9558       break;
9559   }
9560 
9561   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
9562   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
9563 }
9564 
9565 void Simulator::VisitSVE32BitGatherLoad_VectorPlusImm(
9566     const Instruction* instr) {
9567   switch (instr->Mask(SVE32BitGatherLoad_VectorPlusImmMask)) {
9568     case LD1B_z_p_ai_s:
9569       VIXL_UNIMPLEMENTED();
9570       break;
9571     case LD1H_z_p_ai_s:
9572       VIXL_UNIMPLEMENTED();
9573       break;
9574     case LD1SB_z_p_ai_s:
9575       VIXL_UNIMPLEMENTED();
9576       break;
9577     case LD1SH_z_p_ai_s:
9578       VIXL_UNIMPLEMENTED();
9579       break;
9580     case LD1W_z_p_ai_s:
9581       VIXL_UNIMPLEMENTED();
9582       break;
9583     case LDFF1B_z_p_ai_s:
9584       VIXL_UNIMPLEMENTED();
9585       break;
9586     case LDFF1H_z_p_ai_s:
9587       VIXL_UNIMPLEMENTED();
9588       break;
9589     case LDFF1SB_z_p_ai_s:
9590       VIXL_UNIMPLEMENTED();
9591       break;
9592     case LDFF1SH_z_p_ai_s:
9593       VIXL_UNIMPLEMENTED();
9594       break;
9595     case LDFF1W_z_p_ai_s:
9596       VIXL_UNIMPLEMENTED();
9597       break;
9598     default:
9599       VIXL_UNIMPLEMENTED();
9600       break;
9601   }
9602 }
9603 
9604 void Simulator::VisitSVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets(
9605     const Instruction* instr) {
9606   switch (
9607       instr->Mask(SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsMask)) {
9608     case LD1W_z_p_bz_s_x32_scaled:
9609     case LDFF1W_z_p_bz_s_x32_scaled:
9610       break;
9611     default:
9612       VIXL_UNIMPLEMENTED();
9613       break;
9614   }
9615 
9616   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
9617   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
9618 }
9619 
9620 void Simulator::VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets(
9621     const Instruction* instr) {
9622   switch (
9623       instr->Mask(SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsMask)) {
9624     // Ignore prefetch hint instructions.
9625     case PRFB_i_p_bz_s_x32_scaled:
9626     case PRFD_i_p_bz_s_x32_scaled:
9627     case PRFH_i_p_bz_s_x32_scaled:
9628     case PRFW_i_p_bz_s_x32_scaled:
9629       break;
9630     default:
9631       VIXL_UNIMPLEMENTED();
9632       break;
9633   }
9634 }
9635 
9636 void Simulator::VisitSVE32BitGatherPrefetch_VectorPlusImm(
9637     const Instruction* instr) {
9638   switch (instr->Mask(SVE32BitGatherPrefetch_VectorPlusImmMask)) {
9639     // Ignore prefetch hint instructions.
9640     case PRFB_i_p_ai_s:
9641     case PRFD_i_p_ai_s:
9642     case PRFH_i_p_ai_s:
9643     case PRFW_i_p_ai_s:
9644       break;
9645     default:
9646       VIXL_UNIMPLEMENTED();
9647       break;
9648   }
9649 }
9650 
9651 void Simulator::VisitSVEContiguousPrefetch_ScalarPlusImm(
9652     const Instruction* instr) {
9653   switch (instr->Mask(SVEContiguousPrefetch_ScalarPlusImmMask)) {
9654     // Ignore prefetch hint instructions.
9655     case PRFB_i_p_bi_s:
9656     case PRFD_i_p_bi_s:
9657     case PRFH_i_p_bi_s:
9658     case PRFW_i_p_bi_s:
9659       break;
9660     default:
9661       VIXL_UNIMPLEMENTED();
9662       break;
9663   }
9664 }
9665 
9666 void Simulator::VisitSVEContiguousPrefetch_ScalarPlusScalar(
9667     const Instruction* instr) {
9668   switch (instr->Mask(SVEContiguousPrefetch_ScalarPlusScalarMask)) {
9669     // Ignore prefetch hint instructions.
9670     case PRFB_i_p_br_s:
9671     case PRFD_i_p_br_s:
9672     case PRFH_i_p_br_s:
9673     case PRFW_i_p_br_s:
9674       if (instr->GetRm() == kZeroRegCode) {
9675         VIXL_UNIMPLEMENTED();
9676       }
9677       break;
9678     default:
9679       VIXL_UNIMPLEMENTED();
9680       break;
9681   }
9682 }
9683 
9684 void Simulator::VisitSVELoadAndBroadcastElement(const Instruction* instr) {
9685   bool is_signed;
9686   switch (instr->Mask(SVELoadAndBroadcastElementMask)) {
9687     case LD1RB_z_p_bi_u8:
9688     case LD1RB_z_p_bi_u16:
9689     case LD1RB_z_p_bi_u32:
9690     case LD1RB_z_p_bi_u64:
9691     case LD1RH_z_p_bi_u16:
9692     case LD1RH_z_p_bi_u32:
9693     case LD1RH_z_p_bi_u64:
9694     case LD1RW_z_p_bi_u32:
9695     case LD1RW_z_p_bi_u64:
9696     case LD1RD_z_p_bi_u64:
9697       is_signed = false;
9698       break;
9699     case LD1RSB_z_p_bi_s16:
9700     case LD1RSB_z_p_bi_s32:
9701     case LD1RSB_z_p_bi_s64:
9702     case LD1RSH_z_p_bi_s32:
9703     case LD1RSH_z_p_bi_s64:
9704     case LD1RSW_z_p_bi_s64:
9705       is_signed = true;
9706       break;
9707     default:
9708       // This encoding group is complete, so no other values should be possible.
9709       VIXL_UNREACHABLE();
9710       is_signed = false;
9711       break;
9712   }
9713 
9714   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
9715   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed, 13);
9716   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
9717   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
9718   uint64_t offset = instr->ExtractBits(21, 16) << msize_in_bytes_log2;
9719   uint64_t base = ReadXRegister(instr->GetRn()) + offset;
9720   VectorFormat unpack_vform =
9721       SVEFormatFromLaneSizeInBytesLog2(msize_in_bytes_log2);
9722   SimVRegister temp;
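  // Load and replicate a single element, then clear the lanes left inactive by the governing predicate.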
9723   ld1r(vform, unpack_vform, temp, base, is_signed);
9724   mov_zeroing(vform,
9725               ReadVRegister(instr->GetRt()),
9726               ReadPRegister(instr->GetPgLow8()),
9727               temp);
9728 }
9729 
9730 void Simulator::VisitSVELoadPredicateRegister(const Instruction* instr) {
9731   switch (instr->Mask(SVELoadPredicateRegisterMask)) {
9732     case LDR_p_bi: {
9733       SimPRegister& pt = ReadPRegister(instr->GetPt());
9734       int pl = GetPredicateLengthInBytes();
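      // The 9-bit signed offset is split across bits 21:16 (high) and 12:10 (low), and is scaled by the predicate length in bytes.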
9735       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
9736       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
9737       uint64_t address = ReadXRegister(instr->GetRn()) + multiplier * pl;
9738       for (int i = 0; i < pl; i++) {
9739         pt.Insert(i, MemRead<uint8_t>(address + i));
9740       }
9741       LogPRead(instr->GetPt(), address);
9742       break;
9743     }
9744     default:
9745       VIXL_UNIMPLEMENTED();
9746       break;
9747   }
9748 }
9749 
9750 void Simulator::VisitSVELoadVectorRegister(const Instruction* instr) {
9751   switch (instr->Mask(SVELoadVectorRegisterMask)) {
9752     case LDR_z_bi: {
9753       SimVRegister& zt = ReadVRegister(instr->GetRt());
9754       int vl = GetVectorLengthInBytes();
9755       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
9756       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
9757       uint64_t address = ReadXRegister(instr->GetRn()) + multiplier * vl;
9758       for (int i = 0; i < vl; i++) {
9759         zt.Insert(i, MemRead<uint8_t>(address + i));
9760       }
9761       LogZRead(instr->GetRt(), address);
9762       break;
9763     }
9764     default:
9765       VIXL_UNIMPLEMENTED();
9766       break;
9767   }
9768 }
9769 
9770 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets(
9771     const Instruction* instr) {
9772   switch (instr->Mask(
9773       SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsMask)) {
9774     case LD1D_z_p_bz_d_x32_scaled:
9775     case LD1H_z_p_bz_d_x32_scaled:
9776     case LD1SH_z_p_bz_d_x32_scaled:
9777     case LD1SW_z_p_bz_d_x32_scaled:
9778     case LD1W_z_p_bz_d_x32_scaled:
9779     case LDFF1H_z_p_bz_d_x32_scaled:
9780     case LDFF1W_z_p_bz_d_x32_scaled:
9781     case LDFF1D_z_p_bz_d_x32_scaled:
9782     case LDFF1SH_z_p_bz_d_x32_scaled:
9783     case LDFF1SW_z_p_bz_d_x32_scaled:
9784       break;
9785     default:
9786       VIXL_UNIMPLEMENTED();
9787       break;
9788   }
9789 
9790   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
9791   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, mod);
9792 }
9793 
9794 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets(
9795     const Instruction* instr) {
9796   switch (instr->Mask(SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsMask)) {
9797     case LD1D_z_p_bz_d_64_scaled:
9798     case LD1H_z_p_bz_d_64_scaled:
9799     case LD1SH_z_p_bz_d_64_scaled:
9800     case LD1SW_z_p_bz_d_64_scaled:
9801     case LD1W_z_p_bz_d_64_scaled:
9802     case LDFF1H_z_p_bz_d_64_scaled:
9803     case LDFF1W_z_p_bz_d_64_scaled:
9804     case LDFF1D_z_p_bz_d_64_scaled:
9805     case LDFF1SH_z_p_bz_d_64_scaled:
9806     case LDFF1SW_z_p_bz_d_64_scaled:
9807       break;
9808     default:
9809       VIXL_UNIMPLEMENTED();
9810       break;
9811   }
9812 
9813   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, SVE_LSL);
9814 }
9815 
9816 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets(
9817     const Instruction* instr) {
9818   switch (instr->Mask(SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsMask)) {
9819     case LD1B_z_p_bz_d_64_unscaled:
9820     case LD1D_z_p_bz_d_64_unscaled:
9821     case LD1H_z_p_bz_d_64_unscaled:
9822     case LD1SB_z_p_bz_d_64_unscaled:
9823     case LD1SH_z_p_bz_d_64_unscaled:
9824     case LD1SW_z_p_bz_d_64_unscaled:
9825     case LD1W_z_p_bz_d_64_unscaled:
9826     case LDFF1B_z_p_bz_d_64_unscaled:
9827     case LDFF1D_z_p_bz_d_64_unscaled:
9828     case LDFF1H_z_p_bz_d_64_unscaled:
9829     case LDFF1SB_z_p_bz_d_64_unscaled:
9830     case LDFF1SH_z_p_bz_d_64_unscaled:
9831     case LDFF1SW_z_p_bz_d_64_unscaled:
9832     case LDFF1W_z_p_bz_d_64_unscaled:
9833       break;
9834     default:
9835       VIXL_UNIMPLEMENTED();
9836       break;
9837   }
9838 
9839   SVEGatherLoadScalarPlusVectorHelper(instr,
9840                                       kFormatVnD,
9841                                       NO_SVE_OFFSET_MODIFIER);
9842 }
9843 
9844 void Simulator::VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets(
9845     const Instruction* instr) {
9846   switch (instr->Mask(
9847       SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsMask)) {
9848     case LD1B_z_p_bz_d_x32_unscaled:
9849     case LD1D_z_p_bz_d_x32_unscaled:
9850     case LD1H_z_p_bz_d_x32_unscaled:
9851     case LD1SB_z_p_bz_d_x32_unscaled:
9852     case LD1SH_z_p_bz_d_x32_unscaled:
9853     case LD1SW_z_p_bz_d_x32_unscaled:
9854     case LD1W_z_p_bz_d_x32_unscaled:
9855     case LDFF1B_z_p_bz_d_x32_unscaled:
9856     case LDFF1H_z_p_bz_d_x32_unscaled:
9857     case LDFF1W_z_p_bz_d_x32_unscaled:
9858     case LDFF1D_z_p_bz_d_x32_unscaled:
9859     case LDFF1SB_z_p_bz_d_x32_unscaled:
9860     case LDFF1SH_z_p_bz_d_x32_unscaled:
9861     case LDFF1SW_z_p_bz_d_x32_unscaled:
9862       break;
9863     default:
9864       VIXL_UNIMPLEMENTED();
9865       break;
9866   }
9867 
9868   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
9869   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, mod);
9870 }
9871 
9872 void Simulator::VisitSVE64BitGatherLoad_VectorPlusImm(
9873     const Instruction* instr) {
9874   switch (instr->Mask(SVE64BitGatherLoad_VectorPlusImmMask)) {
9875     case LD1B_z_p_ai_d:
9876     case LD1D_z_p_ai_d:
9877     case LD1H_z_p_ai_d:
9878     case LD1SB_z_p_ai_d:
9879     case LD1SH_z_p_ai_d:
9880     case LD1SW_z_p_ai_d:
9881     case LD1W_z_p_ai_d:
9882     case LDFF1B_z_p_ai_d:
9883     case LDFF1D_z_p_ai_d:
9884     case LDFF1H_z_p_ai_d:
9885     case LDFF1SB_z_p_ai_d:
9886     case LDFF1SH_z_p_ai_d:
9887     case LDFF1SW_z_p_ai_d:
9888     case LDFF1W_z_p_ai_d:
9889       break;
9890     default:
9891       VIXL_UNIMPLEMENTED();
9892       break;
9893   }
9894   bool is_signed = instr->ExtractBit(14) == 0;
9895   bool is_ff = instr->ExtractBit(13) == 1;
9896   // Note that these instructions don't use the Dtype encoding.
9897   int msize_in_bytes_log2 = instr->ExtractBits(24, 23);
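  // The 5-bit immediate offset is a multiple of the memory access size.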
9898   uint64_t imm = instr->ExtractBits(20, 16) << msize_in_bytes_log2;
9899   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnD);
9900   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
9901   if (is_ff) {
9902     VIXL_UNIMPLEMENTED();
9903   } else {
9904     SVEStructuredLoadHelper(kFormatVnD,
9905                             ReadPRegister(instr->GetPgLow8()),
9906                             instr->GetRt(),
9907                             addr,
9908                             is_signed);
9909   }
9910 }
9911 
9912 void Simulator::VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets(
9913     const Instruction* instr) {
9914   switch (
9915       instr->Mask(SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsMask)) {
9916     // Ignore prefetch hint instructions.
9917     case PRFB_i_p_bz_d_64_scaled:
9918     case PRFD_i_p_bz_d_64_scaled:
9919     case PRFH_i_p_bz_d_64_scaled:
9920     case PRFW_i_p_bz_d_64_scaled:
9921       break;
9922     default:
9923       VIXL_UNIMPLEMENTED();
9924       break;
9925   }
9926 }
9927 
9928 void Simulator::
9929     VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets(
9930         const Instruction* instr) {
9931   switch (instr->Mask(
9932       SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsMask)) {
9933     // Ignore prefetch hint instructions.
9934     case PRFB_i_p_bz_d_x32_scaled:
9935     case PRFD_i_p_bz_d_x32_scaled:
9936     case PRFH_i_p_bz_d_x32_scaled:
9937     case PRFW_i_p_bz_d_x32_scaled:
9938       break;
9939     default:
9940       VIXL_UNIMPLEMENTED();
9941       break;
9942   }
9943 }
9944 
9945 void Simulator::VisitSVE64BitGatherPrefetch_VectorPlusImm(
9946     const Instruction* instr) {
9947   switch (instr->Mask(SVE64BitGatherPrefetch_VectorPlusImmMask)) {
9948     // Ignore prefetch hint instructions.
9949     case PRFB_i_p_ai_d:
9950     case PRFD_i_p_ai_d:
9951     case PRFH_i_p_ai_d:
9952     case PRFW_i_p_ai_d:
9953       break;
9954     default:
9955       VIXL_UNIMPLEMENTED();
9956       break;
9957   }
9958 }
9959 
9960 void Simulator::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar(
9961     const Instruction* instr) {
9962   bool is_signed;
9963   switch (instr->Mask(SVEContiguousLoad_ScalarPlusScalarMask)) {
9964     case LDFF1B_z_p_br_u8:
9965     case LDFF1B_z_p_br_u16:
9966     case LDFF1B_z_p_br_u32:
9967     case LDFF1B_z_p_br_u64:
9968     case LDFF1H_z_p_br_u16:
9969     case LDFF1H_z_p_br_u32:
9970     case LDFF1H_z_p_br_u64:
9971     case LDFF1W_z_p_br_u32:
9972     case LDFF1W_z_p_br_u64:
9973     case LDFF1D_z_p_br_u64:
9974       is_signed = false;
9975       break;
9976     case LDFF1SB_z_p_br_s16:
9977     case LDFF1SB_z_p_br_s32:
9978     case LDFF1SB_z_p_br_s64:
9979     case LDFF1SH_z_p_br_s32:
9980     case LDFF1SH_z_p_br_s64:
9981     case LDFF1SW_z_p_br_s64:
9982       is_signed = true;
9983       break;
9984     default:
9985       // This encoding group is complete, so no other values should be possible.
9986       VIXL_UNREACHABLE();
9987       is_signed = false;
9988       break;
9989   }
9990 
9991   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
9992   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
9993   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
9994   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
9995   uint64_t offset = ReadXRegister(instr->GetRm());
9996   offset <<= msize_in_bytes_log2;
9997   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
9998   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
9999   SVEFaultTolerantLoadHelper(vform,
10000                              ReadPRegister(instr->GetPgLow8()),
10001                              instr->GetRt(),
10002                              addr,
10003                              kSVEFirstFaultLoad,
10004                              is_signed);
10005 }
10006 
10007 void Simulator::VisitSVEContiguousNonFaultLoad_ScalarPlusImm(
10008     const Instruction* instr) {
10009   bool is_signed = false;
10010   switch (instr->Mask(SVEContiguousNonFaultLoad_ScalarPlusImmMask)) {
10011     case LDNF1B_z_p_bi_u16:
10012     case LDNF1B_z_p_bi_u32:
10013     case LDNF1B_z_p_bi_u64:
10014     case LDNF1B_z_p_bi_u8:
10015     case LDNF1D_z_p_bi_u64:
10016     case LDNF1H_z_p_bi_u16:
10017     case LDNF1H_z_p_bi_u32:
10018     case LDNF1H_z_p_bi_u64:
10019     case LDNF1W_z_p_bi_u32:
10020     case LDNF1W_z_p_bi_u64:
10021       break;
10022     case LDNF1SB_z_p_bi_s16:
10023     case LDNF1SB_z_p_bi_s32:
10024     case LDNF1SB_z_p_bi_s64:
10025     case LDNF1SH_z_p_bi_s32:
10026     case LDNF1SH_z_p_bi_s64:
10027     case LDNF1SW_z_p_bi_s64:
10028       is_signed = true;
10029       break;
10030     default:
10031       VIXL_UNIMPLEMENTED();
10032       break;
10033   }
10034   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
10035   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
10036   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
10037   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
10038   int vl = GetVectorLengthInBytes();
10039   int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
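  // The signed immediate counts whole transfers: VL bytes scaled down by the element-to-memory size ratio.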
10040   uint64_t offset =
10041       (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
10042   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10043   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10044   SVEFaultTolerantLoadHelper(vform,
10045                              ReadPRegister(instr->GetPgLow8()),
10046                              instr->GetRt(),
10047                              addr,
10048                              kSVENonFaultLoad,
10049                              is_signed);
10050 }
10051 
10052 void Simulator::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm(
10053     const Instruction* instr) {
10054   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10055   VectorFormat vform = kFormatUndefined;
10056 
10057   switch (instr->Mask(SVEContiguousNonTemporalLoad_ScalarPlusImmMask)) {
10058     case LDNT1B_z_p_bi_contiguous:
10059       vform = kFormatVnB;
10060       break;
10061     case LDNT1D_z_p_bi_contiguous:
10062       vform = kFormatVnD;
10063       break;
10064     case LDNT1H_z_p_bi_contiguous:
10065       vform = kFormatVnH;
10066       break;
10067     case LDNT1W_z_p_bi_contiguous:
10068       vform = kFormatVnS;
10069       break;
10070     default:
10071       VIXL_UNIMPLEMENTED();
10072       break;
10073   }
10074   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
10075   int vl = GetVectorLengthInBytes();
10076   uint64_t offset = instr->ExtractSignedBits(19, 16) * vl;
10077   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10078   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10079   SVEStructuredLoadHelper(vform,
10080                           pg,
10081                           instr->GetRt(),
10082                           addr,
10083                           /* is_signed = */ false);
10084 }
10085 
10086 void Simulator::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar(
10087     const Instruction* instr) {
10088   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10089   VectorFormat vform = kFormatUndefined;
10090 
10091   switch (instr->Mask(SVEContiguousNonTemporalLoad_ScalarPlusScalarMask)) {
10092     case LDNT1B_z_p_br_contiguous:
10093       vform = kFormatVnB;
10094       break;
10095     case LDNT1D_z_p_br_contiguous:
10096       vform = kFormatVnD;
10097       break;
10098     case LDNT1H_z_p_br_contiguous:
10099       vform = kFormatVnH;
10100       break;
10101     case LDNT1W_z_p_br_contiguous:
10102       vform = kFormatVnS;
10103       break;
10104     default:
10105       VIXL_UNIMPLEMENTED();
10106       break;
10107   }
10108   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
10109   uint64_t offset = ReadXRegister(instr->GetRm()) << msize_in_bytes_log2;
10110   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10111   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10112   SVEStructuredLoadHelper(vform,
10113                           pg,
10114                           instr->GetRt(),
10115                           addr,
10116                           /* is_signed = */ false);
10117 }
10118 
10119 void Simulator::VisitSVELoadAndBroadcastQuadword_ScalarPlusImm(
10120     const Instruction* instr) {
10121   SimVRegister& zt = ReadVRegister(instr->GetRt());
10122   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10123 
10124   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
10125   uint64_t offset = instr->ExtractSignedBits(19, 16) * 16;
10126 
10127   VectorFormat vform = kFormatUndefined;
10128   switch (instr->Mask(SVELoadAndBroadcastQuadword_ScalarPlusImmMask)) {
10129     case LD1RQB_z_p_bi_u8:
10130       vform = kFormatVnB;
10131       break;
10132     case LD1RQD_z_p_bi_u64:
10133       vform = kFormatVnD;
10134       break;
10135     case LD1RQH_z_p_bi_u16:
10136       vform = kFormatVnH;
10137       break;
10138     case LD1RQW_z_p_bi_u32:
10139       vform = kFormatVnS;
10140       break;
10141     default:
10142       addr = offset = 0;
10143       break;
10144   }
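  // Load 16 bytes, clear the inactive lanes under pg, then replicate the first quadword across the vector.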
10145   ld1(kFormat16B, zt, addr + offset);
10146   mov_zeroing(vform, zt, pg, zt);
10147   dup_element(kFormatVnQ, zt, zt, 0);
10148 }
10149 
10150 void Simulator::VisitSVELoadAndBroadcastQuadword_ScalarPlusScalar(
10151     const Instruction* instr) {
10152   SimVRegister& zt = ReadVRegister(instr->GetRt());
10153   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10154 
10155   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
10156   uint64_t offset = ReadXRegister(instr->GetRm());
10157 
10158   VectorFormat vform = kFormatUndefined;
10159   switch (instr->Mask(SVELoadAndBroadcastQuadword_ScalarPlusScalarMask)) {
10160     case LD1RQB_z_p_br_contiguous:
10161       vform = kFormatVnB;
10162       break;
10163     case LD1RQD_z_p_br_contiguous:
10164       vform = kFormatVnD;
10165       offset <<= 3;
10166       break;
10167     case LD1RQH_z_p_br_contiguous:
10168       vform = kFormatVnH;
10169       offset <<= 1;
10170       break;
10171     case LD1RQW_z_p_br_contiguous:
10172       vform = kFormatVnS;
10173       offset <<= 2;
10174       break;
10175     default:
10176       addr = offset = 0;
10177       break;
10178   }
10179   ld1(kFormat16B, zt, addr + offset);
10180   mov_zeroing(vform, zt, pg, zt);
10181   dup_element(kFormatVnQ, zt, zt, 0);
10182 }
10183 
10184 void Simulator::VisitSVELoadMultipleStructures_ScalarPlusImm(
10185     const Instruction* instr) {
10186   switch (instr->Mask(SVELoadMultipleStructures_ScalarPlusImmMask)) {
10187     case LD2B_z_p_bi_contiguous:
10188     case LD2D_z_p_bi_contiguous:
10189     case LD2H_z_p_bi_contiguous:
10190     case LD2W_z_p_bi_contiguous:
10191     case LD3B_z_p_bi_contiguous:
10192     case LD3D_z_p_bi_contiguous:
10193     case LD3H_z_p_bi_contiguous:
10194     case LD3W_z_p_bi_contiguous:
10195     case LD4B_z_p_bi_contiguous:
10196     case LD4D_z_p_bi_contiguous:
10197     case LD4H_z_p_bi_contiguous:
10198     case LD4W_z_p_bi_contiguous: {
10199       int vl = GetVectorLengthInBytes();
10200       int msz = instr->ExtractBits(24, 23);
10201       int reg_count = instr->ExtractBits(22, 21) + 1;
10202       uint64_t offset = instr->ExtractSignedBits(19, 16) * vl * reg_count;
10203       LogicSVEAddressVector addr(
10204           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
10205       addr.SetMsizeInBytesLog2(msz);
10206       addr.SetRegCount(reg_count);
10207       SVEStructuredLoadHelper(SVEFormatFromLaneSizeInBytesLog2(msz),
10208                               ReadPRegister(instr->GetPgLow8()),
10209                               instr->GetRt(),
10210                               addr);
10211       break;
10212     }
10213     default:
10214       VIXL_UNIMPLEMENTED();
10215       break;
10216   }
10217 }
10218 
10219 void Simulator::VisitSVELoadMultipleStructures_ScalarPlusScalar(
10220     const Instruction* instr) {
10221   switch (instr->Mask(SVELoadMultipleStructures_ScalarPlusScalarMask)) {
10222     case LD2B_z_p_br_contiguous:
10223     case LD2D_z_p_br_contiguous:
10224     case LD2H_z_p_br_contiguous:
10225     case LD2W_z_p_br_contiguous:
10226     case LD3B_z_p_br_contiguous:
10227     case LD3D_z_p_br_contiguous:
10228     case LD3H_z_p_br_contiguous:
10229     case LD3W_z_p_br_contiguous:
10230     case LD4B_z_p_br_contiguous:
10231     case LD4D_z_p_br_contiguous:
10232     case LD4H_z_p_br_contiguous:
10233     case LD4W_z_p_br_contiguous: {
10234       int msz = instr->ExtractBits(24, 23);
10235       uint64_t offset = ReadXRegister(instr->GetRm()) * (1 << msz);
10236       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
10237       LogicSVEAddressVector addr(
10238           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
10239       addr.SetMsizeInBytesLog2(msz);
10240       addr.SetRegCount(instr->ExtractBits(22, 21) + 1);
10241       SVEStructuredLoadHelper(vform,
10242                               ReadPRegister(instr->GetPgLow8()),
10243                               instr->GetRt(),
10244                               addr,
10245                               false);
10246       break;
10247     }
10248     default:
10249       VIXL_UNIMPLEMENTED();
10250       break;
10251   }
10252 }
10253 
10254 void Simulator::VisitSVE32BitScatterStore_ScalarPlus32BitScaledOffsets(
10255     const Instruction* instr) {
10256   switch (instr->Mask(SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsMask)) {
10257     case ST1H_z_p_bz_s_x32_scaled:
10258     case ST1W_z_p_bz_s_x32_scaled: {
10259       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10260       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10261       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
10262       uint64_t base = ReadXRegister(instr->GetRn());
10263       SVEOffsetModifier mod =
10264           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
10265       LogicSVEAddressVector addr(base,
10266                                  &ReadVRegister(instr->GetRm()),
10267                                  kFormatVnS,
10268                                  mod,
10269                                  scale);
10270       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10271       SVEStructuredStoreHelper(kFormatVnS,
10272                                ReadPRegister(instr->GetPgLow8()),
10273                                instr->GetRt(),
10274                                addr);
10275       break;
10276     }
10277     default:
10278       VIXL_UNIMPLEMENTED();
10279       break;
10280   }
10281 }
10282 
10283 void Simulator::VisitSVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets(
10284     const Instruction* instr) {
10285   switch (
10286       instr->Mask(SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsMask)) {
10287     case ST1B_z_p_bz_s_x32_unscaled:
10288     case ST1H_z_p_bz_s_x32_unscaled:
10289     case ST1W_z_p_bz_s_x32_unscaled: {
10290       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10291       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10292       uint64_t base = ReadXRegister(instr->GetRn());
10293       SVEOffsetModifier mod =
10294           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
10295       LogicSVEAddressVector addr(base,
10296                                  &ReadVRegister(instr->GetRm()),
10297                                  kFormatVnS,
10298                                  mod);
10299       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10300       SVEStructuredStoreHelper(kFormatVnS,
10301                                ReadPRegister(instr->GetPgLow8()),
10302                                instr->GetRt(),
10303                                addr);
10304       break;
10305     }
10306     default:
10307       VIXL_UNIMPLEMENTED();
10308       break;
10309   }
10310 }
10311 
10312 void Simulator::VisitSVE32BitScatterStore_VectorPlusImm(
10313     const Instruction* instr) {
10314   int msz = 0;
10315   switch (instr->Mask(SVE32BitScatterStore_VectorPlusImmMask)) {
10316     case ST1B_z_p_ai_s:
10317       msz = 0;
10318       break;
10319     case ST1H_z_p_ai_s:
10320       msz = 1;
10321       break;
10322     case ST1W_z_p_ai_s:
10323       msz = 2;
10324       break;
10325     default:
10326       VIXL_UNIMPLEMENTED();
10327       break;
10328   }
10329   uint64_t imm = instr->ExtractBits(20, 16) << msz;
10330   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnS);
10331   addr.SetMsizeInBytesLog2(msz);
10332   SVEStructuredStoreHelper(kFormatVnS,
10333                            ReadPRegister(instr->GetPgLow8()),
10334                            instr->GetRt(),
10335                            addr);
10336 }
10337 
10338 void Simulator::VisitSVE64BitScatterStore_ScalarPlus64BitScaledOffsets(
10339     const Instruction* instr) {
10340   switch (instr->Mask(SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsMask)) {
10341     case ST1D_z_p_bz_d_64_scaled:
10342     case ST1H_z_p_bz_d_64_scaled:
10343     case ST1W_z_p_bz_d_64_scaled: {
10344       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10345       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10346       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
10347       uint64_t base = ReadXRegister(instr->GetRn());
10348       LogicSVEAddressVector addr(base,
10349                                  &ReadVRegister(instr->GetRm()),
10350                                  kFormatVnD,
10351                                  SVE_LSL,
10352                                  scale);
10353       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10354       SVEStructuredStoreHelper(kFormatVnD,
10355                                ReadPRegister(instr->GetPgLow8()),
10356                                instr->GetRt(),
10357                                addr);
10358       break;
10359     }
10360     default:
10361       VIXL_UNIMPLEMENTED();
10362       break;
10363   }
10364 }
10365 
10366 void Simulator::VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets(
10367     const Instruction* instr) {
10368   switch (
10369       instr->Mask(SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsMask)) {
10370     case ST1B_z_p_bz_d_64_unscaled:
10371     case ST1D_z_p_bz_d_64_unscaled:
10372     case ST1H_z_p_bz_d_64_unscaled:
10373     case ST1W_z_p_bz_d_64_unscaled: {
10374       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10375       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10376       uint64_t base = ReadXRegister(instr->GetRn());
10377       LogicSVEAddressVector addr(base,
10378                                  &ReadVRegister(instr->GetRm()),
10379                                  kFormatVnD,
10380                                  NO_SVE_OFFSET_MODIFIER);
10381       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10382       SVEStructuredStoreHelper(kFormatVnD,
10383                                ReadPRegister(instr->GetPgLow8()),
10384                                instr->GetRt(),
10385                                addr);
10386       break;
10387     }
10388     default:
10389       VIXL_UNIMPLEMENTED();
10390       break;
10391   }
10392 }
10393 
10394 void Simulator::VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets(
10395     const Instruction* instr) {
10396   switch (instr->Mask(
10397       SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsMask)) {
10398     case ST1D_z_p_bz_d_x32_scaled:
10399     case ST1H_z_p_bz_d_x32_scaled:
10400     case ST1W_z_p_bz_d_x32_scaled: {
10401       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10402       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10403       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
10404       uint64_t base = ReadXRegister(instr->GetRn());
10405       SVEOffsetModifier mod =
10406           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
10407       LogicSVEAddressVector addr(base,
10408                                  &ReadVRegister(instr->GetRm()),
10409                                  kFormatVnD,
10410                                  mod,
10411                                  scale);
10412       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10413       SVEStructuredStoreHelper(kFormatVnD,
10414                                ReadPRegister(instr->GetPgLow8()),
10415                                instr->GetRt(),
10416                                addr);
10417       break;
10418     }
10419     default:
10420       VIXL_UNIMPLEMENTED();
10421       break;
10422   }
10423 }
10424 
10425 void Simulator::
10426     VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets(
10427         const Instruction* instr) {
10428   switch (instr->Mask(
10429       SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsMask)) {
10430     case ST1B_z_p_bz_d_x32_unscaled:
10431     case ST1D_z_p_bz_d_x32_unscaled:
10432     case ST1H_z_p_bz_d_x32_unscaled:
10433     case ST1W_z_p_bz_d_x32_unscaled: {
10434       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10435       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
10436       uint64_t base = ReadXRegister(instr->GetRn());
10437       SVEOffsetModifier mod =
10438           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
10439       LogicSVEAddressVector addr(base,
10440                                  &ReadVRegister(instr->GetRm()),
10441                                  kFormatVnD,
10442                                  mod);
10443       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10444       SVEStructuredStoreHelper(kFormatVnD,
10445                                ReadPRegister(instr->GetPgLow8()),
10446                                instr->GetRt(),
10447                                addr);
10448       break;
10449     }
10450     default:
10451       VIXL_UNIMPLEMENTED();
10452       break;
10453   }
10454 }
10455 
10456 void Simulator::VisitSVE64BitScatterStore_VectorPlusImm(
10457     const Instruction* instr) {
10458   int msz = 0;
10459   switch (instr->Mask(SVE64BitScatterStore_VectorPlusImmMask)) {
10460     case ST1B_z_p_ai_d:
10461       msz = 0;
10462       break;
10463     case ST1D_z_p_ai_d:
10464       msz = 3;
10465       break;
10466     case ST1H_z_p_ai_d:
10467       msz = 1;
10468       break;
10469     case ST1W_z_p_ai_d:
10470       msz = 2;
10471       break;
10472     default:
10473       VIXL_UNIMPLEMENTED();
10474       break;
10475   }
10476   uint64_t imm = instr->ExtractBits(20, 16) << msz;
10477   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnD);
10478   addr.SetMsizeInBytesLog2(msz);
10479   SVEStructuredStoreHelper(kFormatVnD,
10480                            ReadPRegister(instr->GetPgLow8()),
10481                            instr->GetRt(),
10482                            addr);
10483 }
10484 
10485 void Simulator::VisitSVEContiguousNonTemporalStore_ScalarPlusImm(
10486     const Instruction* instr) {
10487   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10488   VectorFormat vform = kFormatUndefined;
10489 
10490   switch (instr->Mask(SVEContiguousNonTemporalStore_ScalarPlusImmMask)) {
10491     case STNT1B_z_p_bi_contiguous:
10492       vform = kFormatVnB;
10493       break;
10494     case STNT1D_z_p_bi_contiguous:
10495       vform = kFormatVnD;
10496       break;
10497     case STNT1H_z_p_bi_contiguous:
10498       vform = kFormatVnH;
10499       break;
10500     case STNT1W_z_p_bi_contiguous:
10501       vform = kFormatVnS;
10502       break;
10503     default:
10504       VIXL_UNIMPLEMENTED();
10505       break;
10506   }
10507   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
10508   int vl = GetVectorLengthInBytes();
10509   uint64_t offset = instr->ExtractSignedBits(19, 16) * vl;
10510   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10511   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10512   SVEStructuredStoreHelper(vform, pg, instr->GetRt(), addr);
10513 }
10514 
10515 void Simulator::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar(
10516     const Instruction* instr) {
10517   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10518   VectorFormat vform = kFormatUndefined;
10519 
10520   switch (instr->Mask(SVEContiguousNonTemporalStore_ScalarPlusScalarMask)) {
10521     case STNT1B_z_p_br_contiguous:
10522       vform = kFormatVnB;
10523       break;
10524     case STNT1D_z_p_br_contiguous:
10525       vform = kFormatVnD;
10526       break;
10527     case STNT1H_z_p_br_contiguous:
10528       vform = kFormatVnH;
10529       break;
10530     case STNT1W_z_p_br_contiguous:
10531       vform = kFormatVnS;
10532       break;
10533     default:
10534       VIXL_UNIMPLEMENTED();
10535       break;
10536   }
10537   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
10538   uint64_t offset = ReadXRegister(instr->GetRm()) << msize_in_bytes_log2;
10539   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10540   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10541   SVEStructuredStoreHelper(vform, pg, instr->GetRt(), addr);
10542 }
10543 
10544 void Simulator::VisitSVEContiguousStore_ScalarPlusImm(
10545     const Instruction* instr) {
10546   switch (instr->Mask(SVEContiguousStore_ScalarPlusImmMask)) {
10547     case ST1B_z_p_bi:
10548     case ST1D_z_p_bi:
10549     case ST1H_z_p_bi:
10550     case ST1W_z_p_bi: {
10551       int vl = GetVectorLengthInBytes();
10552       int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
10553       int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(false);
10554       VIXL_ASSERT(esize_in_bytes_log2 >= msize_in_bytes_log2);
10555       int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
10556       uint64_t offset =
10557           (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
10558       VectorFormat vform =
10559           SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
10560       LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10561       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
10562       SVEStructuredStoreHelper(vform,
10563                                ReadPRegister(instr->GetPgLow8()),
10564                                instr->GetRt(),
10565                                addr);
10566       break;
10567     }
10568     default:
10569       VIXL_UNIMPLEMENTED();
10570       break;
10571   }
10572 }
10573 
10574 void Simulator::VisitSVEContiguousStore_ScalarPlusScalar(
10575     const Instruction* instr) {
10576   switch (instr->Mask(SVEContiguousStore_ScalarPlusScalarMask)) {
10577     case ST1B_z_p_br:
10578     case ST1D_z_p_br:
10579     case ST1H_z_p_br:
10580     case ST1W_z_p_br: {
10581       uint64_t offset = ReadXRegister(instr->GetRm());
10582       offset <<= instr->ExtractBits(24, 23);
10583       VectorFormat vform =
10584           SVEFormatFromLaneSizeInBytesLog2(instr->ExtractBits(22, 21));
10585       LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
10586       addr.SetMsizeInBytesLog2(instr->ExtractBits(24, 23));
10587       SVEStructuredStoreHelper(vform,
10588                                ReadPRegister(instr->GetPgLow8()),
10589                                instr->GetRt(),
10590                                addr);
10591       break;
10592     }
10593     default:
10594       VIXL_UNIMPLEMENTED();
10595       break;
10596   }
10597 }
10598 
10599 void Simulator::VisitSVECopySIMDFPScalarRegisterToVector_Predicated(
10600     const Instruction* instr) {
10601   VectorFormat vform = instr->GetSVEVectorFormat();
10602   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10603   SimVRegister z_result;
10604 
10605   switch (instr->Mask(SVECopySIMDFPScalarRegisterToVector_PredicatedMask)) {
10606     case CPY_z_p_v:
10607       dup_element(vform, z_result, ReadVRegister(instr->GetRn()), 0);
10608       mov_merging(vform, ReadVRegister(instr->GetRd()), pg, z_result);
10609       break;
10610     default:
10611       VIXL_UNIMPLEMENTED();
10612       break;
10613   }
10614 }
10615 
10616 void Simulator::VisitSVEStoreMultipleStructures_ScalarPlusImm(
10617     const Instruction* instr) {
10618   switch (instr->Mask(SVEStoreMultipleStructures_ScalarPlusImmMask)) {
10619     case ST2B_z_p_bi_contiguous:
10620     case ST2D_z_p_bi_contiguous:
10621     case ST2H_z_p_bi_contiguous:
10622     case ST2W_z_p_bi_contiguous:
10623     case ST3B_z_p_bi_contiguous:
10624     case ST3D_z_p_bi_contiguous:
10625     case ST3H_z_p_bi_contiguous:
10626     case ST3W_z_p_bi_contiguous:
10627     case ST4B_z_p_bi_contiguous:
10628     case ST4D_z_p_bi_contiguous:
10629     case ST4H_z_p_bi_contiguous:
10630     case ST4W_z_p_bi_contiguous: {
10631       int vl = GetVectorLengthInBytes();
10632       int msz = instr->ExtractBits(24, 23);
10633       int reg_count = instr->ExtractBits(22, 21) + 1;
10634       uint64_t offset = instr->ExtractSignedBits(19, 16) * vl * reg_count;
10635       LogicSVEAddressVector addr(
10636           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
10637       addr.SetMsizeInBytesLog2(msz);
10638       addr.SetRegCount(reg_count);
10639       SVEStructuredStoreHelper(SVEFormatFromLaneSizeInBytesLog2(msz),
10640                                ReadPRegister(instr->GetPgLow8()),
10641                                instr->GetRt(),
10642                                addr);
10643       break;
10644     }
10645     default:
10646       VIXL_UNIMPLEMENTED();
10647       break;
10648   }
10649 }
10650 
10651 void Simulator::VisitSVEStoreMultipleStructures_ScalarPlusScalar(
10652     const Instruction* instr) {
10653   switch (instr->Mask(SVEStoreMultipleStructures_ScalarPlusScalarMask)) {
10654     case ST2B_z_p_br_contiguous:
10655     case ST2D_z_p_br_contiguous:
10656     case ST2H_z_p_br_contiguous:
10657     case ST2W_z_p_br_contiguous:
10658     case ST3B_z_p_br_contiguous:
10659     case ST3D_z_p_br_contiguous:
10660     case ST3H_z_p_br_contiguous:
10661     case ST3W_z_p_br_contiguous:
10662     case ST4B_z_p_br_contiguous:
10663     case ST4D_z_p_br_contiguous:
10664     case ST4H_z_p_br_contiguous:
10665     case ST4W_z_p_br_contiguous: {
10666       int msz = instr->ExtractBits(24, 23);
10667       uint64_t offset = ReadXRegister(instr->GetRm()) * (1 << msz);
10668       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
10669       LogicSVEAddressVector addr(
10670           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
10671       addr.SetMsizeInBytesLog2(msz);
10672       addr.SetRegCount(instr->ExtractBits(22, 21) + 1);
10673       SVEStructuredStoreHelper(vform,
10674                                ReadPRegister(instr->GetPgLow8()),
10675                                instr->GetRt(),
10676                                addr);
10677       break;
10678     }
10679     default:
10680       VIXL_UNIMPLEMENTED();
10681       break;
10682   }
10683 }
10684 
10685 void Simulator::VisitSVEStorePredicateRegister(const Instruction* instr) {
10686   switch (instr->Mask(SVEStorePredicateRegisterMask)) {
10687     case STR_p_bi: {
10688       SimPRegister& pt = ReadPRegister(instr->GetPt());
10689       int pl = GetPredicateLengthInBytes();
10690       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
10691       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
10692       uint64_t address = ReadXRegister(instr->GetRn()) + multiplier * pl;
10693       for (int i = 0; i < pl; i++) {
10694         MemWrite(address + i, pt.GetLane<uint8_t>(i));
10695       }
10696       LogPWrite(instr->GetPt(), address);
10697       break;
10698     }
10699     default:
10700       VIXL_UNIMPLEMENTED();
10701       break;
10702   }
10703 }
10704 
10705 void Simulator::VisitSVEStoreVectorRegister(const Instruction* instr) {
10706   switch (instr->Mask(SVEStoreVectorRegisterMask)) {
10707     case STR_z_bi: {
10708       SimVRegister& zt = ReadVRegister(instr->GetRt());
10709       int vl = GetVectorLengthInBytes();
10710       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
10711       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
10712       uint64_t address = ReadXRegister(instr->GetRn()) + multiplier * vl;
10713       for (int i = 0; i < vl; i++) {
10714         MemWrite(address + i, zt.GetLane<uint8_t>(i));
10715       }
10716       LogZWrite(instr->GetRt(), address);
10717       break;
10718     }
10719     default:
10720       VIXL_UNIMPLEMENTED();
10721       break;
10722   }
10723 }
10724 
10725 void Simulator::VisitSVEMulIndex(const Instruction* instr) {
10726   VectorFormat vform = instr->GetSVEVectorFormat();
10727   SimVRegister& zda = ReadVRegister(instr->GetRd());
10728   SimVRegister& zn = ReadVRegister(instr->GetRn());
10729 
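  // Zm and the lane index share bits [20:16]: the 64-bit forms use a 1-bit index (bit 20) with Zm in [19:16];
  // the 32-bit forms use a 2-bit index (bits [20:19]) with Zm in [18:16].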
10730   switch (instr->Mask(SVEMulIndexMask)) {
10731     case SDOT_z_zzzi_d:
10732       sdot(vform,
10733            zda,
10734            zn,
10735            ReadVRegister(instr->ExtractBits(19, 16)),
10736            instr->ExtractBit(20));
10737       break;
10738     case SDOT_z_zzzi_s:
10739       sdot(vform,
10740            zda,
10741            zn,
10742            ReadVRegister(instr->ExtractBits(18, 16)),
10743            instr->ExtractBits(20, 19));
10744       break;
10745     case UDOT_z_zzzi_d:
10746       udot(vform,
10747            zda,
10748            zn,
10749            ReadVRegister(instr->ExtractBits(19, 16)),
10750            instr->ExtractBit(20));
10751       break;
10752     case UDOT_z_zzzi_s:
10753       udot(vform,
10754            zda,
10755            zn,
10756            ReadVRegister(instr->ExtractBits(18, 16)),
10757            instr->ExtractBits(20, 19));
10758       break;
10759     default:
10760       VIXL_UNIMPLEMENTED();
10761       break;
10762   }
10763 }
10764 
10765 void Simulator::VisitSVEPartitionBreakCondition(const Instruction* instr) {
10766   SimPRegister& pd = ReadPRegister(instr->GetPd());
10767   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
10768   SimPRegister& pn = ReadPRegister(instr->GetPn());
10769   SimPRegister result;
10770 
10771   switch (instr->Mask(SVEPartitionBreakConditionMask)) {
10772     case BRKAS_p_p_p_z:
10773     case BRKA_p_p_p:
10774       brka(result, pg, pn);
10775       break;
10776     case BRKBS_p_p_p_z:
10777     case BRKB_p_p_p:
10778       brkb(result, pg, pn);
10779       break;
10780     default:
10781       VIXL_UNIMPLEMENTED();
10782       break;
10783   }
10784 
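  // Bit 4 selects the merging form of BRKA/BRKB (Pg/M) rather than the
  // zeroing form (Pg/Z).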
10785   if (instr->ExtractBit(4) == 1) {
10786     mov_merging(pd, pg, result);
10787   } else {
10788     mov_zeroing(pd, pg, result);
10789   }
10790 
10791   // Set flag if needed.
10792   if (instr->ExtractBit(22) == 1) {
10793     PredTest(kFormatVnB, pg, pd);
10794   }
10795 }
10796 
10797 void Simulator::VisitSVEPropagateBreakToNextPartition(
10798     const Instruction* instr) {
10799   SimPRegister& pdm = ReadPRegister(instr->GetPd());
10800   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
10801   SimPRegister& pn = ReadPRegister(instr->GetPn());
10802 
10803   switch (instr->Mask(SVEPropagateBreakToNextPartitionMask)) {
10804     case BRKNS_p_p_pp:
10805     case BRKN_p_p_pp:
10806       brkn(pdm, pg, pn);
10807       break;
10808     default:
10809       VIXL_UNIMPLEMENTED();
10810       break;
10811   }
10812 
10813   // Set flag if needed.
10814   if (instr->ExtractBit(22) == 1) {
10815     // Note that this ignores `pg`.
10816     PredTest(kFormatVnB, GetPTrue(), pdm);
10817   }
10818 }
10819 
10820 void Simulator::VisitSVEUnpackPredicateElements(const Instruction* instr) {
10821   SimPRegister& pd = ReadPRegister(instr->GetPd());
10822   SimPRegister& pn = ReadPRegister(instr->GetPn());
10823 
10824   SimVRegister temp = Simulator::ExpandToSimVRegister(pn);
10825   SimVRegister zero;
10826   dup_immediate(kFormatVnB, zero, 0);
10827 
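  // The unpack is modelled on the byte-expanded predicate: interleaving with a
  // zero vector doubles the width of each predicate element. For example (a
  // sketch of the data movement only), PUNPKLO turns {p0, p1, p2, p3, ...}
  // into {p0, 0, p1, 0, ...}, i.e. the low-half elements widened with zero.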
10828   switch (instr->Mask(SVEUnpackPredicateElementsMask)) {
10829     case PUNPKHI_p_p:
10830       zip2(kFormatVnB, temp, temp, zero);
10831       break;
10832     case PUNPKLO_p_p:
10833       zip1(kFormatVnB, temp, temp, zero);
10834       break;
10835     default:
10836       VIXL_UNIMPLEMENTED();
10837       break;
10838   }
10839   Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp);
10840 }
10841 
10842 void Simulator::VisitSVEPermutePredicateElements(const Instruction* instr) {
10843   VectorFormat vform = instr->GetSVEVectorFormat();
10844   SimPRegister& pd = ReadPRegister(instr->GetPd());
10845   SimPRegister& pn = ReadPRegister(instr->GetPn());
10846   SimPRegister& pm = ReadPRegister(instr->GetPm());
10847 
10848   SimVRegister temp0 = Simulator::ExpandToSimVRegister(pn);
10849   SimVRegister temp1 = Simulator::ExpandToSimVRegister(pm);
10850 
10851   switch (instr->Mask(SVEPermutePredicateElementsMask)) {
10852     case TRN1_p_pp:
10853       trn1(vform, temp0, temp0, temp1);
10854       break;
10855     case TRN2_p_pp:
10856       trn2(vform, temp0, temp0, temp1);
10857       break;
10858     case UZP1_p_pp:
10859       uzp1(vform, temp0, temp0, temp1);
10860       break;
10861     case UZP2_p_pp:
10862       uzp2(vform, temp0, temp0, temp1);
10863       break;
10864     case ZIP1_p_pp:
10865       zip1(vform, temp0, temp0, temp1);
10866       break;
10867     case ZIP2_p_pp:
10868       zip2(vform, temp0, temp0, temp1);
10869       break;
10870     default:
10871       VIXL_UNIMPLEMENTED();
10872       break;
10873   }
10874   Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp0);
10875 }
10876 
10877 void Simulator::VisitSVEReversePredicateElements(const Instruction* instr) {
10878   switch (instr->Mask(SVEReversePredicateElementsMask)) {
10879     case REV_p_p: {
10880       VectorFormat vform = instr->GetSVEVectorFormat();
10881       SimPRegister& pn = ReadPRegister(instr->GetPn());
10882       SimPRegister& pd = ReadPRegister(instr->GetPd());
10883       SimVRegister temp = Simulator::ExpandToSimVRegister(pn);
10884       rev(vform, temp, temp);
10885       Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp);
10886       break;
10887     }
10888     default:
10889       VIXL_UNIMPLEMENTED();
10890       break;
10891   }
10892 }
10893 
10894 void Simulator::VisitSVEPermuteVectorExtract(const Instruction* instr) {
10895   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10896   // Second source register "Zm" is encoded where "Zn" would usually be.
10897   SimVRegister& zm = ReadVRegister(instr->GetRn());
10898 
10899   const int imm8h_mask = 0x001F0000;
10900   const int imm8l_mask = 0x00001C00;
10901   int index = instr->ExtractBits<imm8h_mask | imm8l_mask>();
10902   int vl = GetVectorLengthInBytes();
10903   index = (index >= vl) ? 0 : index;
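  // The extract position is a byte index into the concatenation of the two
  // sources; an out-of-range index (>= VL in bytes) is treated as zero here,
  // which leaves Zdn unchanged. Illustrative example: with a 16-byte vector
  // and index 3, the result is bytes {Zdn[3..15], Zm[0..2]}.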
10904 
10905   switch (instr->Mask(SVEPermuteVectorExtractMask)) {
10906     case EXT_z_zi_des:
10907       ext(kFormatVnB, zdn, zdn, zm, index);
10908       break;
10909     default:
10910       VIXL_UNIMPLEMENTED();
10911       break;
10912   }
10913 }
10914 
10915 void Simulator::VisitSVEPermuteVectorInterleaving(const Instruction* instr) {
10916   VectorFormat vform = instr->GetSVEVectorFormat();
10917   SimVRegister& zd = ReadVRegister(instr->GetRd());
10918   SimVRegister& zn = ReadVRegister(instr->GetRn());
10919   SimVRegister& zm = ReadVRegister(instr->GetRm());
10920 
10921   switch (instr->Mask(SVEPermuteVectorInterleavingMask)) {
10922     case TRN1_z_zz:
10923       trn1(vform, zd, zn, zm);
10924       break;
10925     case TRN2_z_zz:
10926       trn2(vform, zd, zn, zm);
10927       break;
10928     case UZP1_z_zz:
10929       uzp1(vform, zd, zn, zm);
10930       break;
10931     case UZP2_z_zz:
10932       uzp2(vform, zd, zn, zm);
10933       break;
10934     case ZIP1_z_zz:
10935       zip1(vform, zd, zn, zm);
10936       break;
10937     case ZIP2_z_zz:
10938       zip2(vform, zd, zn, zm);
10939       break;
10940     default:
10941       VIXL_UNIMPLEMENTED();
10942       break;
10943   }
10944 }
10945 
10946 void Simulator::VisitSVEConditionallyBroadcastElementToVector(
10947     const Instruction* instr) {
10948   VectorFormat vform = instr->GetSVEVectorFormat();
10949   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10950   SimVRegister& zm = ReadVRegister(instr->GetRn());
10951   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10952 
10953   int active_offset = -1;
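  // CLASTA conditionally broadcasts the element after the last active one
  // (offset 1), while CLASTB broadcasts the last active element itself
  // (offset 0).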
10954   switch (instr->Mask(SVEConditionallyBroadcastElementToVectorMask)) {
10955     case CLASTA_z_p_zz:
10956       active_offset = 1;
10957       break;
10958     case CLASTB_z_p_zz:
10959       active_offset = 0;
10960       break;
10961     default:
10962       VIXL_UNIMPLEMENTED();
10963       break;
10964   }
10965 
10966   if (active_offset >= 0) {
10967     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
10968     if (value.first) {
10969       dup_immediate(vform, zdn, value.second);
10970     } else {
10971       // Trigger a line of trace for the operation, even though it doesn't
10972       // change the register value.
10973       mov(vform, zdn, zdn);
10974     }
10975   }
10976 }
10977 
10978 void Simulator::VisitSVEConditionallyExtractElementToSIMDFPScalar(
10979     const Instruction* instr) {
10980   VectorFormat vform = instr->GetSVEVectorFormat();
10981   SimVRegister& vdn = ReadVRegister(instr->GetRd());
10982   SimVRegister& zm = ReadVRegister(instr->GetRn());
10983   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10984 
10985   int active_offset = -1;
10986   switch (instr->Mask(SVEConditionallyExtractElementToSIMDFPScalarMask)) {
10987     case CLASTA_v_p_z:
10988       active_offset = 1;
10989       break;
10990     case CLASTB_v_p_z:
10991       active_offset = 0;
10992       break;
10993     default:
10994       VIXL_UNIMPLEMENTED();
10995       break;
10996   }
10997 
10998   if (active_offset >= 0) {
10999     LogicVRegister dst(vdn);
11000     uint64_t src1_value = dst.Uint(vform, 0);
11001     std::pair<bool, uint64_t> src2_value = clast(vform, pg, zm, active_offset);
11002     dup_immediate(vform, vdn, 0);
11003     dst.SetUint(vform, 0, src2_value.first ? src2_value.second : src1_value);
11004   }
11005 }
11006 
11007 void Simulator::VisitSVEConditionallyExtractElementToGeneralRegister(
11008     const Instruction* instr) {
11009   VectorFormat vform = instr->GetSVEVectorFormat();
11010   SimVRegister& zm = ReadVRegister(instr->GetRn());
11011   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11012 
11013   int active_offset = -1;
11014   switch (instr->Mask(SVEConditionallyExtractElementToGeneralRegisterMask)) {
11015     case CLASTA_r_p_z:
11016       active_offset = 1;
11017       break;
11018     case CLASTB_r_p_z:
11019       active_offset = 0;
11020       break;
11021     default:
11022       VIXL_UNIMPLEMENTED();
11023       break;
11024   }
11025 
11026   if (active_offset >= 0) {
11027     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
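    // If no lanes are active, Xd keeps its previous value, truncated to the
    // lane size and zero-extended to 64 bits.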
11028     uint64_t masked_src = ReadXRegister(instr->GetRd()) &
11029                           GetUintMask(LaneSizeInBitsFromFormat(vform));
11030     WriteXRegister(instr->GetRd(), value.first ? value.second : masked_src);
11031   }
11032 }
11033 
11034 void Simulator::VisitSVEExtractElementToSIMDFPScalarRegister(
11035     const Instruction* instr) {
11036   VectorFormat vform = instr->GetSVEVectorFormat();
11037   SimVRegister& vdn = ReadVRegister(instr->GetRd());
11038   SimVRegister& zm = ReadVRegister(instr->GetRn());
11039   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11040 
11041   int active_offset = -1;
11042   switch (instr->Mask(SVEExtractElementToSIMDFPScalarRegisterMask)) {
11043     case LASTA_v_p_z:
11044       active_offset = 1;
11045       break;
11046     case LASTB_v_p_z:
11047       active_offset = 0;
11048       break;
11049     default:
11050       VIXL_UNIMPLEMENTED();
11051       break;
11052   }
11053 
11054   if (active_offset >= 0) {
11055     LogicVRegister dst(vdn);
11056     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
11057     dup_immediate(vform, vdn, 0);
11058     dst.SetUint(vform, 0, value.second);
11059   }
11060 }
11061 
11062 void Simulator::VisitSVEExtractElementToGeneralRegister(
11063     const Instruction* instr) {
11064   VectorFormat vform = instr->GetSVEVectorFormat();
11065   SimVRegister& zm = ReadVRegister(instr->GetRn());
11066   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11067 
11068   int active_offset = -1;
11069   switch (instr->Mask(SVEExtractElementToGeneralRegisterMask)) {
11070     case LASTA_r_p_z:
11071       active_offset = 1;
11072       break;
11073     case LASTB_r_p_z:
11074       active_offset = 0;
11075       break;
11076     default:
11077       VIXL_UNIMPLEMENTED();
11078       break;
11079   }
11080 
11081   if (active_offset >= 0) {
11082     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
11083     WriteXRegister(instr->GetRd(), value.second);
11084   }
11085 }
11086 
11087 void Simulator::VisitSVECompressActiveElements(const Instruction* instr) {
11088   VectorFormat vform = instr->GetSVEVectorFormat();
11089   SimVRegister& zd = ReadVRegister(instr->GetRd());
11090   SimVRegister& zn = ReadVRegister(instr->GetRn());
11091   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11092 
11093   switch (instr->Mask(SVECompressActiveElementsMask)) {
11094     case COMPACT_z_p_z:
11095       compact(vform, zd, pg, zn);
11096       break;
11097     default:
11098       VIXL_UNIMPLEMENTED();
11099       break;
11100   }
11101 }
11102 
11103 void Simulator::VisitSVECopyGeneralRegisterToVector_Predicated(
11104     const Instruction* instr) {
11105   VectorFormat vform = instr->GetSVEVectorFormat();
11106   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11107   SimVRegister z_result;
11108 
11109   switch (instr->Mask(SVECopyGeneralRegisterToVector_PredicatedMask)) {
11110     case CPY_z_p_r:
11111       dup_immediate(vform,
11112                     z_result,
11113                     ReadXRegister(instr->GetRn(), Reg31IsStackPointer));
11114       mov_merging(vform, ReadVRegister(instr->GetRd()), pg, z_result);
11115       break;
11116     default:
11117       VIXL_UNIMPLEMENTED();
11118       break;
11119   }
11120 }
11121 
11122 void Simulator::VisitSVECopyIntImm_Predicated(const Instruction* instr) {
11123   VectorFormat vform = instr->GetSVEVectorFormat();
11124   SimPRegister& pg = ReadPRegister(instr->ExtractBits(19, 16));
11125   SimVRegister& zd = ReadVRegister(instr->GetRd());
11126 
11127   SimVRegister result;
11128   switch (instr->Mask(SVECopyIntImm_PredicatedMask)) {
11129     case CPY_z_p_i: {
11130       // Use unsigned arithmetic to avoid undefined behaviour during the shift.
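      // Bit 13 is the optional "LSL #8" shift for the wide immediate; for
      // example (illustrative), an immediate of 0x7f with the shift set is
      // replicated as 0x7f00 in each selected lane.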
11131       uint64_t imm8 = instr->GetImmSVEIntWideSigned();
11132       dup_immediate(vform, result, imm8 << (instr->ExtractBit(13) * 8));
11133       break;
11134     }
11135     default:
11136       VIXL_UNIMPLEMENTED();
11137       break;
11138   }
11139 
11140   if (instr->ExtractBit(14) != 0) {
11141     mov_merging(vform, zd, pg, result);
11142   } else {
11143     mov_zeroing(vform, zd, pg, result);
11144   }
11145 }
11146 
11147 void Simulator::VisitSVEReverseWithinElements(const Instruction* instr) {
11148   SimVRegister& zd = ReadVRegister(instr->GetRd());
11149   SimVRegister& zn = ReadVRegister(instr->GetRn());
11150   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11151   SimVRegister result;
11152 
11153   // In NEON, the chunk size in which elements are REVersed is in the
11154   // instruction mnemonic, and the element size attached to the register.
11155   // SVE reverses the semantics; the mapping to logic functions below is to
11156   // account for this.
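  // For example, REVB with S-sized containers reverses the bytes within each
  // 32-bit chunk, which is modelled below as rev32 applied to byte-sized
  // elements.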
11157   VectorFormat chunk_form = instr->GetSVEVectorFormat();
11158   VectorFormat element_form = kFormatUndefined;
11159 
11160   switch (instr->Mask(SVEReverseWithinElementsMask)) {
11161     case RBIT_z_p_z:
11162       rbit(chunk_form, result, zn);
11163       break;
11164     case REVB_z_z:
11165       VIXL_ASSERT((chunk_form == kFormatVnH) || (chunk_form == kFormatVnS) ||
11166                   (chunk_form == kFormatVnD));
11167       element_form = kFormatVnB;
11168       break;
11169     case REVH_z_z:
11170       VIXL_ASSERT((chunk_form == kFormatVnS) || (chunk_form == kFormatVnD));
11171       element_form = kFormatVnH;
11172       break;
11173     case REVW_z_z:
11174       VIXL_ASSERT(chunk_form == kFormatVnD);
11175       element_form = kFormatVnS;
11176       break;
11177     default:
11178       VIXL_UNIMPLEMENTED();
11179       break;
11180   }
11181 
11182   if (instr->Mask(SVEReverseWithinElementsMask) != RBIT_z_p_z) {
11183     VIXL_ASSERT(element_form != kFormatUndefined);
11184     switch (chunk_form) {
11185       case kFormatVnH:
11186         rev16(element_form, result, zn);
11187         break;
11188       case kFormatVnS:
11189         rev32(element_form, result, zn);
11190         break;
11191       case kFormatVnD:
11192         rev64(element_form, result, zn);
11193         break;
11194       default:
11195         VIXL_UNIMPLEMENTED();
11196     }
11197   }
11198 
11199   mov_merging(chunk_form, zd, pg, result);
11200 }
11201 
11202 void Simulator::VisitSVEVectorSplice_Destructive(const Instruction* instr) {
11203   VectorFormat vform = instr->GetSVEVectorFormat();
11204   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11205   SimVRegister& zm = ReadVRegister(instr->GetRn());
11206   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11207 
11208   switch (instr->Mask(SVEVectorSplice_DestructiveMask)) {
11209     case SPLICE_z_p_zz_des:
11210       splice(vform, zdn, pg, zdn, zm);
11211       break;
11212     default:
11213       VIXL_UNIMPLEMENTED();
11214       break;
11215   }
11216 }
11217 
11218 void Simulator::VisitSVEBroadcastGeneralRegister(const Instruction* instr) {
11219   SimVRegister& zd = ReadVRegister(instr->GetRd());
11220   switch (instr->Mask(SVEBroadcastGeneralRegisterMask)) {
11221     case DUP_z_r:
11222       dup_immediate(instr->GetSVEVectorFormat(),
11223                     zd,
11224                     ReadXRegister(instr->GetRn(), Reg31IsStackPointer));
11225       break;
11226     default:
11227       VIXL_UNIMPLEMENTED();
11228       break;
11229   }
11230 }
11231 
11232 void Simulator::VisitSVEInsertSIMDFPScalarRegister(const Instruction* instr) {
11233   SimVRegister& zd = ReadVRegister(instr->GetRd());
11234   VectorFormat vform = instr->GetSVEVectorFormat();
11235   switch (instr->Mask(SVEInsertSIMDFPScalarRegisterMask)) {
11236     case INSR_z_v:
11237       insr(vform, zd, ReadDRegisterBits(instr->GetRn()));
11238       break;
11239     default:
11240       VIXL_UNIMPLEMENTED();
11241       break;
11242   }
11243 }
11244 
11245 void Simulator::VisitSVEInsertGeneralRegister(const Instruction* instr) {
11246   SimVRegister& zd = ReadVRegister(instr->GetRd());
11247   VectorFormat vform = instr->GetSVEVectorFormat();
11248   switch (instr->Mask(SVEInsertGeneralRegisterMask)) {
11249     case INSR_z_r:
11250       insr(vform, zd, ReadXRegister(instr->GetRn()));
11251       break;
11252     default:
11253       VIXL_UNIMPLEMENTED();
11254       break;
11255   }
11256 }
11257 
11258 void Simulator::VisitSVEBroadcastIndexElement(const Instruction* instr) {
11259   SimVRegister& zd = ReadVRegister(instr->GetRd());
11260   switch (instr->Mask(SVEBroadcastIndexElementMask)) {
11261     case DUP_z_zi: {
11262       std::pair<int, int> index_and_lane_size =
11263           instr->GetSVEPermuteIndexAndLaneSizeLog2();
11264       int index = index_and_lane_size.first;
11265       int lane_size_in_bytes_log_2 = index_and_lane_size.second;
11266       VectorFormat vform =
11267           SVEFormatFromLaneSizeInBytesLog2(lane_size_in_bytes_log_2);
11268       if ((index < 0) || (index >= LaneCountFromFormat(vform))) {
11269         // Out of bounds, set the destination register to zero.
11270         dup_immediate(kFormatVnD, zd, 0);
11271       } else {
11272         dup_element(vform, zd, ReadVRegister(instr->GetRn()), index);
11273       }
11274       return;
11275     }
11276     default:
11277       VIXL_UNIMPLEMENTED();
11278       break;
11279   }
11280 }
11281 
11282 void Simulator::VisitSVEReverseVectorElements(const Instruction* instr) {
11283   SimVRegister& zd = ReadVRegister(instr->GetRd());
11284   VectorFormat vform = instr->GetSVEVectorFormat();
11285   switch (instr->Mask(SVEReverseVectorElementsMask)) {
11286     case REV_z_z:
11287       rev(vform, zd, ReadVRegister(instr->GetRn()));
11288       break;
11289     default:
11290       VIXL_UNIMPLEMENTED();
11291       break;
11292   }
11293 }
11294 
11295 void Simulator::VisitSVEUnpackVectorElements(const Instruction* instr) {
11296   SimVRegister& zd = ReadVRegister(instr->GetRd());
11297   VectorFormat vform = instr->GetSVEVectorFormat();
11298   switch (instr->Mask(SVEUnpackVectorElementsMask)) {
11299     case SUNPKHI_z_z:
11300       unpk(vform, zd, ReadVRegister(instr->GetRn()), kHiHalf, kSignedExtend);
11301       break;
11302     case SUNPKLO_z_z:
11303       unpk(vform, zd, ReadVRegister(instr->GetRn()), kLoHalf, kSignedExtend);
11304       break;
11305     case UUNPKHI_z_z:
11306       unpk(vform, zd, ReadVRegister(instr->GetRn()), kHiHalf, kUnsignedExtend);
11307       break;
11308     case UUNPKLO_z_z:
11309       unpk(vform, zd, ReadVRegister(instr->GetRn()), kLoHalf, kUnsignedExtend);
11310       break;
11311     default:
11312       VIXL_UNIMPLEMENTED();
11313       break;
11314   }
11315 }
11316 
11317 void Simulator::VisitSVETableLookup(const Instruction* instr) {
11318   SimVRegister& zd = ReadVRegister(instr->GetRd());
11319   switch (instr->Mask(SVETableLookupMask)) {
11320     case TBL_z_zz_1:
11321       Table(instr->GetSVEVectorFormat(),
11322             zd,
11323             ReadVRegister(instr->GetRn()),
11324             ReadVRegister(instr->GetRm()));
11325       return;
11326     default:
11327       break;
11328   }
11329 }
11330 
11331 void Simulator::VisitSVEPredicateCount(const Instruction* instr) {
11332   VectorFormat vform = instr->GetSVEVectorFormat();
11333   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
11334   SimPRegister& pn = ReadPRegister(instr->GetPn());
11335 
11336   switch (instr->Mask(SVEPredicateCountMask)) {
11337     case CNTP_r_p_p: {
11338       WriteXRegister(instr->GetRd(), CountActiveAndTrueLanes(vform, pg, pn));
11339       break;
11340     }
11341     default:
11342       VIXL_UNIMPLEMENTED();
11343       break;
11344   }
11345 }
11346 
11347 void Simulator::VisitSVEPredicateLogical(const Instruction* instr) {
11348   Instr op = instr->Mask(SVEPredicateLogicalMask);
11349   SimPRegister& pd = ReadPRegister(instr->GetPd());
11350   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
11351   SimPRegister& pn = ReadPRegister(instr->GetPn());
11352   SimPRegister& pm = ReadPRegister(instr->GetPm());
11353   SimPRegister result;
11354   switch (op) {
11355     case ANDS_p_p_pp_z:
11356     case AND_p_p_pp_z:
11357     case BICS_p_p_pp_z:
11358     case BIC_p_p_pp_z:
11359     case EORS_p_p_pp_z:
11360     case EOR_p_p_pp_z:
11361     case NANDS_p_p_pp_z:
11362     case NAND_p_p_pp_z:
11363     case NORS_p_p_pp_z:
11364     case NOR_p_p_pp_z:
11365     case ORNS_p_p_pp_z:
11366     case ORN_p_p_pp_z:
11367     case ORRS_p_p_pp_z:
11368     case ORR_p_p_pp_z:
11369       SVEPredicateLogicalHelper(static_cast<SVEPredicateLogicalOp>(op),
11370                                 result,
11371                                 pn,
11372                                 pm);
11373       break;
11374     case SEL_p_p_pp:
11375       sel(pd, pg, pn, pm);
11376       return;
11377     default:
11378       VIXL_UNIMPLEMENTED();
11379       break;
11380   }
11381 
11382   mov_zeroing(pd, pg, result);
11383   if (instr->Mask(SVEPredicateLogicalSetFlagsBit) != 0) {
11384     PredTest(kFormatVnB, pg, pd);
11385   }
11386 }
11387 
11388 void Simulator::VisitSVEPredicateFirstActive(const Instruction* instr) {
11389   LogicPRegister pg = ReadPRegister(instr->ExtractBits(8, 5));
11390   LogicPRegister pdn = ReadPRegister(instr->GetPd());
11391   switch (instr->Mask(SVEPredicateFirstActiveMask)) {
11392     case PFIRST_p_p_p:
11393       pfirst(pdn, pg, pdn);
11394       // TODO: Is this broken when pg == pdn?
11395       PredTest(kFormatVnB, pg, pdn);
11396       break;
11397     default:
11398       VIXL_UNIMPLEMENTED();
11399       break;
11400   }
11401 }
11402 
11403 void Simulator::VisitSVEPredicateInitialize(const Instruction* instr) {
11404   // This group only contains PTRUE{S}, and there are no unallocated encodings.
11405   VIXL_STATIC_ASSERT(
11406       SVEPredicateInitializeMask ==
11407       (SVEPredicateInitializeFMask | SVEPredicateInitializeSetFlagsBit));
11408   VIXL_ASSERT((instr->Mask(SVEPredicateInitializeMask) == PTRUE_p_s) ||
11409               (instr->Mask(SVEPredicateInitializeMask) == PTRUES_p_s));
11410 
11411   LogicPRegister pdn = ReadPRegister(instr->GetPd());
11412   VectorFormat vform = instr->GetSVEVectorFormat();
11413 
11414   ptrue(vform, pdn, instr->GetImmSVEPredicateConstraint());
11415   if (instr->ExtractBit(16)) PredTest(vform, pdn, pdn);
11416 }
11417 
11418 void Simulator::VisitSVEPredicateNextActive(const Instruction* instr) {
11419   // This group only contains PNEXT, and there are no unallocated encodings.
11420   VIXL_STATIC_ASSERT(SVEPredicateNextActiveFMask == SVEPredicateNextActiveMask);
11421   VIXL_ASSERT(instr->Mask(SVEPredicateNextActiveMask) == PNEXT_p_p_p);
11422 
11423   LogicPRegister pg = ReadPRegister(instr->ExtractBits(8, 5));
11424   LogicPRegister pdn = ReadPRegister(instr->GetPd());
11425   VectorFormat vform = instr->GetSVEVectorFormat();
11426 
11427   pnext(vform, pdn, pg, pdn);
11428   // TODO: Is this broken when pg == pdn?
11429   PredTest(vform, pg, pdn);
11430 }
11431 
11432 void Simulator::VisitSVEPredicateReadFromFFR_Predicated(
11433     const Instruction* instr) {
11434   LogicPRegister pd(ReadPRegister(instr->GetPd()));
11435   LogicPRegister pg(ReadPRegister(instr->GetPn()));
11436   FlagsUpdate flags = LeaveFlags;
11437   switch (instr->Mask(SVEPredicateReadFromFFR_PredicatedMask)) {
11438     case RDFFR_p_p_f:
11439       // Do nothing.
11440       break;
11441     case RDFFRS_p_p_f:
11442       flags = SetFlags;
11443       break;
11444     default:
11445       VIXL_UNIMPLEMENTED();
11446       break;
11447   }
11448 
11449   LogicPRegister ffr(ReadFFR());
11450   mov_zeroing(pd, pg, ffr);
11451 
11452   if (flags == SetFlags) {
11453     PredTest(kFormatVnB, pg, pd);
11454   }
11455 }
11456 
11457 void Simulator::VisitSVEPredicateReadFromFFR_Unpredicated(
11458     const Instruction* instr) {
11459   LogicPRegister pd(ReadPRegister(instr->GetPd()));
11460   LogicPRegister ffr(ReadFFR());
11461   switch (instr->Mask(SVEPredicateReadFromFFR_UnpredicatedMask)) {
11462     case RDFFR_p_f:
11463       mov(pd, ffr);
11464       break;
11465     default:
11466       VIXL_UNIMPLEMENTED();
11467       break;
11468   }
11469 }
11470 
11471 void Simulator::VisitSVEPredicateTest(const Instruction* instr) {
11472   switch (instr->Mask(SVEPredicateTestMask)) {
11473     case PTEST_p_p:
11474       PredTest(kFormatVnB,
11475                ReadPRegister(instr->ExtractBits(13, 10)),
11476                ReadPRegister(instr->GetPn()));
11477       break;
11478     default:
11479       VIXL_UNIMPLEMENTED();
11480       break;
11481   }
11482 }
11483 
11484 void Simulator::VisitSVEPredicateZero(const Instruction* instr) {
11485   switch (instr->Mask(SVEPredicateZeroMask)) {
11486     case PFALSE_p:
11487       pfalse(ReadPRegister(instr->GetPd()));
11488       break;
11489     default:
11490       VIXL_UNIMPLEMENTED();
11491       break;
11492   }
11493 }
11494 
11495 void Simulator::VisitSVEPropagateBreak(const Instruction* instr) {
11496   SimPRegister& pd = ReadPRegister(instr->GetPd());
11497   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
11498   SimPRegister& pn = ReadPRegister(instr->GetPn());
11499   SimPRegister& pm = ReadPRegister(instr->GetPm());
11500 
11501   bool set_flags = false;
11502   switch (instr->Mask(SVEPropagateBreakMask)) {
11503     case BRKPAS_p_p_pp:
11504       set_flags = true;
11505       VIXL_FALLTHROUGH();
11506     case BRKPA_p_p_pp:
11507       brkpa(pd, pg, pn, pm);
11508       break;
11509     case BRKPBS_p_p_pp:
11510       set_flags = true;
11511       VIXL_FALLTHROUGH();
11512     case BRKPB_p_p_pp:
11513       brkpb(pd, pg, pn, pm);
11514       break;
11515     default:
11516       VIXL_UNIMPLEMENTED();
11517       break;
11518   }
11519 
11520   if (set_flags) {
11521     PredTest(kFormatVnB, pg, pd);
11522   }
11523 }
11524 
11525 void Simulator::VisitSVEStackFrameAdjustment(const Instruction* instr) {
11526   uint64_t length = 0;
11527   switch (instr->Mask(SVEStackFrameAdjustmentMask)) {
11528     case ADDPL_r_ri:
11529       length = GetPredicateLengthInBytes();
11530       break;
11531     case ADDVL_r_ri:
11532       length = GetVectorLengthInBytes();
11533       break;
11534     default:
11535       VIXL_UNIMPLEMENTED();
11536   }
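  // The result is the base register plus the signed immediate scaled by the
  // current vector or predicate length. Illustrative example: with a 256-bit
  // vector length, ADDVL with an immediate of -2 subtracts 64 bytes from the
  // base.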
11537   uint64_t base = ReadXRegister(instr->GetRm(), Reg31IsStackPointer);
11538   WriteXRegister(instr->GetRd(),
11539                  base + (length * instr->GetImmSVEVLScale()),
11540                  LogRegWrites,
11541                  Reg31IsStackPointer);
11542 }
11543 
11544 void Simulator::VisitSVEStackFrameSize(const Instruction* instr) {
11545   int64_t scale = instr->GetImmSVEVLScale();
11546 
11547   switch (instr->Mask(SVEStackFrameSizeMask)) {
11548     case RDVL_r_i:
11549       WriteXRegister(instr->GetRd(), GetVectorLengthInBytes() * scale);
11550       break;
11551     default:
11552       VIXL_UNIMPLEMENTED();
11553   }
11554 }
11555 
11556 void Simulator::VisitSVEVectorSelect(const Instruction* instr) {
11557   // The only instruction in this group is `sel`, and there are no unused
11558   // encodings.
11559   VIXL_ASSERT(instr->Mask(SVEVectorSelectMask) == SEL_z_p_zz);
11560 
11561   VectorFormat vform = instr->GetSVEVectorFormat();
11562   SimVRegister& zd = ReadVRegister(instr->GetRd());
11563   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
11564   SimVRegister& zn = ReadVRegister(instr->GetRn());
11565   SimVRegister& zm = ReadVRegister(instr->GetRm());
11566 
11567   sel(vform, zd, pg, zn, zm);
11568 }
11569 
11570 void Simulator::VisitSVEFFRInitialise(const Instruction* instr) {
11571   switch (instr->Mask(SVEFFRInitialiseMask)) {
11572     case SETFFR_f: {
11573       LogicPRegister ffr(ReadFFR());
11574       ffr.SetAllBits();
11575       break;
11576     }
11577     default:
11578       VIXL_UNIMPLEMENTED();
11579       break;
11580   }
11581 }
11582 
11583 void Simulator::VisitSVEFFRWriteFromPredicate(const Instruction* instr) {
11584   switch (instr->Mask(SVEFFRWriteFromPredicateMask)) {
11585     case WRFFR_f_p: {
11586       SimPRegister pn(ReadPRegister(instr->GetPn()));
11587       bool last_active = true;
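      // The loop below checks that the active lanes of `pn` form a contiguous
      // block starting at lane 0, e.g. {1, 1, 1, 0, ...} is accepted, while
      // {1, 0, 1, ...} aborts.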
11588       for (unsigned i = 0; i < pn.GetSizeInBits(); i++) {
11589         bool active = pn.GetBit(i);
11590         if (active && !last_active) {
11591           // `pn` is non-monotonic. This is UNPREDICTABLE.
11592           VIXL_ABORT();
11593         }
11594         last_active = active;
11595       }
11596       mov(ReadFFR(), pn);
11597       break;
11598     }
11599     default:
11600       VIXL_UNIMPLEMENTED();
11601       break;
11602   }
11603 }
11604 
11605 void Simulator::VisitSVEContiguousLoad_ScalarPlusImm(const Instruction* instr) {
11606   bool is_signed;
11607   switch (instr->Mask(SVEContiguousLoad_ScalarPlusImmMask)) {
11608     case LD1B_z_p_bi_u8:
11609     case LD1B_z_p_bi_u16:
11610     case LD1B_z_p_bi_u32:
11611     case LD1B_z_p_bi_u64:
11612     case LD1H_z_p_bi_u16:
11613     case LD1H_z_p_bi_u32:
11614     case LD1H_z_p_bi_u64:
11615     case LD1W_z_p_bi_u32:
11616     case LD1W_z_p_bi_u64:
11617     case LD1D_z_p_bi_u64:
11618       is_signed = false;
11619       break;
11620     case LD1SB_z_p_bi_s16:
11621     case LD1SB_z_p_bi_s32:
11622     case LD1SB_z_p_bi_s64:
11623     case LD1SH_z_p_bi_s32:
11624     case LD1SH_z_p_bi_s64:
11625     case LD1SW_z_p_bi_s64:
11626       is_signed = true;
11627       break;
11628     default:
11629       // This encoding group is complete, so no other values should be possible.
11630       VIXL_UNREACHABLE();
11631       is_signed = false;
11632       break;
11633   }
11634 
11635   int vl = GetVectorLengthInBytes();
11636   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
11637   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
11638   VIXL_ASSERT(esize_in_bytes_log2 >= msize_in_bytes_log2);
11639   int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
11640   uint64_t offset =
11641       (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
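  // The immediate is scaled so that one unit covers a full vector's worth of
  // memory elements. Illustrative example: for LD1SB into D-sized lanes with a
  // 256-bit vector (four 64-bit lanes, each loading one byte), an immediate of
  // 3 gives a byte offset of 3 * 4 = 12.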
11642   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
11643   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
11644   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
11645   SVEStructuredLoadHelper(vform,
11646                           ReadPRegister(instr->GetPgLow8()),
11647                           instr->GetRt(),
11648                           addr,
11649                           is_signed);
11650 }
11651 
11652 void Simulator::VisitSVEContiguousLoad_ScalarPlusScalar(
11653     const Instruction* instr) {
11654   bool is_signed;
11655   switch (instr->Mask(SVEContiguousLoad_ScalarPlusScalarMask)) {
11656     case LD1B_z_p_br_u8:
11657     case LD1B_z_p_br_u16:
11658     case LD1B_z_p_br_u32:
11659     case LD1B_z_p_br_u64:
11660     case LD1H_z_p_br_u16:
11661     case LD1H_z_p_br_u32:
11662     case LD1H_z_p_br_u64:
11663     case LD1W_z_p_br_u32:
11664     case LD1W_z_p_br_u64:
11665     case LD1D_z_p_br_u64:
11666       is_signed = false;
11667       break;
11668     case LD1SB_z_p_br_s16:
11669     case LD1SB_z_p_br_s32:
11670     case LD1SB_z_p_br_s64:
11671     case LD1SH_z_p_br_s32:
11672     case LD1SH_z_p_br_s64:
11673     case LD1SW_z_p_br_s64:
11674       is_signed = true;
11675       break;
11676     default:
11677       // This encoding group is complete, so no other values should be possible.
11678       VIXL_UNREACHABLE();
11679       is_signed = false;
11680       break;
11681   }
11682 
11683   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
11684   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
11685   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
11686   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
11687   uint64_t offset = ReadXRegister(instr->GetRm());
11688   offset <<= msize_in_bytes_log2;
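  // The Xm index is in units of memory elements, so the byte offset is
  // Xm << msize; for LD1H, for example, the offset is Xm * 2 bytes.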
11689   LogicSVEAddressVector addr(ReadXRegister(instr->GetRn()) + offset);
11690   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
11691   SVEStructuredLoadHelper(vform,
11692                           ReadPRegister(instr->GetPgLow8()),
11693                           instr->GetRt(),
11694                           addr,
11695                           is_signed);
11696 }
11697 
11698 void Simulator::DoUnreachable(const Instruction* instr) {
11699   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11700               (instr->GetImmException() == kUnreachableOpcode));
11701 
11702   fprintf(stream_,
11703           "Hit UNREACHABLE marker at pc=%p.\n",
11704           reinterpret_cast<const void*>(instr));
11705   abort();
11706 }
11707 
11708 
11709 void Simulator::DoTrace(const Instruction* instr) {
11710   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11711               (instr->GetImmException() == kTraceOpcode));
11712 
11713   // Read the arguments encoded inline in the instruction stream.
11714   uint32_t parameters;
11715   uint32_t command;
11716 
11717   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
11718   memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
11719   memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
11720 
11721   switch (command) {
11722     case TRACE_ENABLE:
11723       SetTraceParameters(GetTraceParameters() | parameters);
11724       break;
11725     case TRACE_DISABLE:
11726       SetTraceParameters(GetTraceParameters() & ~parameters);
11727       break;
11728     default:
11729       VIXL_UNREACHABLE();
11730   }
11731 
11732   WritePc(instr->GetInstructionAtOffset(kTraceLength));
11733 }
11734 
11735 
11736 void Simulator::DoLog(const Instruction* instr) {
11737   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11738               (instr->GetImmException() == kLogOpcode));
11739 
11740   // Read the arguments encoded inline in the instruction stream.
11741   uint32_t parameters;
11742 
11743   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
11744   memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
11745 
11746   // We don't support a one-shot LOG_DISASM.
11747   VIXL_ASSERT((parameters & LOG_DISASM) == 0);
11748   // Print the requested information.
11749   if (parameters & LOG_SYSREGS) PrintSystemRegisters();
11750   if (parameters & LOG_REGS) PrintRegisters();
11751   if (parameters & LOG_VREGS) PrintVRegisters();
11752 
11753   WritePc(instr->GetInstructionAtOffset(kLogLength));
11754 }
11755 
11756 
11757 void Simulator::DoPrintf(const Instruction* instr) {
11758   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11759               (instr->GetImmException() == kPrintfOpcode));
11760 
11761   // Read the arguments encoded inline in the instruction stream.
11762   uint32_t arg_count;
11763   uint32_t arg_pattern_list;
11764   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
11765   memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
11766   memcpy(&arg_pattern_list,
11767          instr + kPrintfArgPatternListOffset,
11768          sizeof(arg_pattern_list));
11769 
11770   VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
11771   VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
11772 
11773   // We need to call the host printf function with a set of arguments defined by
11774   // arg_pattern_list. Because we don't know the types and sizes of the
11775   // arguments, this is very difficult to do in a robust and portable way. To
11776   // work around the problem, we pick apart the format string, and print one
11777   // format placeholder at a time.
11778 
11779   // Allocate space for the format string. We take a copy, so we can modify it.
11780   // Leave enough space for one extra character per expected argument (plus the
11781   // '\0' termination).
11782   const char* format_base = ReadRegister<const char*>(0);
11783   VIXL_ASSERT(format_base != NULL);
11784   size_t length = strlen(format_base) + 1;
11785   char* const format = new char[length + arg_count];
11786 
11787   // A list of chunks, each with exactly one format placeholder.
11788   const char* chunks[kPrintfMaxArgCount];
11789 
11790   // Copy the format string and search for format placeholders.
11791   uint32_t placeholder_count = 0;
11792   char* format_scratch = format;
11793   for (size_t i = 0; i < length; i++) {
11794     if (format_base[i] != '%') {
11795       *format_scratch++ = format_base[i];
11796     } else {
11797       if (format_base[i + 1] == '%') {
11798         // Ignore explicit "%%" sequences.
11799         *format_scratch++ = format_base[i];
11800         i++;
11801         // Chunks after the first are passed as format strings to printf, so we
11802         // need to escape '%' characters in those chunks.
11803         if (placeholder_count > 0) *format_scratch++ = format_base[i];
11804       } else {
11805         VIXL_CHECK(placeholder_count < arg_count);
11806         // Insert '\0' before placeholders, and store their locations.
11807         *format_scratch++ = '\0';
11808         chunks[placeholder_count++] = format_scratch;
11809         *format_scratch++ = format_base[i];
11810       }
11811     }
11812   }
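  // Illustrative example: a format string of "x=%d y=%f\n" is rewritten above
  // as "x=\0%d y=\0%f\n", so the literal prefix "x=" is printed first and the
  // chunks "%d y=" and "%f\n" are each printed with one register argument
  // below.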
11813   VIXL_CHECK(placeholder_count == arg_count);
11814 
11815   // Finally, call printf with each chunk, passing the appropriate register
11816   // argument. Normally, printf returns the number of bytes transmitted, so we
11817   // can emulate a single printf call by adding the result from each chunk. If
11818   // any call returns a negative (error) value, though, just return that value.
11819 
11820   printf("%s", clr_printf);
11821 
11822   // Because '\0' is inserted before each placeholder, the first string in
11823   // 'format' contains no format placeholders and should be printed literally.
11824   int result = printf("%s", format);
11825   int pcs_r = 1;  // Start at x1. x0 holds the format string.
11826   int pcs_f = 0;  // Start at d0.
11827   if (result >= 0) {
11828     for (uint32_t i = 0; i < placeholder_count; i++) {
11829       int part_result = -1;
11830 
11831       uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
11832       arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
11833       switch (arg_pattern) {
11834         case kPrintfArgW:
11835           part_result = printf(chunks[i], ReadWRegister(pcs_r++));
11836           break;
11837         case kPrintfArgX:
11838           part_result = printf(chunks[i], ReadXRegister(pcs_r++));
11839           break;
11840         case kPrintfArgD:
11841           part_result = printf(chunks[i], ReadDRegister(pcs_f++));
11842           break;
11843         default:
11844           VIXL_UNREACHABLE();
11845       }
11846 
11847       if (part_result < 0) {
11848         // Handle error values.
11849         result = part_result;
11850         break;
11851       }
11852 
11853       result += part_result;
11854     }
11855   }
11856 
11857   printf("%s", clr_normal);
11858 
11859   // Printf returns its result in x0 (just like the C library's printf).
11860   WriteXRegister(0, result);
11861 
11862   // The printf parameters are inlined in the code, so skip them.
11863   WritePc(instr->GetInstructionAtOffset(kPrintfLength));
11864 
11865   // Set LR as if we'd just called a native printf function.
11866   WriteLr(ReadPc());
11867 
11868   delete[] format;
11869 }
11870 
11871 
11872 #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
11873 void Simulator::DoRuntimeCall(const Instruction* instr) {
11874   VIXL_STATIC_ASSERT(kRuntimeCallAddressSize == sizeof(uintptr_t));
11875   // The appropriate `Simulator::SimulateRuntimeCall()` wrapper and the function
11876   // to call are passed inlined in the assembly.
11877   uintptr_t call_wrapper_address =
11878       MemRead<uintptr_t>(instr + kRuntimeCallWrapperOffset);
11879   uintptr_t function_address =
11880       MemRead<uintptr_t>(instr + kRuntimeCallFunctionOffset);
11881   RuntimeCallType call_type = static_cast<RuntimeCallType>(
11882       MemRead<uint32_t>(instr + kRuntimeCallTypeOffset));
11883   auto runtime_call_wrapper =
11884       reinterpret_cast<void (*)(Simulator*, uintptr_t)>(call_wrapper_address);
11885 
11886   if (call_type == kCallRuntime) {
11887     WriteRegister(kLinkRegCode,
11888                   instr->GetInstructionAtOffset(kRuntimeCallLength));
11889   }
11890   runtime_call_wrapper(this, function_address);
11891   // Read the return address from `lr` and write it into `pc`.
11892   WritePc(ReadRegister<Instruction*>(kLinkRegCode));
11893 }
11894 #else
11895 void Simulator::DoRuntimeCall(const Instruction* instr) {
11896   USE(instr);
11897   VIXL_UNREACHABLE();
11898 }
11899 #endif
11900 
11901 
11902 void Simulator::DoConfigureCPUFeatures(const Instruction* instr) {
11903   VIXL_ASSERT(instr->Mask(ExceptionMask) == HLT);
11904 
11905   typedef ConfigureCPUFeaturesElementType ElementType;
11906   VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <
11907               std::numeric_limits<ElementType>::max());
11908 
11909   // k{Set,Enable,Disable}CPUFeatures have the same parameter encoding.
11910 
11911   size_t element_size = sizeof(ElementType);
11912   size_t offset = kConfigureCPUFeaturesListOffset;
11913 
11914   // Read the kNone-terminated list of features.
11915   CPUFeatures parameters;
11916   while (true) {
11917     ElementType feature = MemRead<ElementType>(instr + offset);
11918     offset += element_size;
11919     if (feature == static_cast<ElementType>(CPUFeatures::kNone)) break;
11920     parameters.Combine(static_cast<CPUFeatures::Feature>(feature));
11921   }
11922 
11923   switch (instr->GetImmException()) {
11924     case kSetCPUFeaturesOpcode:
11925       SetCPUFeatures(parameters);
11926       break;
11927     case kEnableCPUFeaturesOpcode:
11928       GetCPUFeatures()->Combine(parameters);
11929       break;
11930     case kDisableCPUFeaturesOpcode:
11931       GetCPUFeatures()->Remove(parameters);
11932       break;
11933     default:
11934       VIXL_UNREACHABLE();
11935       break;
11936   }
11937 
11938   WritePc(instr->GetInstructionAtOffset(AlignUp(offset, kInstructionSize)));
11939 }
11940 
11941 
11942 void Simulator::DoSaveCPUFeatures(const Instruction* instr) {
11943   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11944               (instr->GetImmException() == kSaveCPUFeaturesOpcode));
11945   USE(instr);
11946 
11947   saved_cpu_features_.push_back(*GetCPUFeatures());
11948 }
11949 
11950 
11951 void Simulator::DoRestoreCPUFeatures(const Instruction* instr) {
11952   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
11953               (instr->GetImmException() == kRestoreCPUFeaturesOpcode));
11954   USE(instr);
11955 
11956   SetCPUFeatures(saved_cpu_features_.back());
11957   saved_cpu_features_.pop_back();
11958 }
11959 
11960 
11961 }  // namespace aarch64
11962 }  // namespace vixl
11963 
11964 #endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
11965