1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
28 
29 #include "simulator-aarch64.h"
30 
31 #include <cmath>
32 #include <cstring>
33 #include <errno.h>
34 #include <limits>
35 
36 #ifdef _WIN32
37 #define WIN32_LEAN_AND_MEAN
38 #define NOMINMAX
39 #include <Windows.h>
40 #undef MultiplyHigh
41 #include <Memoryapi.h>
42 #else
43 #include <sys/mman.h>
44 #include <unistd.h>
45 #endif
46 
47 #ifdef _MSC_VER
48 #define VIXL_SYNC() MemoryBarrier()
49 #else
50 #define VIXL_SYNC() __sync_synchronize()
51 #endif
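// VIXL_SYNC() issues a full memory barrier on the host: MemoryBarrier() when
// building with MSVC, and the GCC/Clang builtin __sync_synchronize()
// otherwise. The simulator uses it where the simulated code needs ordering
// guarantees from the host (for example, when modelling barrier instructions).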
52 
53 namespace vixl {
54 namespace aarch64 {
55 
56 using vixl::internal::SimFloat16;
57 
58 const Instruction* Simulator::kEndOfSimAddress = NULL;
59 
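// TryMemoryAccess probes each byte of a prospective access. With
// VIXL_ENABLE_IMPLICIT_CHECKS defined, every byte is read through
// _vixl_internal_ReadMemory (defined in inline assembly below) so that a host
// signal handler can report a fault as MemoryAccessResult::Failure instead of
// crashing the process. Without implicit checks the probe is a no-op and
// always reports Success.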
60 MemoryAccessResult TryMemoryAccess(uintptr_t address, uintptr_t access_size) {
61 #ifdef VIXL_ENABLE_IMPLICIT_CHECKS
62   for (uintptr_t i = 0; i < access_size; i++) {
63     if (_vixl_internal_ReadMemory(address, i) == MemoryAccessResult::Failure) {
64       // The memory access failed.
65       return MemoryAccessResult::Failure;
66     }
67   }
68 
69   // Either the memory access did not raise a signal or the signal handler did
70   // not correctly return MemoryAccessResult::Failure.
71   return MemoryAccessResult::Success;
72 #else
73   USE(address);
74   USE(access_size);
75   return MemoryAccessResult::Success;
76 #endif  // VIXL_ENABLE_IMPLICIT_CHECKS
77 }
78 
79 bool MetaDataDepot::MetaDataMTE::is_active = false;
80 
81 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
82   int width = msb - lsb + 1;
83   VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
84 
85   bits <<= lsb;
86   uint32_t mask = ((1 << width) - 1) << lsb;
87   VIXL_ASSERT((mask & write_ignore_mask_) == 0);
88 
89   value_ = (value_ & ~mask) | (bits & mask);
90 }
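// Example (the NZCV register keeps N, Z, C and V in bits 31 down to 28):
// calling SetBits(31, 28, 0x6) on it writes 0b0110 into bits 31:28, i.e. it
// sets Z and C and clears N and V.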
91 
92 
93 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
94   switch (id) {
95     case NZCV:
96       return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
97     case FPCR:
98       return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
99     default:
100       VIXL_UNREACHABLE();
101       return SimSystemRegister();
102   }
103 }
104 
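// GetFormToVisitorFnMap() returns a table keyed by hashed instruction-form
// names (the "..."_h literals) whose values are Simulator member-function
// pointers. It starts from the default visitor map and overrides or extends it
// for forms (SVE2, MTE, memory copy/set, SHA, and so on) that need a dedicated
// simulation routine.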
105 const Simulator::FormToVisitorFnMap* Simulator::GetFormToVisitorFnMap() {
106   static const FormToVisitorFnMap form_to_visitor = {
107       DEFAULT_FORM_TO_VISITOR_MAP(Simulator),
108       SIM_AUD_VISITOR_MAP(Simulator),
109       {"smlal_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
110       {"smlsl_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
111       {"smull_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
112       {"sqdmlal_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
113       {"sqdmlsl_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
114       {"sqdmull_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
115       {"umlal_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
116       {"umlsl_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
117       {"umull_asimdelem_l"_h, &Simulator::SimulateNEONMulByElementLong},
118       {"fcmla_asimdelem_c_h"_h, &Simulator::SimulateNEONComplexMulByElement},
119       {"fcmla_asimdelem_c_s"_h, &Simulator::SimulateNEONComplexMulByElement},
120       {"fmlal2_asimdelem_lh"_h, &Simulator::SimulateNEONFPMulByElementLong},
121       {"fmlal_asimdelem_lh"_h, &Simulator::SimulateNEONFPMulByElementLong},
122       {"fmlsl2_asimdelem_lh"_h, &Simulator::SimulateNEONFPMulByElementLong},
123       {"fmlsl_asimdelem_lh"_h, &Simulator::SimulateNEONFPMulByElementLong},
124       {"fmla_asimdelem_rh_h"_h, &Simulator::SimulateNEONFPMulByElement},
125       {"fmls_asimdelem_rh_h"_h, &Simulator::SimulateNEONFPMulByElement},
126       {"fmulx_asimdelem_rh_h"_h, &Simulator::SimulateNEONFPMulByElement},
127       {"fmul_asimdelem_rh_h"_h, &Simulator::SimulateNEONFPMulByElement},
128       {"fmla_asimdelem_r_sd"_h, &Simulator::SimulateNEONFPMulByElement},
129       {"fmls_asimdelem_r_sd"_h, &Simulator::SimulateNEONFPMulByElement},
130       {"fmulx_asimdelem_r_sd"_h, &Simulator::SimulateNEONFPMulByElement},
131       {"fmul_asimdelem_r_sd"_h, &Simulator::SimulateNEONFPMulByElement},
132       {"sdot_asimdelem_d"_h, &Simulator::SimulateNEONDotProdByElement},
133       {"udot_asimdelem_d"_h, &Simulator::SimulateNEONDotProdByElement},
134       {"adclb_z_zzz"_h, &Simulator::SimulateSVEAddSubCarry},
135       {"adclt_z_zzz"_h, &Simulator::SimulateSVEAddSubCarry},
136       {"addhnb_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
137       {"addhnt_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
138       {"addp_z_p_zz"_h, &Simulator::SimulateSVEIntArithPair},
139       {"bcax_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
140       {"bdep_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
141       {"bext_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
142       {"bgrp_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
143       {"bsl1n_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
144       {"bsl2n_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
145       {"bsl_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
146       {"cadd_z_zz"_h, &Simulator::Simulate_ZdnT_ZdnT_ZmT_const},
147       {"cdot_z_zzz"_h, &Simulator::SimulateSVEComplexDotProduct},
148       {"cdot_z_zzzi_d"_h, &Simulator::SimulateSVEComplexDotProduct},
149       {"cdot_z_zzzi_s"_h, &Simulator::SimulateSVEComplexDotProduct},
150       {"cmla_z_zzz"_h, &Simulator::SimulateSVEComplexIntMulAdd},
151       {"cmla_z_zzzi_h"_h, &Simulator::SimulateSVEComplexIntMulAdd},
152       {"cmla_z_zzzi_s"_h, &Simulator::SimulateSVEComplexIntMulAdd},
153       {"eor3_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
154       {"eorbt_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
155       {"eortb_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
156       {"ext_z_zi_con"_h, &Simulator::Simulate_ZdB_Zn1B_Zn2B_imm},
157       {"faddp_z_p_zz"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT},
158       {"fcvtlt_z_p_z_h2s"_h, &Simulator::SimulateSVEFPConvertLong},
159       {"fcvtlt_z_p_z_s2d"_h, &Simulator::SimulateSVEFPConvertLong},
160       {"fcvtnt_z_p_z_d2s"_h, &Simulator::Simulate_ZdS_PgM_ZnD},
161       {"fcvtnt_z_p_z_s2h"_h, &Simulator::Simulate_ZdH_PgM_ZnS},
162       {"fcvtx_z_p_z_d2s"_h, &Simulator::Simulate_ZdS_PgM_ZnD},
163       {"fcvtxnt_z_p_z_d2s"_h, &Simulator::Simulate_ZdS_PgM_ZnD},
164       {"flogb_z_p_z"_h, &Simulator::Simulate_ZdT_PgM_ZnT},
165       {"fmaxnmp_z_p_zz"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT},
166       {"fmaxp_z_p_zz"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT},
167       {"fminnmp_z_p_zz"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT},
168       {"fminp_z_p_zz"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT},
169       {"fmlalb_z_zzz"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH},
170       {"fmlalb_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
171       {"fmlalt_z_zzz"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH},
172       {"fmlalt_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
173       {"fmlslb_z_zzz"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH},
174       {"fmlslb_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
175       {"fmlslt_z_zzz"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH},
176       {"fmlslt_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
177       {"histcnt_z_p_zz"_h, &Simulator::Simulate_ZdT_PgZ_ZnT_ZmT},
178       {"histseg_z_zz"_h, &Simulator::Simulate_ZdB_ZnB_ZmB},
179       {"ldnt1b_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
180       {"ldnt1b_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_PgZ_ZnS_Xm},
181       {"ldnt1d_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
182       {"ldnt1h_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
183       {"ldnt1h_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_PgZ_ZnS_Xm},
184       {"ldnt1sb_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
185       {"ldnt1sb_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_PgZ_ZnS_Xm},
186       {"ldnt1sh_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
187       {"ldnt1sh_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_PgZ_ZnS_Xm},
188       {"ldnt1sw_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
189       {"ldnt1w_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_PgZ_ZnD_Xm},
190       {"ldnt1w_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_PgZ_ZnS_Xm},
191       {"match_p_p_zz"_h, &Simulator::Simulate_PdT_PgZ_ZnT_ZmT},
192       {"mla_z_zzzi_d"_h, &Simulator::SimulateSVEMlaMlsIndex},
193       {"mla_z_zzzi_h"_h, &Simulator::SimulateSVEMlaMlsIndex},
194       {"mla_z_zzzi_s"_h, &Simulator::SimulateSVEMlaMlsIndex},
195       {"mls_z_zzzi_d"_h, &Simulator::SimulateSVEMlaMlsIndex},
196       {"mls_z_zzzi_h"_h, &Simulator::SimulateSVEMlaMlsIndex},
197       {"mls_z_zzzi_s"_h, &Simulator::SimulateSVEMlaMlsIndex},
198       {"mul_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
199       {"mul_z_zzi_d"_h, &Simulator::SimulateSVEMulIndex},
200       {"mul_z_zzi_h"_h, &Simulator::SimulateSVEMulIndex},
201       {"mul_z_zzi_s"_h, &Simulator::SimulateSVEMulIndex},
202       {"nbsl_z_zzz"_h, &Simulator::SimulateSVEBitwiseTernary},
203       {"nmatch_p_p_zz"_h, &Simulator::Simulate_PdT_PgZ_ZnT_ZmT},
204       {"pmul_z_zz"_h, &Simulator::Simulate_ZdB_ZnB_ZmB},
205       {"pmullb_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
206       {"pmullt_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
207       {"raddhnb_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
208       {"raddhnt_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
209       {"rshrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
210       {"rshrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
211       {"rsubhnb_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
212       {"rsubhnt_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
213       {"saba_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnT_ZmT},
214       {"sabalb_z_zzz"_h, &Simulator::SimulateSVEInterleavedArithLong},
215       {"sabalt_z_zzz"_h, &Simulator::SimulateSVEInterleavedArithLong},
216       {"sabdlb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
217       {"sabdlt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
218       {"sadalp_z_p_z"_h, &Simulator::Simulate_ZdaT_PgM_ZnTb},
219       {"saddlb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
220       {"saddlbt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
221       {"saddlt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
222       {"saddwb_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
223       {"saddwt_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
224       {"sbclb_z_zzz"_h, &Simulator::SimulateSVEAddSubCarry},
225       {"sbclt_z_zzz"_h, &Simulator::SimulateSVEAddSubCarry},
226       {"shadd_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
227       {"shrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
228       {"shrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
229       {"shsub_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
230       {"shsubr_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
231       {"sli_z_zzi"_h, &Simulator::Simulate_ZdT_ZnT_const},
232       {"smaxp_z_p_zz"_h, &Simulator::SimulateSVEIntArithPair},
233       {"sminp_z_p_zz"_h, &Simulator::SimulateSVEIntArithPair},
234       {"smlalb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
235       {"smlalb_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
236       {"smlalb_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
237       {"smlalt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
238       {"smlalt_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
239       {"smlalt_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
240       {"smlslb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
241       {"smlslb_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
242       {"smlslb_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
243       {"smlslt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
244       {"smlslt_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
245       {"smlslt_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
246       {"smulh_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
247       {"smullb_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
248       {"smullb_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
249       {"smullb_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
250       {"smullt_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
251       {"smullt_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
252       {"smullt_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
253       {"splice_z_p_zz_con"_h, &Simulator::VisitSVEVectorSplice},
254       {"sqabs_z_p_z"_h, &Simulator::Simulate_ZdT_PgM_ZnT},
255       {"sqadd_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
256       {"sqcadd_z_zz"_h, &Simulator::Simulate_ZdnT_ZdnT_ZmT_const},
257       {"sqdmlalb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
258       {"sqdmlalb_z_zzzi_d"_h, &Simulator::Simulate_ZdaD_ZnS_ZmS_imm},
259       {"sqdmlalb_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
260       {"sqdmlalbt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
261       {"sqdmlalt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
262       {"sqdmlalt_z_zzzi_d"_h, &Simulator::Simulate_ZdaD_ZnS_ZmS_imm},
263       {"sqdmlalt_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
264       {"sqdmlslb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
265       {"sqdmlslb_z_zzzi_d"_h, &Simulator::Simulate_ZdaD_ZnS_ZmS_imm},
266       {"sqdmlslb_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
267       {"sqdmlslbt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
268       {"sqdmlslt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
269       {"sqdmlslt_z_zzzi_d"_h, &Simulator::Simulate_ZdaD_ZnS_ZmS_imm},
270       {"sqdmlslt_z_zzzi_s"_h, &Simulator::Simulate_ZdaS_ZnH_ZmH_imm},
271       {"sqdmulh_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
272       {"sqdmulh_z_zzi_d"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
273       {"sqdmulh_z_zzi_h"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
274       {"sqdmulh_z_zzi_s"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
275       {"sqdmullb_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
276       {"sqdmullb_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
277       {"sqdmullb_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
278       {"sqdmullt_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
279       {"sqdmullt_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
280       {"sqdmullt_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
281       {"sqneg_z_p_z"_h, &Simulator::Simulate_ZdT_PgM_ZnT},
282       {"sqrdcmlah_z_zzz"_h, &Simulator::SimulateSVEComplexIntMulAdd},
283       {"sqrdcmlah_z_zzzi_h"_h, &Simulator::SimulateSVEComplexIntMulAdd},
284       {"sqrdcmlah_z_zzzi_s"_h, &Simulator::SimulateSVEComplexIntMulAdd},
285       {"sqrdmlah_z_zzz"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
286       {"sqrdmlah_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
287       {"sqrdmlah_z_zzzi_h"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
288       {"sqrdmlah_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
289       {"sqrdmlsh_z_zzz"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
290       {"sqrdmlsh_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
291       {"sqrdmlsh_z_zzzi_h"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
292       {"sqrdmlsh_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingMulAddHigh},
293       {"sqrdmulh_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
294       {"sqrdmulh_z_zzi_d"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
295       {"sqrdmulh_z_zzi_h"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
296       {"sqrdmulh_z_zzi_s"_h, &Simulator::SimulateSVESaturatingMulHighIndex},
297       {"sqrshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
298       {"sqrshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
299       {"sqrshrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
300       {"sqrshrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
301       {"sqrshrunb_z_zi"_h, &Simulator::SimulateSVENarrow},
302       {"sqrshrunt_z_zi"_h, &Simulator::SimulateSVENarrow},
303       {"sqshl_z_p_zi"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_const},
304       {"sqshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
305       {"sqshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
306       {"sqshlu_z_p_zi"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_const},
307       {"sqshrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
308       {"sqshrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
309       {"sqshrunb_z_zi"_h, &Simulator::SimulateSVENarrow},
310       {"sqshrunt_z_zi"_h, &Simulator::SimulateSVENarrow},
311       {"sqsub_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
312       {"sqsubr_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
313       {"sqxtnb_z_zz"_h, &Simulator::SimulateSVENarrow},
314       {"sqxtnt_z_zz"_h, &Simulator::SimulateSVENarrow},
315       {"sqxtunb_z_zz"_h, &Simulator::SimulateSVENarrow},
316       {"sqxtunt_z_zz"_h, &Simulator::SimulateSVENarrow},
317       {"srhadd_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
318       {"sri_z_zzi"_h, &Simulator::Simulate_ZdT_ZnT_const},
319       {"srshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
320       {"srshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
321       {"srshr_z_p_zi"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_const},
322       {"srsra_z_zi"_h, &Simulator::Simulate_ZdaT_ZnT_const},
323       {"sshllb_z_zi"_h, &Simulator::SimulateSVEShiftLeftImm},
324       {"sshllt_z_zi"_h, &Simulator::SimulateSVEShiftLeftImm},
325       {"ssra_z_zi"_h, &Simulator::Simulate_ZdaT_ZnT_const},
326       {"ssublb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
327       {"ssublbt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
328       {"ssublt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
329       {"ssubltb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
330       {"ssubwb_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
331       {"ssubwt_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
332       {"stnt1b_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_Pg_ZnD_Xm},
333       {"stnt1b_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_Pg_ZnS_Xm},
334       {"stnt1d_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_Pg_ZnD_Xm},
335       {"stnt1h_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_Pg_ZnD_Xm},
336       {"stnt1h_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_Pg_ZnS_Xm},
337       {"stnt1w_z_p_ar_d_64_unscaled"_h, &Simulator::Simulate_ZtD_Pg_ZnD_Xm},
338       {"stnt1w_z_p_ar_s_x32_unscaled"_h, &Simulator::Simulate_ZtS_Pg_ZnS_Xm},
339       {"subhnb_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
340       {"subhnt_z_zz"_h, &Simulator::SimulateSVEAddSubHigh},
341       {"suqadd_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
342       {"tbl_z_zz_2"_h, &Simulator::VisitSVETableLookup},
343       {"tbx_z_zz"_h, &Simulator::VisitSVETableLookup},
344       {"uaba_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnT_ZmT},
345       {"uabalb_z_zzz"_h, &Simulator::SimulateSVEInterleavedArithLong},
346       {"uabalt_z_zzz"_h, &Simulator::SimulateSVEInterleavedArithLong},
347       {"uabdlb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
348       {"uabdlt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
349       {"uadalp_z_p_z"_h, &Simulator::Simulate_ZdaT_PgM_ZnTb},
350       {"uaddlb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
351       {"uaddlt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
352       {"uaddwb_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
353       {"uaddwt_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
354       {"uhadd_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
355       {"uhsub_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
356       {"uhsubr_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
357       {"umaxp_z_p_zz"_h, &Simulator::SimulateSVEIntArithPair},
358       {"uminp_z_p_zz"_h, &Simulator::SimulateSVEIntArithPair},
359       {"umlalb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
360       {"umlalb_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
361       {"umlalb_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
362       {"umlalt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
363       {"umlalt_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
364       {"umlalt_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
365       {"umlslb_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
366       {"umlslb_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
367       {"umlslb_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
368       {"umlslt_z_zzz"_h, &Simulator::Simulate_ZdaT_ZnTb_ZmTb},
369       {"umlslt_z_zzzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
370       {"umlslt_z_zzzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
371       {"umulh_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmT},
372       {"umullb_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
373       {"umullb_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
374       {"umullb_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
375       {"umullt_z_zz"_h, &Simulator::SimulateSVEIntMulLongVec},
376       {"umullt_z_zzi_d"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
377       {"umullt_z_zzi_s"_h, &Simulator::SimulateSVESaturatingIntMulLongIdx},
378       {"uqadd_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
379       {"uqrshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
380       {"uqrshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
381       {"uqrshrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
382       {"uqrshrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
383       {"uqshl_z_p_zi"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_const},
384       {"uqshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
385       {"uqshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
386       {"uqshrnb_z_zi"_h, &Simulator::SimulateSVENarrow},
387       {"uqshrnt_z_zi"_h, &Simulator::SimulateSVENarrow},
388       {"uqsub_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
389       {"uqsubr_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
390       {"uqxtnb_z_zz"_h, &Simulator::SimulateSVENarrow},
391       {"uqxtnt_z_zz"_h, &Simulator::SimulateSVENarrow},
392       {"urecpe_z_p_z"_h, &Simulator::Simulate_ZdS_PgM_ZnS},
393       {"urhadd_z_p_zz"_h, &Simulator::SimulateSVEHalvingAddSub},
394       {"urshl_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
395       {"urshlr_z_p_zz"_h, &Simulator::VisitSVEBitwiseShiftByVector_Predicated},
396       {"urshr_z_p_zi"_h, &Simulator::Simulate_ZdnT_PgM_ZdnT_const},
397       {"ursqrte_z_p_z"_h, &Simulator::Simulate_ZdS_PgM_ZnS},
398       {"ursra_z_zi"_h, &Simulator::Simulate_ZdaT_ZnT_const},
399       {"ushllb_z_zi"_h, &Simulator::SimulateSVEShiftLeftImm},
400       {"ushllt_z_zi"_h, &Simulator::SimulateSVEShiftLeftImm},
401       {"usqadd_z_p_zz"_h, &Simulator::SimulateSVESaturatingArithmetic},
402       {"usra_z_zi"_h, &Simulator::Simulate_ZdaT_ZnT_const},
403       {"usublb_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
404       {"usublt_z_zz"_h, &Simulator::SimulateSVEInterleavedArithLong},
405       {"usubwb_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
406       {"usubwt_z_zz"_h, &Simulator::Simulate_ZdT_ZnT_ZmTb},
407       {"whilege_p_p_rr"_h, &Simulator::VisitSVEIntCompareScalarCountAndLimit},
408       {"whilegt_p_p_rr"_h, &Simulator::VisitSVEIntCompareScalarCountAndLimit},
409       {"whilehi_p_p_rr"_h, &Simulator::VisitSVEIntCompareScalarCountAndLimit},
410       {"whilehs_p_p_rr"_h, &Simulator::VisitSVEIntCompareScalarCountAndLimit},
411       {"whilerw_p_rr"_h, &Simulator::Simulate_PdT_Xn_Xm},
412       {"whilewr_p_rr"_h, &Simulator::Simulate_PdT_Xn_Xm},
413       {"xar_z_zzi"_h, &Simulator::SimulateSVEExclusiveOrRotate},
414       {"smmla_z_zzz"_h, &Simulator::SimulateMatrixMul},
415       {"ummla_z_zzz"_h, &Simulator::SimulateMatrixMul},
416       {"usmmla_z_zzz"_h, &Simulator::SimulateMatrixMul},
417       {"smmla_asimdsame2_g"_h, &Simulator::SimulateMatrixMul},
418       {"ummla_asimdsame2_g"_h, &Simulator::SimulateMatrixMul},
419       {"usmmla_asimdsame2_g"_h, &Simulator::SimulateMatrixMul},
420       {"fmmla_z_zzz_s"_h, &Simulator::SimulateSVEFPMatrixMul},
421       {"fmmla_z_zzz_d"_h, &Simulator::SimulateSVEFPMatrixMul},
422       {"ld1row_z_p_bi_u32"_h,
423        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm},
424       {"ld1row_z_p_br_contiguous"_h,
425        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar},
426       {"ld1rod_z_p_bi_u64"_h,
427        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm},
428       {"ld1rod_z_p_br_contiguous"_h,
429        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar},
430       {"ld1rob_z_p_bi_u8"_h,
431        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm},
432       {"ld1rob_z_p_br_contiguous"_h,
433        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar},
434       {"ld1roh_z_p_bi_u16"_h,
435        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm},
436       {"ld1roh_z_p_br_contiguous"_h,
437        &Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar},
438       {"usdot_z_zzz_s"_h, &Simulator::VisitSVEIntMulAddUnpredicated},
439       {"sudot_z_zzzi_s"_h, &Simulator::VisitSVEMulIndex},
440       {"usdot_z_zzzi_s"_h, &Simulator::VisitSVEMulIndex},
441       {"usdot_asimdsame2_d"_h, &Simulator::VisitNEON3SameExtra},
442       {"sudot_asimdelem_d"_h, &Simulator::SimulateNEONDotProdByElement},
443       {"usdot_asimdelem_d"_h, &Simulator::SimulateNEONDotProdByElement},
444       {"addg_64_addsub_immtags"_h, &Simulator::SimulateMTEAddSubTag},
445       {"gmi_64g_dp_2src"_h, &Simulator::SimulateMTETagMaskInsert},
446       {"irg_64i_dp_2src"_h, &Simulator::Simulate_XdSP_XnSP_Xm},
447       {"ldg_64loffset_ldsttags"_h, &Simulator::SimulateMTELoadTag},
448       {"st2g_64soffset_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
449       {"st2g_64spost_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
450       {"st2g_64spre_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
451       {"stgp_64_ldstpair_off"_h, &Simulator::SimulateMTEStoreTagPair},
452       {"stgp_64_ldstpair_post"_h, &Simulator::SimulateMTEStoreTagPair},
453       {"stgp_64_ldstpair_pre"_h, &Simulator::SimulateMTEStoreTagPair},
454       {"stg_64soffset_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
455       {"stg_64spost_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
456       {"stg_64spre_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
457       {"stz2g_64soffset_ldsttags"_h,
458        &Simulator::Simulator::SimulateMTEStoreTag},
459       {"stz2g_64spost_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
460       {"stz2g_64spre_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
461       {"stzg_64soffset_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
462       {"stzg_64spost_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
463       {"stzg_64spre_ldsttags"_h, &Simulator::Simulator::SimulateMTEStoreTag},
464       {"subg_64_addsub_immtags"_h, &Simulator::SimulateMTEAddSubTag},
465       {"subps_64s_dp_2src"_h, &Simulator::SimulateMTESubPointer},
466       {"subp_64s_dp_2src"_h, &Simulator::SimulateMTESubPointer},
467       {"cpyen_cpy_memcms"_h, &Simulator::SimulateCpyE},
468       {"cpyern_cpy_memcms"_h, &Simulator::SimulateCpyE},
469       {"cpyewn_cpy_memcms"_h, &Simulator::SimulateCpyE},
470       {"cpye_cpy_memcms"_h, &Simulator::SimulateCpyE},
471       {"cpyfen_cpy_memcms"_h, &Simulator::SimulateCpyE},
472       {"cpyfern_cpy_memcms"_h, &Simulator::SimulateCpyE},
473       {"cpyfewn_cpy_memcms"_h, &Simulator::SimulateCpyE},
474       {"cpyfe_cpy_memcms"_h, &Simulator::SimulateCpyE},
475       {"cpyfmn_cpy_memcms"_h, &Simulator::SimulateCpyM},
476       {"cpyfmrn_cpy_memcms"_h, &Simulator::SimulateCpyM},
477       {"cpyfmwn_cpy_memcms"_h, &Simulator::SimulateCpyM},
478       {"cpyfm_cpy_memcms"_h, &Simulator::SimulateCpyM},
479       {"cpyfpn_cpy_memcms"_h, &Simulator::SimulateCpyFP},
480       {"cpyfprn_cpy_memcms"_h, &Simulator::SimulateCpyFP},
481       {"cpyfpwn_cpy_memcms"_h, &Simulator::SimulateCpyFP},
482       {"cpyfp_cpy_memcms"_h, &Simulator::SimulateCpyFP},
483       {"cpymn_cpy_memcms"_h, &Simulator::SimulateCpyM},
484       {"cpymrn_cpy_memcms"_h, &Simulator::SimulateCpyM},
485       {"cpymwn_cpy_memcms"_h, &Simulator::SimulateCpyM},
486       {"cpym_cpy_memcms"_h, &Simulator::SimulateCpyM},
487       {"cpypn_cpy_memcms"_h, &Simulator::SimulateCpyP},
488       {"cpyprn_cpy_memcms"_h, &Simulator::SimulateCpyP},
489       {"cpypwn_cpy_memcms"_h, &Simulator::SimulateCpyP},
490       {"cpyp_cpy_memcms"_h, &Simulator::SimulateCpyP},
491       {"setp_set_memcms"_h, &Simulator::SimulateSetP},
492       {"setpn_set_memcms"_h, &Simulator::SimulateSetP},
493       {"setgp_set_memcms"_h, &Simulator::SimulateSetGP},
494       {"setgpn_set_memcms"_h, &Simulator::SimulateSetGP},
495       {"setm_set_memcms"_h, &Simulator::SimulateSetM},
496       {"setmn_set_memcms"_h, &Simulator::SimulateSetM},
497       {"setgm_set_memcms"_h, &Simulator::SimulateSetGM},
498       {"setgmn_set_memcms"_h, &Simulator::SimulateSetGM},
499       {"sete_set_memcms"_h, &Simulator::SimulateSetE},
500       {"seten_set_memcms"_h, &Simulator::SimulateSetE},
501       {"setge_set_memcms"_h, &Simulator::SimulateSetE},
502       {"setgen_set_memcms"_h, &Simulator::SimulateSetE},
503       {"abs_32_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
504       {"abs_64_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
505       {"cnt_32_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
506       {"cnt_64_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
507       {"ctz_32_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
508       {"ctz_64_dp_1src"_h, &Simulator::VisitDataProcessing1Source},
509       {"smax_32_dp_2src"_h, &Simulator::SimulateSignedMinMax},
510       {"smax_64_dp_2src"_h, &Simulator::SimulateSignedMinMax},
511       {"smin_32_dp_2src"_h, &Simulator::SimulateSignedMinMax},
512       {"smin_64_dp_2src"_h, &Simulator::SimulateSignedMinMax},
513       {"smax_32_minmax_imm"_h, &Simulator::SimulateSignedMinMax},
514       {"smax_64_minmax_imm"_h, &Simulator::SimulateSignedMinMax},
515       {"smin_32_minmax_imm"_h, &Simulator::SimulateSignedMinMax},
516       {"smin_64_minmax_imm"_h, &Simulator::SimulateSignedMinMax},
517       {"umax_32_dp_2src"_h, &Simulator::SimulateUnsignedMinMax},
518       {"umax_64_dp_2src"_h, &Simulator::SimulateUnsignedMinMax},
519       {"umin_32_dp_2src"_h, &Simulator::SimulateUnsignedMinMax},
520       {"umin_64_dp_2src"_h, &Simulator::SimulateUnsignedMinMax},
521       {"umax_32u_minmax_imm"_h, &Simulator::SimulateUnsignedMinMax},
522       {"umax_64u_minmax_imm"_h, &Simulator::SimulateUnsignedMinMax},
523       {"umin_32u_minmax_imm"_h, &Simulator::SimulateUnsignedMinMax},
524       {"umin_64u_minmax_imm"_h, &Simulator::SimulateUnsignedMinMax},
525       {"bcax_vvv16_crypto4"_h, &Simulator::SimulateNEONSHA3},
526       {"eor3_vvv16_crypto4"_h, &Simulator::SimulateNEONSHA3},
527       {"rax1_vvv2_cryptosha512_3"_h, &Simulator::SimulateNEONSHA3},
528       {"xar_vvv2_crypto3_imm6"_h, &Simulator::SimulateNEONSHA3},
529       {"sha512h_qqv_cryptosha512_3"_h, &Simulator::SimulateSHA512},
530       {"sha512h2_qqv_cryptosha512_3"_h, &Simulator::SimulateSHA512},
531       {"sha512su0_vv2_cryptosha512_2"_h, &Simulator::SimulateSHA512},
532       {"sha512su1_vvv2_cryptosha512_3"_h, &Simulator::SimulateSHA512},
533       {"pmullb_z_zz_q"_h, &Simulator::SimulateSVEPmull128},
534       {"pmullt_z_zz_q"_h, &Simulator::SimulateSVEPmull128},
535   };
536   return &form_to_visitor;
537 }
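// A minimal dispatch sketch (not VIXL's actual decode path; it assumes the map
// behaves like an associative container keyed by the hashed form, and that
// form_hash has already been computed for the decoded instruction):
//
//   const auto* map = Simulator::GetFormToVisitorFnMap();
//   auto it = map->find(form_hash);
//   if (it != map->end()) {
//     (this->*(it->second))(instr);  // Call the matching visitor.
//   }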
538 
539 // Try to access the piece of memory given by the address passed in RDI and the
540 // offset passed in RSI, using testb. If a signal is raised then the signal
541 // handler should set RIP to _vixl_internal_AccessMemory_continue and RAX to
542 // MemoryAccessResult::Failure. If no signal is raised then zero RAX before
543 // returning.
544 #ifdef VIXL_ENABLE_IMPLICIT_CHECKS
545 #ifdef __x86_64__
546 asm(R"(
547   .globl _vixl_internal_ReadMemory
548   _vixl_internal_ReadMemory:
549     testb (%rdi, %rsi), %al
550     xorq %rax, %rax
551     ret
552   .globl _vixl_internal_AccessMemory_continue
553   _vixl_internal_AccessMemory_continue:
554     ret
555 )");
556 #else
557 asm(R"(
558   .globl _vixl_internal_ReadMemory
559   _vixl_internal_ReadMemory:
560     ret
561 )");
562 #endif  // __x86_64__
563 #endif  // VIXL_ENABLE_IMPLICIT_CHECKS
564 
565 Simulator::Simulator(Decoder* decoder, FILE* stream, SimStack::Allocated stack)
566     : memory_(std::move(stack)),
567       last_instr_(NULL),
568       cpu_features_auditor_(decoder, CPUFeatures::All()),
569       gcs_(kGCSNoStack),
570       gcs_enabled_(false) {
571   // Ensure that shift operations act as the simulator expects.
572   VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
573   VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7fffffff);
574 
575   // Set up a placeholder pipe for CanReadMemory.
576 #ifndef _WIN32
577   VIXL_CHECK(pipe(placeholder_pipe_fd_) == 0);
578 #endif
579 
580   // Set up the decoder.
581   decoder_ = decoder;
582   decoder_->AppendVisitor(this);
583 
584   stream_ = stream;
585 
586   print_disasm_ = new PrintDisassembler(stream_);
587 
588   memory_.AppendMetaData(&meta_data_);
589 
590   // The Simulator and Disassembler share the same available list, held by the
591   // auditor. The Disassembler only annotates instructions with features that
592   // are _not_ available, so registering the auditor should have no effect
593   // unless the simulator is about to abort (due to missing features). In
594   // practice, this means that with trace enabled, the simulator will crash just
595   // after the disassembler prints the instruction, with the missing features
596   // enumerated.
597   print_disasm_->RegisterCPUFeaturesAuditor(&cpu_features_auditor_);
598 
599   SetColouredTrace(false);
600   trace_parameters_ = LOG_NONE;
601 
602   // We have to configure the SVE vector register length before calling
603   // ResetState().
604   SetVectorLengthInBits(kZRegMinSize);
605 
606   ResetState();
607 
608   // Print a warning about exclusive-access instructions, but only the first
609   // time they are encountered. This warning can be silenced using
610   // SilenceExclusiveAccessWarning().
611   print_exclusive_access_warning_ = true;
612 
613   guard_pages_ = false;
614 
615   // Initialize the common state of RNDR and RNDRRS.
616   uint64_t seed = (11 + (22 << 16) + (static_cast<uint64_t>(33) << 32));
617   rand_gen_.seed(seed);
618 
619   // Initialize all bits of pseudo predicate register to true.
620   LogicPRegister ones(pregister_all_true_);
621   ones.SetAllBits();
622 
623   // Initialize the debugger but disable it by default.
624   SetDebuggerEnabled(false);
625   debugger_ = std::make_unique<Debugger>(this);
626 }
627 
628 void Simulator::ResetSystemRegisters() {
629   // Reset the system registers.
630   nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
631   fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
632   ResetFFR();
633 }
634 
635 void Simulator::ResetRegisters() {
636   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
637     WriteXRegister(i, 0xbadbeef);
638   }
639   // Returning to address 0 exits the Simulator.
640   WriteLr(kEndOfSimAddress);
641 }
642 
643 void Simulator::ResetVRegisters() {
644   // Set SVE/FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
645   VIXL_ASSERT((GetVectorLengthInBytes() % kDRegSizeInBytes) == 0);
646   int lane_count = GetVectorLengthInBytes() / kDRegSizeInBytes;
647   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
648     VIXL_ASSERT(vregisters_[i].GetSizeInBytes() == GetVectorLengthInBytes());
649     vregisters_[i].NotifyAccessAsZ();
650     for (int lane = 0; lane < lane_count; lane++) {
651       // Encode the register number and (D-sized) lane into each NaN, to
652       // make them easier to trace.
653       uint64_t nan_bits = 0x7ff0f0007f80f000 | (0x0000000100000000 * i) |
654                           (0x0000000000000001 * lane);
655       VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits & kDRegMask)));
656       VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & kSRegMask)));
657       vregisters_[i].Insert(lane, nan_bits);
658     }
659   }
660 }
661 
662 void Simulator::ResetPRegisters() {
663   VIXL_ASSERT((GetPredicateLengthInBytes() % kHRegSizeInBytes) == 0);
664   int lane_count = GetPredicateLengthInBytes() / kHRegSizeInBytes;
665   // Ensure the register configuration fits in this bit encoding.
666   VIXL_STATIC_ASSERT(kNumberOfPRegisters <= UINT8_MAX);
667   VIXL_ASSERT(lane_count <= UINT8_MAX);
668   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
669     VIXL_ASSERT(pregisters_[i].GetSizeInBytes() == GetPredicateLengthInBytes());
670     for (int lane = 0; lane < lane_count; lane++) {
671       // Encode the register number and (H-sized) lane into each lane slot.
672       uint16_t bits = (0x0100 * lane) | i;
673       pregisters_[i].Insert(lane, bits);
674     }
675   }
676 }
677 
678 void Simulator::ResetFFR() {
679   VIXL_ASSERT((GetPredicateLengthInBytes() % kHRegSizeInBytes) == 0);
680   int default_active_lanes = GetPredicateLengthInBytes() / kHRegSizeInBytes;
681   ffr_register_.Write(static_cast<uint16_t>(GetUintMask(default_active_lanes)));
682 }
683 
684 void Simulator::ResetState() {
685   ResetSystemRegisters();
686   ResetRegisters();
687   ResetVRegisters();
688   ResetPRegisters();
689 
690   WriteSp(memory_.GetStack().GetBase());
691   ResetGCSState();
692   EnableGCSCheck();
693 
694   pc_ = NULL;
695   pc_modified_ = false;
696 
697   // BTI state.
698   btype_ = DefaultBType;
699   next_btype_ = DefaultBType;
700 
701   meta_data_.ResetState();
702 }
703 
704 void Simulator::SetVectorLengthInBits(unsigned vector_length) {
705   VIXL_ASSERT((vector_length >= kZRegMinSize) &&
706               (vector_length <= kZRegMaxSize));
707   VIXL_ASSERT((vector_length % kZRegMinSize) == 0);
708   vector_length_ = vector_length;
709 
710   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
711     vregisters_[i].SetSizeInBytes(GetVectorLengthInBytes());
712   }
713   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
714     pregisters_[i].SetSizeInBytes(GetPredicateLengthInBytes());
715   }
716 
717   ffr_register_.SetSizeInBytes(GetPredicateLengthInBytes());
718 
719   ResetVRegisters();
720   ResetPRegisters();
721   ResetFFR();
722 }
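// The asserts above restrict the SVE vector length to a multiple of
// kZRegMinSize (128 bits) between kZRegMinSize and kZRegMaxSize; for example,
// SetVectorLengthInBits(256) selects a 256-bit VL and resets the Z, P and FFR
// registers to match the new size.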
723 
724 Simulator::~Simulator() {
725   // The decoder may outlive the simulator.
726   decoder_->RemoveVisitor(print_disasm_);
727   delete print_disasm_;
728 #ifndef _WIN32
729   close(placeholder_pipe_fd_[0]);
730   close(placeholder_pipe_fd_[1]);
731 #endif
732   if (IsAllocatedGCS(gcs_)) {
733     GetGCSManager().FreeStack(gcs_);
734   }
735 }
736 
737 
738 void Simulator::Run() {
739   // Flush any written registers before executing anything, so that
740   // manually-set registers are logged _before_ the first instruction.
741   LogAllWrittenRegisters();
742 
743   if (debugger_enabled_) {
744     // Slow path to check for breakpoints only if the debugger is enabled.
745     Debugger* debugger = GetDebugger();
746     while (!IsSimulationFinished()) {
747       if (debugger->IsAtBreakpoint()) {
748         fprintf(stream_, "Debugger hit breakpoint, breaking...\n");
749         debugger->Debug();
750       } else {
751         ExecuteInstruction();
752       }
753     }
754   } else {
755     while (!IsSimulationFinished()) {
756       ExecuteInstruction();
757     }
758   }
759 }
760 
761 
762 void Simulator::RunFrom(const Instruction* first) {
763   WritePc(first, NoBranchLog);
764   Run();
765 }
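// A hedged usage sketch (see the VIXL examples for the canonical pattern):
// bind a Simulator to a Decoder, then run generated code from its first
// instruction.
//
//   Decoder decoder;
//   Simulator simulator(&decoder);
//   simulator.RunFrom(masm.GetBuffer()->GetStartAddress<Instruction*>());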
766 
767 
768 // clang-format off
769 const char* Simulator::xreg_names[] = {"x0",  "x1",  "x2",  "x3",  "x4",  "x5",
770                                        "x6",  "x7",  "x8",  "x9",  "x10", "x11",
771                                        "x12", "x13", "x14", "x15", "x16", "x17",
772                                        "x18", "x19", "x20", "x21", "x22", "x23",
773                                        "x24", "x25", "x26", "x27", "x28", "x29",
774                                        "lr",  "xzr", "sp"};
775 
776 const char* Simulator::wreg_names[] = {"w0",  "w1",  "w2",  "w3",  "w4",  "w5",
777                                        "w6",  "w7",  "w8",  "w9",  "w10", "w11",
778                                        "w12", "w13", "w14", "w15", "w16", "w17",
779                                        "w18", "w19", "w20", "w21", "w22", "w23",
780                                        "w24", "w25", "w26", "w27", "w28", "w29",
781                                        "w30", "wzr", "wsp"};
782 
783 const char* Simulator::breg_names[] = {"b0",  "b1",  "b2",  "b3",  "b4",  "b5",
784                                        "b6",  "b7",  "b8",  "b9",  "b10", "b11",
785                                        "b12", "b13", "b14", "b15", "b16", "b17",
786                                        "b18", "b19", "b20", "b21", "b22", "b23",
787                                        "b24", "b25", "b26", "b27", "b28", "b29",
788                                        "b30", "b31"};
789 
790 const char* Simulator::hreg_names[] = {"h0",  "h1",  "h2",  "h3",  "h4",  "h5",
791                                        "h6",  "h7",  "h8",  "h9",  "h10", "h11",
792                                        "h12", "h13", "h14", "h15", "h16", "h17",
793                                        "h18", "h19", "h20", "h21", "h22", "h23",
794                                        "h24", "h25", "h26", "h27", "h28", "h29",
795                                        "h30", "h31"};
796 
797 const char* Simulator::sreg_names[] = {"s0",  "s1",  "s2",  "s3",  "s4",  "s5",
798                                        "s6",  "s7",  "s8",  "s9",  "s10", "s11",
799                                        "s12", "s13", "s14", "s15", "s16", "s17",
800                                        "s18", "s19", "s20", "s21", "s22", "s23",
801                                        "s24", "s25", "s26", "s27", "s28", "s29",
802                                        "s30", "s31"};
803 
804 const char* Simulator::dreg_names[] = {"d0",  "d1",  "d2",  "d3",  "d4",  "d5",
805                                        "d6",  "d7",  "d8",  "d9",  "d10", "d11",
806                                        "d12", "d13", "d14", "d15", "d16", "d17",
807                                        "d18", "d19", "d20", "d21", "d22", "d23",
808                                        "d24", "d25", "d26", "d27", "d28", "d29",
809                                        "d30", "d31"};
810 
811 const char* Simulator::vreg_names[] = {"v0",  "v1",  "v2",  "v3",  "v4",  "v5",
812                                        "v6",  "v7",  "v8",  "v9",  "v10", "v11",
813                                        "v12", "v13", "v14", "v15", "v16", "v17",
814                                        "v18", "v19", "v20", "v21", "v22", "v23",
815                                        "v24", "v25", "v26", "v27", "v28", "v29",
816                                        "v30", "v31"};
817 
818 const char* Simulator::zreg_names[] = {"z0",  "z1",  "z2",  "z3",  "z4",  "z5",
819                                        "z6",  "z7",  "z8",  "z9",  "z10", "z11",
820                                        "z12", "z13", "z14", "z15", "z16", "z17",
821                                        "z18", "z19", "z20", "z21", "z22", "z23",
822                                        "z24", "z25", "z26", "z27", "z28", "z29",
823                                        "z30", "z31"};
824 
825 const char* Simulator::preg_names[] = {"p0",  "p1",  "p2",  "p3",  "p4",  "p5",
826                                        "p6",  "p7",  "p8",  "p9",  "p10", "p11",
827                                        "p12", "p13", "p14", "p15"};
828 // clang-format on
829 
830 
831 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
832   // If the code represents the stack pointer, index the name after zr.
833   if ((code == kSPRegInternalCode) ||
834       ((code == kZeroRegCode) && (mode == Reg31IsStackPointer))) {
835     code = kZeroRegCode + 1;
836   }
837   VIXL_ASSERT(code < ArrayLength(wreg_names));
838   return wreg_names[code];
839 }
840 
841 
842 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
843   // If the code represents the stack pointer, index the name after zr.
844   if ((code == kSPRegInternalCode) ||
845       ((code == kZeroRegCode) && (mode == Reg31IsStackPointer))) {
846     code = kZeroRegCode + 1;
847   }
848   VIXL_ASSERT(code < ArrayLength(xreg_names));
849   return xreg_names[code];
850 }
851 
852 
853 const char* Simulator::BRegNameForCode(unsigned code) {
854   VIXL_ASSERT(code < kNumberOfVRegisters);
855   return breg_names[code];
856 }
857 
858 
859 const char* Simulator::HRegNameForCode(unsigned code) {
860   VIXL_ASSERT(code < kNumberOfVRegisters);
861   return hreg_names[code];
862 }
863 
864 
865 const char* Simulator::SRegNameForCode(unsigned code) {
866   VIXL_ASSERT(code < kNumberOfVRegisters);
867   return sreg_names[code];
868 }
869 
870 
871 const char* Simulator::DRegNameForCode(unsigned code) {
872   VIXL_ASSERT(code < kNumberOfVRegisters);
873   return dreg_names[code];
874 }
875 
876 
877 const char* Simulator::VRegNameForCode(unsigned code) {
878   VIXL_ASSERT(code < kNumberOfVRegisters);
879   return vreg_names[code];
880 }
881 
882 
883 const char* Simulator::ZRegNameForCode(unsigned code) {
884   VIXL_ASSERT(code < kNumberOfZRegisters);
885   return zreg_names[code];
886 }
887 
888 
889 const char* Simulator::PRegNameForCode(unsigned code) {
890   VIXL_ASSERT(code < kNumberOfPRegisters);
891   return preg_names[code];
892 }
893 
894 SimVRegister Simulator::ExpandToSimVRegister(const SimPRegister& pg) {
895   SimVRegister ones, result;
896   dup_immediate(kFormatVnB, ones, 0xff);
897   mov_zeroing(kFormatVnB, result, pg, ones);
898   return result;
899 }
900 
901 void Simulator::ExtractFromSimVRegister(VectorFormat vform,
902                                         SimPRegister& pd,
903                                         SimVRegister vreg) {
904   SimVRegister zero;
905   dup_immediate(kFormatVnB, zero, 0);
906   SVEIntCompareVectorsHelper(ne,
907                              vform,
908                              pd,
909                              GetPTrue(),
910                              vreg,
911                              zero,
912                              false,
913                              LeaveFlags);
914 }
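// ExpandToSimVRegister and ExtractFromSimVRegister convert between the
// predicate and vector views of a mask: active predicate lanes become 0xff
// bytes in the vector, and non-zero vector lanes (compared against zero via
// SVEIntCompareVectorsHelper) become active predicate lanes.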
915 
916 #define COLOUR(colour_code) "\033[0;" colour_code "m"
917 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
918 #define COLOUR_HIGHLIGHT "\033[43m"
919 #define NORMAL ""
920 #define GREY "30"
921 #define RED "31"
922 #define GREEN "32"
923 #define YELLOW "33"
924 #define BLUE "34"
925 #define MAGENTA "35"
926 #define CYAN "36"
927 #define WHITE "37"
928 void Simulator::SetColouredTrace(bool value) {
929   coloured_trace_ = value;
930 
931   clr_normal = value ? COLOUR(NORMAL) : "";
932   clr_flag_name = value ? COLOUR_BOLD(WHITE) : "";
933   clr_flag_value = value ? COLOUR(NORMAL) : "";
934   clr_reg_name = value ? COLOUR_BOLD(CYAN) : "";
935   clr_reg_value = value ? COLOUR(CYAN) : "";
936   clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : "";
937   clr_vreg_value = value ? COLOUR(MAGENTA) : "";
938   clr_preg_name = value ? COLOUR_BOLD(GREEN) : "";
939   clr_preg_value = value ? COLOUR(GREEN) : "";
940   clr_memory_address = value ? COLOUR_BOLD(BLUE) : "";
941   clr_warning = value ? COLOUR_BOLD(YELLOW) : "";
942   clr_warning_message = value ? COLOUR(YELLOW) : "";
943   clr_printf = value ? COLOUR(GREEN) : "";
944   clr_branch_marker = value ? COLOUR(GREY) COLOUR_HIGHLIGHT : "";
945 
946   if (value) {
947     print_disasm_->SetCPUFeaturesPrefix("// Needs: " COLOUR_BOLD(RED));
948     print_disasm_->SetCPUFeaturesSuffix(COLOUR(NORMAL));
949   } else {
950     print_disasm_->SetCPUFeaturesPrefix("// Needs: ");
951     print_disasm_->SetCPUFeaturesSuffix("");
952   }
953 }
954 
955 
956 void Simulator::SetTraceParameters(int parameters) {
957   bool disasm_before = trace_parameters_ & LOG_DISASM;
958   trace_parameters_ = parameters;
959   bool disasm_after = trace_parameters_ & LOG_DISASM;
960 
961   if (disasm_before != disasm_after) {
962     if (disasm_after) {
963       decoder_->InsertVisitorBefore(print_disasm_, this);
964     } else {
965       decoder_->RemoveVisitor(print_disasm_);
966     }
967   }
968 }
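// Tracing is driven by the LOG_* flags. A sketch, assuming the usual VIXL
// trace accessors are available:
//
//   simulator.SetTraceParameters(simulator.GetTraceParameters() | LOG_DISASM);
//
// Enabling LOG_DISASM inserts the PrintDisassembler ahead of the simulator in
// the decoder's visitor list, so each instruction is printed before it runs.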
969 
970 // Helpers ---------------------------------------------------------------------
971 uint64_t Simulator::AddWithCarry(unsigned reg_size,
972                                  bool set_flags,
973                                  uint64_t left,
974                                  uint64_t right,
975                                  int carry_in) {
976   std::pair<uint64_t, uint8_t> result_and_flags =
977       AddWithCarry(reg_size, left, right, carry_in);
978   if (set_flags) {
979     uint8_t flags = result_and_flags.second;
980     ReadNzcv().SetN((flags >> 3) & 1);
981     ReadNzcv().SetZ((flags >> 2) & 1);
982     ReadNzcv().SetC((flags >> 1) & 1);
983     ReadNzcv().SetV((flags >> 0) & 1);
984     LogSystemRegister(NZCV);
985   }
986   return result_and_flags.first;
987 }
988 
989 std::pair<uint64_t, uint8_t> Simulator::AddWithCarry(unsigned reg_size,
990                                                      uint64_t left,
991                                                      uint64_t right,
992                                                      int carry_in) {
993   VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
994   VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
995 
996   uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
997   uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
998   uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;
999 
1000   left &= reg_mask;
1001   right &= reg_mask;
1002   uint64_t result = (left + right + carry_in) & reg_mask;
1003 
1004   // NZCV bits, ordered N in bit 3 to V in bit 0.
1005   uint8_t nzcv = CalcNFlag(result, reg_size) ? 8 : 0;
1006   nzcv |= CalcZFlag(result) ? 4 : 0;
1007 
1008   // Compute the C flag by comparing the result to the max unsigned integer.
1009   uint64_t max_uint_2op = max_uint - carry_in;
1010   bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
1011   nzcv |= C ? 2 : 0;
1012 
1013   // Overflow iff the sign bit is the same for the two inputs and different
1014   // for the result.
1015   uint64_t left_sign = left & sign_mask;
1016   uint64_t right_sign = right & sign_mask;
1017   uint64_t result_sign = result & sign_mask;
1018   bool V = (left_sign == right_sign) && (left_sign != result_sign);
1019   nzcv |= V ? 1 : 0;
1020 
1021   return std::make_pair(result, nzcv);
1022 }
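// Worked example: for a 32-bit ADDS of 0xffffffff and 0x1 with no carry in,
// the result wraps to 0x00000000 and the returned flag nibble is 0b0110
// (N = 0, Z = 1, C = 1, V = 0), matching the architectural behavior.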
1023 
1024 using vixl_uint128_t = std::pair<uint64_t, uint64_t>;
1025 
1026 vixl_uint128_t Simulator::Add128(vixl_uint128_t x, vixl_uint128_t y) {
1027   std::pair<uint64_t, uint8_t> sum_lo =
1028       AddWithCarry(kXRegSize, x.second, y.second, 0);
1029   int carry_in = (sum_lo.second & 0x2) >> 1;  // C flag in NZCV result.
1030   std::pair<uint64_t, uint8_t> sum_hi =
1031       AddWithCarry(kXRegSize, x.first, y.first, carry_in);
1032   return std::make_pair(sum_hi.first, sum_lo.first);
1033 }
1034 
1035 vixl_uint128_t Simulator::Lsl128(vixl_uint128_t x, unsigned shift) const {
1036   VIXL_ASSERT(shift <= 64);
1037   if (shift == 0) return x;
1038   if (shift == 64) return std::make_pair(x.second, 0);
1039   uint64_t lo = x.second << shift;
1040   uint64_t hi = (x.first << shift) | (x.second >> (64 - shift));
1041   return std::make_pair(hi, lo);
1042 }
1043 
1044 vixl_uint128_t Simulator::Eor128(vixl_uint128_t x, vixl_uint128_t y) const {
1045   return std::make_pair(x.first ^ y.first, x.second ^ y.second);
1046 }
1047 
1048 vixl_uint128_t Simulator::Neg128(vixl_uint128_t x) {
1049   // Negate the integer value. The assertion fails if the input is INT128_MIN,
1050   // which has no representable negation.
1050   VIXL_ASSERT((x.first != GetSignMask(64)) || (x.second != 0));
1051   x.first = ~x.first;
1052   x.second = ~x.second;
1053   return Add128(x, {0, 1});
1054 }
1055 
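// Mul64 forms the full 128-bit product of two 64-bit values, treating them as
// signed. It splits each operand into 32-bit halves and sums the partial
// products:
//   x * y = ((x_hi * y_hi) << 64) + ((x_hi * y_lo + x_lo * y_hi) << 32) + x_lo * y_lo
// applying a final two's-complement negation if exactly one input was
// negative.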
1056 vixl_uint128_t Simulator::Mul64(uint64_t x, uint64_t y) {
1057   bool neg_result = false;
1058   if ((x >> 63) == 1) {
1059     x = UnsignedNegate(x);
1060     neg_result = !neg_result;
1061   }
1062   if ((y >> 63) == 1) {
1063     y = UnsignedNegate(y);
1064     neg_result = !neg_result;
1065   }
1066 
1067   uint64_t x_lo = x & 0xffffffff;
1068   uint64_t x_hi = x >> 32;
1069   uint64_t y_lo = y & 0xffffffff;
1070   uint64_t y_hi = y >> 32;
1071 
1072   uint64_t t1 = x_lo * y_hi;
1073   uint64_t t2 = x_hi * y_lo;
1074   vixl_uint128_t a = std::make_pair(0, x_lo * y_lo);
1075   vixl_uint128_t b = std::make_pair(t1 >> 32, t1 << 32);
1076   vixl_uint128_t c = std::make_pair(t2 >> 32, t2 << 32);
1077   vixl_uint128_t d = std::make_pair(x_hi * y_hi, 0);
1078 
1079   vixl_uint128_t result = Add128(a, b);
1080   result = Add128(result, c);
1081   result = Add128(result, d);
1082   // Negate via Neg128() so that a zero low word still carries into the high
1083   // word; the magnitude product is at most 2^126, within Neg128's range.
1084   return neg_result ? Neg128(result) : result;
1085 }
1086 
1087 vixl_uint128_t Simulator::PolynomialMult128(uint64_t op1,
1088                                             uint64_t op2,
1089                                             int lane_size_in_bits) const {
1090   VIXL_ASSERT(static_cast<unsigned>(lane_size_in_bits) <= kDRegSize);
1091   vixl_uint128_t result = std::make_pair(0, 0);
1092   vixl_uint128_t op2q = std::make_pair(0, op2);
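       // Carry-less (polynomial) multiplication over GF(2): for every set bit i
       // of op1, XOR in op2 shifted left by i. Using XOR instead of addition
       // means no carries propagate between bit positions.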
1093   for (int i = 0; i < lane_size_in_bits; i++) {
1094     if ((op1 >> i) & 1) {
1095       result = Eor128(result, Lsl128(op2q, i));
1096     }
1097   }
1098   return result;
1099 }
1100 
1101 int64_t Simulator::ShiftOperand(unsigned reg_size,
1102                                 uint64_t uvalue,
1103                                 Shift shift_type,
1104                                 unsigned amount) const {
1105   VIXL_ASSERT((reg_size == kBRegSize) || (reg_size == kHRegSize) ||
1106               (reg_size == kSRegSize) || (reg_size == kDRegSize));
1107   if (amount > 0) {
1108     uint64_t mask = GetUintMask(reg_size);
1109     bool is_negative = (uvalue & GetSignMask(reg_size)) != 0;
1110     // The behavior is undefined in C++ if the shift amount is greater than or
1111     // equal to the register lane size. Work out the shifted result based on
1112     // architectural behavior before performing the C++ shift operations.
1113     switch (shift_type) {
1114       case LSL:
1115         if (amount >= reg_size) {
1116           return UINT64_C(0);
1117         }
1118         uvalue <<= amount;
1119         break;
1120       case LSR:
1121         if (amount >= reg_size) {
1122           return UINT64_C(0);
1123         }
1124         uvalue >>= amount;
1125         break;
1126       case ASR:
1127         if (amount >= reg_size) {
1128           return is_negative ? ~UINT64_C(0) : UINT64_C(0);
1129         }
1130         uvalue >>= amount;
1131         if (is_negative) {
1132           // Simulate sign-extension to 64 bits.
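               // For example, a 32-bit ASR of 0x80000000 by 4 gives 0xf8000000
               // once the result is masked to the register size below.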
1133           uvalue |= ~UINT64_C(0) << (reg_size - amount);
1134         }
1135         break;
1136       case ROR: {
1137         uvalue = RotateRight(uvalue, amount, reg_size);
1138         break;
1139       }
1140       default:
1141         VIXL_UNIMPLEMENTED();
1142         return 0;
1143     }
1144     uvalue &= mask;
1145   }
1146 
1147   int64_t result;
1148   memcpy(&result, &uvalue, sizeof(result));
1149   return result;
1150 }
1151 
1152 
1153 int64_t Simulator::ExtendValue(unsigned reg_size,
1154                                int64_t value,
1155                                Extend extend_type,
1156                                unsigned left_shift) const {
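       // The value is truncated to the source width and, for the signed variants,
       // the sign bit is replicated into the upper bits; the result is then
       // shifted left by left_shift. For example, SXTB of 0x85 produces
       // 0xffffffffffffff85 before the final shift.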
1157   switch (extend_type) {
1158     case UXTB:
1159       value &= kByteMask;
1160       break;
1161     case UXTH:
1162       value &= kHalfWordMask;
1163       break;
1164     case UXTW:
1165       value &= kWordMask;
1166       break;
1167     case SXTB:
1168       value &= kByteMask;
1169       if ((value & 0x80) != 0) {
1170         value |= ~UINT64_C(0) << 8;
1171       }
1172       break;
1173     case SXTH:
1174       value &= kHalfWordMask;
1175       if ((value & 0x8000) != 0) {
1176         value |= ~UINT64_C(0) << 16;
1177       }
1178       break;
1179     case SXTW:
1180       value &= kWordMask;
1181       if ((value & 0x80000000) != 0) {
1182         value |= ~UINT64_C(0) << 32;
1183       }
1184       break;
1185     case UXTX:
1186     case SXTX:
1187       break;
1188     default:
1189       VIXL_UNREACHABLE();
1190   }
1191   return ShiftOperand(reg_size, value, LSL, left_shift);
1192 }
1193 
1194 
1195 void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
1196   AssertSupportedFPCR();
1197 
1198   // TODO: This assumes that the C++ implementation handles comparisons in the
1199   // way that we expect (as per AssertSupportedFPCR()).
1200   bool process_exception = false;
1201   if ((IsNaN(val0) != 0) || (IsNaN(val1) != 0)) {
1202     ReadNzcv().SetRawValue(FPUnorderedFlag);
1203     if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
1204         (trap == EnableTrap)) {
1205       process_exception = true;
1206     }
1207   } else if (val0 < val1) {
1208     ReadNzcv().SetRawValue(FPLessThanFlag);
1209   } else if (val0 > val1) {
1210     ReadNzcv().SetRawValue(FPGreaterThanFlag);
1211   } else if (val0 == val1) {
1212     ReadNzcv().SetRawValue(FPEqualFlag);
1213   } else {
1214     VIXL_UNREACHABLE();
1215   }
1216   LogSystemRegister(NZCV);
1217   if (process_exception) FPProcessException();
1218 }
1219 
1220 
1221 uint64_t Simulator::ComputeMemOperandAddress(const MemOperand& mem_op) const {
1222   VIXL_ASSERT(mem_op.IsValid());
1223   int64_t base = ReadRegister<int64_t>(mem_op.GetBaseRegister());
1224   if (mem_op.IsImmediateOffset()) {
1225     return base + mem_op.GetOffset();
1226   } else {
1227     VIXL_ASSERT(mem_op.GetRegisterOffset().IsValid());
1228     int64_t offset = ReadRegister<int64_t>(mem_op.GetRegisterOffset());
1229     unsigned shift_amount = mem_op.GetShiftAmount();
1230     if (mem_op.GetShift() != NO_SHIFT) {
1231       offset = ShiftOperand(kXRegSize, offset, mem_op.GetShift(), shift_amount);
1232     }
1233     if (mem_op.GetExtend() != NO_EXTEND) {
1234       offset = ExtendValue(kXRegSize, offset, mem_op.GetExtend(), shift_amount);
1235     }
1236     return static_cast<uint64_t>(base + offset);
1237   }
1238 }
1239 
1240 
1241 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
1242     unsigned reg_size, unsigned lane_size) {
1243   VIXL_ASSERT(reg_size >= lane_size);
1244 
1245   uint32_t format = 0;
1246   if (reg_size != lane_size) {
1247     switch (reg_size) {
1248       default:
1249         VIXL_UNREACHABLE();
1250         break;
1251       case kQRegSizeInBytes:
1252         format = kPrintRegAsQVector;
1253         break;
1254       case kDRegSizeInBytes:
1255         format = kPrintRegAsDVector;
1256         break;
1257     }
1258   }
1259 
1260   switch (lane_size) {
1261     default:
1262       VIXL_UNREACHABLE();
1263       break;
1264     case kQRegSizeInBytes:
1265       format |= kPrintReg1Q;
1266       break;
1267     case kDRegSizeInBytes:
1268       format |= kPrintReg1D;
1269       break;
1270     case kSRegSizeInBytes:
1271       format |= kPrintReg1S;
1272       break;
1273     case kHRegSizeInBytes:
1274       format |= kPrintReg1H;
1275       break;
1276     case kBRegSizeInBytes:
1277       format |= kPrintReg1B;
1278       break;
1279   }
1280   // These sizes would be duplicate case labels.
1281   VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
1282   VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
1283   VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
1284   VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
1285 
1286   return static_cast<PrintRegisterFormat>(format);
1287 }
1288 
1289 
1290 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
1291     VectorFormat vform) {
1292   switch (vform) {
1293     default:
1294       VIXL_UNREACHABLE();
1295       return kPrintReg16B;
1296     case kFormat16B:
1297       return kPrintReg16B;
1298     case kFormat8B:
1299       return kPrintReg8B;
1300     case kFormat8H:
1301       return kPrintReg8H;
1302     case kFormat4H:
1303       return kPrintReg4H;
1304     case kFormat4S:
1305       return kPrintReg4S;
1306     case kFormat2S:
1307       return kPrintReg2S;
1308     case kFormat2D:
1309       return kPrintReg2D;
1310     case kFormat1D:
1311       return kPrintReg1D;
1312 
1313     case kFormatB:
1314       return kPrintReg1B;
1315     case kFormatH:
1316       return kPrintReg1H;
1317     case kFormatS:
1318       return kPrintReg1S;
1319     case kFormatD:
1320       return kPrintReg1D;
1321 
1322     case kFormatVnB:
1323       return kPrintRegVnB;
1324     case kFormatVnH:
1325       return kPrintRegVnH;
1326     case kFormatVnS:
1327       return kPrintRegVnS;
1328     case kFormatVnD:
1329       return kPrintRegVnD;
1330   }
1331 }
1332 
1333 
1334 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
1335     VectorFormat vform) {
1336   switch (vform) {
1337     default:
1338       VIXL_UNREACHABLE();
1339       return kPrintReg16B;
1340     case kFormat8H:
1341       return kPrintReg8HFP;
1342     case kFormat4H:
1343       return kPrintReg4HFP;
1344     case kFormat4S:
1345       return kPrintReg4SFP;
1346     case kFormat2S:
1347       return kPrintReg2SFP;
1348     case kFormat2D:
1349       return kPrintReg2DFP;
1350     case kFormat1D:
1351       return kPrintReg1DFP;
1352     case kFormatH:
1353       return kPrintReg1HFP;
1354     case kFormatS:
1355       return kPrintReg1SFP;
1356     case kFormatD:
1357       return kPrintReg1DFP;
1358   }
1359 }
1360 
1361 void Simulator::PrintRegisters() {
1362   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1363     if (i == kSpRegCode) i = kSPRegInternalCode;
1364     PrintRegister(i);
1365   }
1366 }
1367 
1368 void Simulator::PrintVRegisters() {
1369   for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1370     PrintVRegister(i);
1371   }
1372 }
1373 
1374 void Simulator::PrintZRegisters() {
1375   for (unsigned i = 0; i < kNumberOfZRegisters; i++) {
1376     PrintZRegister(i);
1377   }
1378 }
1379 
1380 void Simulator::PrintWrittenRegisters() {
1381   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1382     if (registers_[i].WrittenSinceLastLog()) {
1383       if (i == kSpRegCode) i = kSPRegInternalCode;
1384       PrintRegister(i);
1385     }
1386   }
1387 }
1388 
1389 void Simulator::PrintWrittenVRegisters() {
1390   bool has_sve = GetCPUFeatures()->Has(CPUFeatures::kSVE);
1391   for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1392     if (vregisters_[i].WrittenSinceLastLog()) {
1393       // Z registers are initialised in the constructor before the user can
1394       // configure the CPU features, so we must also check for SVE here.
1395       if (vregisters_[i].AccessedAsZSinceLastLog() && has_sve) {
1396         PrintZRegister(i);
1397       } else {
1398         PrintVRegister(i);
1399       }
1400     }
1401   }
1402 }
1403 
1404 void Simulator::PrintWrittenPRegisters() {
1405   // P registers are initialised in the constructor before the user can
1406   // configure the CPU features, so we must check for SVE here.
1407   if (!GetCPUFeatures()->Has(CPUFeatures::kSVE)) return;
1408   for (unsigned i = 0; i < kNumberOfPRegisters; i++) {
1409     if (pregisters_[i].WrittenSinceLastLog()) {
1410       PrintPRegister(i);
1411     }
1412   }
1413   if (ReadFFR().WrittenSinceLastLog()) PrintFFR();
1414 }
1415 
1416 void Simulator::PrintSystemRegisters() {
1417   PrintSystemRegister(NZCV);
1418   PrintSystemRegister(FPCR);
1419 }
1420 
1421 void Simulator::PrintRegisterValue(const uint8_t* value,
1422                                    int value_size,
1423                                    PrintRegisterFormat format) {
1424   int print_width = GetPrintRegSizeInBytes(format);
1425   VIXL_ASSERT(print_width <= value_size);
1426   for (int i = value_size - 1; i >= print_width; i--) {
1427     // Pad with spaces so that values align vertically.
1428     fprintf(stream_, "  ");
1429     // If we aren't explicitly printing a partial value, ensure that the
1430     // unprinted bits are zero.
1431     VIXL_ASSERT(((format & kPrintRegPartial) != 0) || (value[i] == 0));
1432   }
1433   fprintf(stream_, "0x");
1434   for (int i = print_width - 1; i >= 0; i--) {
1435     fprintf(stream_, "%02x", value[i]);
1436   }
1437 }
1438 
1439 void Simulator::PrintRegisterValueFPAnnotations(const uint8_t* value,
1440                                                 uint16_t lane_mask,
1441                                                 PrintRegisterFormat format) {
1442   VIXL_ASSERT((format & kPrintRegAsFP) != 0);
1443   int lane_size = GetPrintRegLaneSizeInBytes(format);
1444   fprintf(stream_, " (");
1445   bool last_inactive = false;
1446   const char* sep = "";
1447   for (int i = GetPrintRegLaneCount(format) - 1; i >= 0; i--, sep = ", ") {
1448     bool access = (lane_mask & (1 << (i * lane_size))) != 0;
1449     if (access) {
1450       // Read the lane as a double, so we can format all FP types in the same
1451       // way. We squash NaNs, and a double can exactly represent any other value
1452       // that the smaller types can represent, so this is lossless.
1453       double element;
1454       switch (lane_size) {
1455         case kHRegSizeInBytes: {
1456           Float16 element_fp16;
1457           VIXL_STATIC_ASSERT(sizeof(element_fp16) == kHRegSizeInBytes);
1458           memcpy(&element_fp16, &value[i * lane_size], sizeof(element_fp16));
1459           element = FPToDouble(element_fp16, kUseDefaultNaN);
1460           break;
1461         }
1462         case kSRegSizeInBytes: {
1463           float element_fp32;
1464           memcpy(&element_fp32, &value[i * lane_size], sizeof(element_fp32));
1465           element = static_cast<double>(element_fp32);
1466           break;
1467         }
1468         case kDRegSizeInBytes: {
1469           memcpy(&element, &value[i * lane_size], sizeof(element));
1470           break;
1471         }
1472         default:
1473           VIXL_UNREACHABLE();
1474           fprintf(stream_, "{UnknownFPValue}");
1475           continue;
1476       }
1477       if (IsNaN(element)) {
1478         // The fprintf behaviour for NaNs is implementation-defined. Always
1479         // print "nan", so that traces are consistent.
1480         fprintf(stream_, "%s%snan%s", sep, clr_vreg_value, clr_normal);
1481       } else {
1482         fprintf(stream_,
1483                 "%s%s%#.4g%s",
1484                 sep,
1485                 clr_vreg_value,
1486                 element,
1487                 clr_normal);
1488       }
1489       last_inactive = false;
1490     } else if (!last_inactive) {
1491       // Replace each contiguous sequence of inactive lanes with "...".
1492       fprintf(stream_, "%s...", sep);
1493       last_inactive = true;
1494     }
1495   }
1496   fprintf(stream_, ")");
1497 }
1498 
1499 void Simulator::PrintRegister(int code,
1500                               PrintRegisterFormat format,
1501                               const char* suffix) {
1502   VIXL_ASSERT((static_cast<unsigned>(code) < kNumberOfRegisters) ||
1503               (static_cast<unsigned>(code) == kSPRegInternalCode));
1504   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsScalar);
1505   VIXL_ASSERT((format & kPrintRegAsFP) == 0);
1506 
1507   SimRegister* reg;
1508   SimRegister zero;
1509   if (code == kZeroRegCode) {
1510     reg = &zero;
1511   } else {
1512     // registers_[31] holds the SP.
1513     VIXL_STATIC_ASSERT((kSPRegInternalCode % kNumberOfRegisters) == 31);
1514     reg = &registers_[code % kNumberOfRegisters];
1515   }
1516 
1517   // We trace register writes as whole register values, implying that any
1518   // unprinted bits are all zero:
1519   //   "#       x{code}: 0x{-----value----}"
1520   //   "#       w{code}:         0x{-value}"
1521   // Stores trace partial register values, implying nothing about the unprinted
1522   // bits:
1523   //   "# x{code}<63:0>: 0x{-----value----}"
1524   //   "# x{code}<31:0>:         0x{-value}"
1525   //   "# x{code}<15:0>:             0x{--}"
1526   //   "#  x{code}<7:0>:               0x{}"
1527 
1528   bool is_partial = (format & kPrintRegPartial) != 0;
1529   unsigned print_reg_size = GetPrintRegSizeInBits(format);
1530   std::stringstream name;
1531   if (is_partial) {
1532     name << XRegNameForCode(code) << GetPartialRegSuffix(format);
1533   } else {
1534     // Notify the register that it has been logged, but only if we're printing
1535     // all of it.
1536     reg->NotifyRegisterLogged();
1537     switch (print_reg_size) {
1538       case kWRegSize:
1539         name << WRegNameForCode(code);
1540         break;
1541       case kXRegSize:
1542         name << XRegNameForCode(code);
1543         break;
1544       default:
1545         VIXL_UNREACHABLE();
1546         return;
1547     }
1548   }
1549 
1550   fprintf(stream_,
1551           "# %s%*s: %s",
1552           clr_reg_name,
1553           kPrintRegisterNameFieldWidth,
1554           name.str().c_str(),
1555           clr_reg_value);
1556   PrintRegisterValue(*reg, format);
1557   fprintf(stream_, "%s%s", clr_normal, suffix);
1558 }
1559 
1560 void Simulator::PrintVRegister(int code,
1561                                PrintRegisterFormat format,
1562                                const char* suffix) {
1563   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfVRegisters);
1564   VIXL_ASSERT(((format & kPrintRegAsVectorMask) == kPrintRegAsScalar) ||
1565               ((format & kPrintRegAsVectorMask) == kPrintRegAsDVector) ||
1566               ((format & kPrintRegAsVectorMask) == kPrintRegAsQVector));
1567 
1568   // We trace register writes as whole register values, implying that any
1569   // unprinted bits are all zero:
1570   //   "#        v{code}: 0x{-------------value------------}"
1571   //   "#        d{code}:                 0x{-----value----}"
1572   //   "#        s{code}:                         0x{-value}"
1573   //   "#        h{code}:                             0x{--}"
1574   //   "#        b{code}:                               0x{}"
1575   // Stores trace partial register values, implying nothing about the unprinted
1576   // bits:
1577   //   "# v{code}<127:0>: 0x{-------------value------------}"
1578   //   "#  v{code}<63:0>:                 0x{-----value----}"
1579   //   "#  v{code}<31:0>:                         0x{-value}"
1580   //   "#  v{code}<15:0>:                             0x{--}"
1581   //   "#   v{code}<7:0>:                               0x{}"
1582 
1583   bool is_partial = ((format & kPrintRegPartial) != 0);
1584   std::stringstream name;
1585   unsigned print_reg_size = GetPrintRegSizeInBits(format);
1586   if (is_partial) {
1587     name << VRegNameForCode(code) << GetPartialRegSuffix(format);
1588   } else {
1589     // Notify the register that it has been logged, but only if we're printing
1590     // all of it.
1591     vregisters_[code].NotifyRegisterLogged();
1592     switch (print_reg_size) {
1593       case kBRegSize:
1594         name << BRegNameForCode(code);
1595         break;
1596       case kHRegSize:
1597         name << HRegNameForCode(code);
1598         break;
1599       case kSRegSize:
1600         name << SRegNameForCode(code);
1601         break;
1602       case kDRegSize:
1603         name << DRegNameForCode(code);
1604         break;
1605       case kQRegSize:
1606         name << VRegNameForCode(code);
1607         break;
1608       default:
1609         VIXL_UNREACHABLE();
1610         return;
1611     }
1612   }
1613 
1614   fprintf(stream_,
1615           "# %s%*s: %s",
1616           clr_vreg_name,
1617           kPrintRegisterNameFieldWidth,
1618           name.str().c_str(),
1619           clr_vreg_value);
1620   PrintRegisterValue(vregisters_[code], format);
1621   fprintf(stream_, "%s", clr_normal);
1622   if ((format & kPrintRegAsFP) != 0) {
1623     PrintRegisterValueFPAnnotations(vregisters_[code], format);
1624   }
1625   fprintf(stream_, "%s", suffix);
1626 }
1627 
1628 void Simulator::PrintVRegistersForStructuredAccess(int rt_code,
1629                                                    int reg_count,
1630                                                    uint16_t focus_mask,
1631                                                    PrintRegisterFormat format) {
1632   bool print_fp = (format & kPrintRegAsFP) != 0;
1633   // Suppress FP formatting, so we can specify the lanes we're interested in.
1634   PrintRegisterFormat format_no_fp =
1635       static_cast<PrintRegisterFormat>(format & ~kPrintRegAsFP);
1636 
1637   for (int r = 0; r < reg_count; r++) {
1638     int code = (rt_code + r) % kNumberOfVRegisters;
1639     PrintVRegister(code, format_no_fp, "");
1640     if (print_fp) {
1641       PrintRegisterValueFPAnnotations(vregisters_[code], focus_mask, format);
1642     }
1643     fprintf(stream_, "\n");
1644   }
1645 }
1646 
1647 void Simulator::PrintZRegistersForStructuredAccess(int rt_code,
1648                                                    int q_index,
1649                                                    int reg_count,
1650                                                    uint16_t focus_mask,
1651                                                    PrintRegisterFormat format) {
1652   bool print_fp = (format & kPrintRegAsFP) != 0;
1653   // Suppress FP formatting, so we can specify the lanes we're interested in.
1654   PrintRegisterFormat format_no_fp =
1655       static_cast<PrintRegisterFormat>(format & ~kPrintRegAsFP);
1656 
1657   PrintRegisterFormat format_q = GetPrintRegAsQChunkOfSVE(format);
1658 
1659   const unsigned size = kQRegSizeInBytes;
1660   unsigned byte_index = q_index * size;
1661   const uint8_t* value = vregisters_[rt_code].GetBytes() + byte_index;
1662   VIXL_ASSERT((byte_index + size) <= vregisters_[rt_code].GetSizeInBytes());
1663 
1664   for (int r = 0; r < reg_count; r++) {
1665     int code = (rt_code + r) % kNumberOfZRegisters;
1666     PrintPartialZRegister(code, q_index, format_no_fp, "");
1667     if (print_fp) {
1668       PrintRegisterValueFPAnnotations(value, focus_mask, format_q);
1669     }
1670     fprintf(stream_, "\n");
1671   }
1672 }
1673 
1674 void Simulator::PrintZRegister(int code, PrintRegisterFormat format) {
1675   // We're going to print the register in parts, so force a partial format.
1676   format = GetPrintRegPartial(format);
1677   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1678   int vl = GetVectorLengthInBits();
1679   VIXL_ASSERT((vl % kQRegSize) == 0);
1680   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1681     PrintPartialZRegister(code, i, format);
1682   }
1683   vregisters_[code].NotifyRegisterLogged();
1684 }
1685 
1686 void Simulator::PrintPRegister(int code, PrintRegisterFormat format) {
1687   // We're going to print the register in parts, so force a partial format.
1688   format = GetPrintRegPartial(format);
1689   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1690   int vl = GetVectorLengthInBits();
1691   VIXL_ASSERT((vl % kQRegSize) == 0);
1692   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1693     PrintPartialPRegister(code, i, format);
1694   }
1695   pregisters_[code].NotifyRegisterLogged();
1696 }
1697 
1698 void Simulator::PrintFFR(PrintRegisterFormat format) {
1699   // We're going to print the register in parts, so force a partial format.
1700   format = GetPrintRegPartial(format);
1701   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1702   int vl = GetVectorLengthInBits();
1703   VIXL_ASSERT((vl % kQRegSize) == 0);
1704   SimPRegister& ffr = ReadFFR();
1705   for (unsigned i = 0; i < (vl / kQRegSize); i++) {
1706     PrintPartialPRegister("FFR", ffr, i, format);
1707   }
1708   ffr.NotifyRegisterLogged();
1709 }
1710 
1711 void Simulator::PrintPartialZRegister(int code,
1712                                       int q_index,
1713                                       PrintRegisterFormat format,
1714                                       const char* suffix) {
1715   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfZRegisters);
1716   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1717   VIXL_ASSERT((format & kPrintRegPartial) != 0);
1718   VIXL_ASSERT((q_index * kQRegSize) < GetVectorLengthInBits());
1719 
1720   // We _only_ trace partial Z register values in Q-sized chunks, because
1721   // they're often too large to reasonably fit on a single line. Each line
1722   // implies nothing about the unprinted bits.
1723   //   "# z{code}<127:0>: 0x{-------------value------------}"
1724 
1725   format = GetPrintRegAsQChunkOfSVE(format);
1726 
1727   const unsigned size = kQRegSizeInBytes;
1728   unsigned byte_index = q_index * size;
1729   const uint8_t* value = vregisters_[code].GetBytes() + byte_index;
1730   VIXL_ASSERT((byte_index + size) <= vregisters_[code].GetSizeInBytes());
1731 
1732   int lsb = q_index * kQRegSize;
1733   int msb = lsb + kQRegSize - 1;
1734   std::stringstream name;
1735   name << ZRegNameForCode(code) << '<' << msb << ':' << lsb << '>';
1736 
1737   fprintf(stream_,
1738           "# %s%*s: %s",
1739           clr_vreg_name,
1740           kPrintRegisterNameFieldWidth,
1741           name.str().c_str(),
1742           clr_vreg_value);
1743   PrintRegisterValue(value, size, format);
1744   fprintf(stream_, "%s", clr_normal);
1745   if ((format & kPrintRegAsFP) != 0) {
1746     PrintRegisterValueFPAnnotations(value, GetPrintRegLaneMask(format), format);
1747   }
1748   fprintf(stream_, "%s", suffix);
1749 }
1750 
1751 void Simulator::PrintPartialPRegister(const char* name,
1752                                       const SimPRegister& reg,
1753                                       int q_index,
1754                                       PrintRegisterFormat format,
1755                                       const char* suffix) {
1756   VIXL_ASSERT((format & kPrintRegAsVectorMask) == kPrintRegAsSVEVector);
1757   VIXL_ASSERT((format & kPrintRegPartial) != 0);
1758   VIXL_ASSERT((q_index * kQRegSize) < GetVectorLengthInBits());
1759 
1760   // We don't currently use the format for anything here.
1761   USE(format);
1762 
1763   // We _only_ trace partial P register values, because they're often too large
1764   // to reasonably fit on a single line. Each line implies nothing about the
1765   // unprinted bits.
1766   //
1767   // We print values in binary, with spaces between each bit, in order for the
1768   // bits to align with the Z register bytes that they predicate.
1769   //   "# {name}<15:0>: 0b{-------------value------------}"
1770 
1771   int print_size_in_bits = kQRegSize / kZRegBitsPerPRegBit;
1772   int lsb = q_index * print_size_in_bits;
1773   int msb = lsb + print_size_in_bits - 1;
1774   std::stringstream prefix;
1775   prefix << name << '<' << msb << ':' << lsb << '>';
1776 
1777   fprintf(stream_,
1778           "# %s%*s: %s0b",
1779           clr_preg_name,
1780           kPrintRegisterNameFieldWidth,
1781           prefix.str().c_str(),
1782           clr_preg_value);
1783   for (int i = msb; i >= lsb; i--) {
1784     fprintf(stream_, " %c", reg.GetBit(i) ? '1' : '0');
1785   }
1786   fprintf(stream_, "%s%s", clr_normal, suffix);
1787 }
1788 
1789 void Simulator::PrintPartialPRegister(int code,
1790                                       int q_index,
1791                                       PrintRegisterFormat format,
1792                                       const char* suffix) {
1793   VIXL_ASSERT(static_cast<unsigned>(code) < kNumberOfPRegisters);
1794   PrintPartialPRegister(PRegNameForCode(code),
1795                         pregisters_[code],
1796                         q_index,
1797                         format,
1798                         suffix);
1799 }
1800 
1801 void Simulator::PrintSystemRegister(SystemRegister id) {
1802   switch (id) {
1803     case NZCV:
1804       fprintf(stream_,
1805               "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
1806               clr_flag_name,
1807               clr_flag_value,
1808               ReadNzcv().GetN(),
1809               ReadNzcv().GetZ(),
1810               ReadNzcv().GetC(),
1811               ReadNzcv().GetV(),
1812               clr_normal);
1813       break;
1814     case FPCR: {
1815       static const char* rmode[] = {"0b00 (Round to Nearest)",
1816                                     "0b01 (Round towards Plus Infinity)",
1817                                     "0b10 (Round towards Minus Infinity)",
1818                                     "0b11 (Round towards Zero)"};
1819       VIXL_ASSERT(ReadFpcr().GetRMode() < ArrayLength(rmode));
1820       fprintf(stream_,
1821               "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1822               clr_flag_name,
1823               clr_flag_value,
1824               ReadFpcr().GetAHP(),
1825               ReadFpcr().GetDN(),
1826               ReadFpcr().GetFZ(),
1827               rmode[ReadFpcr().GetRMode()],
1828               clr_normal);
1829       break;
1830     }
1831     default:
1832       VIXL_UNREACHABLE();
1833   }
1834 }
1835 
1836 void Simulator::PrintGCS(bool is_push, uint64_t addr, size_t entry) {
1837   const char* arrow = is_push ? "<-" : "->";
1838   fprintf(stream_,
1839           "# %sgcs0x%04" PRIx64 "[%" PRIxPTR "]: %s %s 0x%016" PRIx64 "\n",
1840           clr_flag_name,
1841           gcs_,
1842           entry,
1843           clr_normal,
1844           arrow,
1845           addr);
1846 }
1847 
1848 uint16_t Simulator::PrintPartialAccess(uint16_t access_mask,
1849                                        uint16_t future_access_mask,
1850                                        int struct_element_count,
1851                                        int lane_size_in_bytes,
1852                                        const char* op,
1853                                        uintptr_t address,
1854                                        int reg_size_in_bytes) {
1855   // We expect at least one lane to be accessed.
1856   VIXL_ASSERT(access_mask != 0);
1857   VIXL_ASSERT((reg_size_in_bytes == kXRegSizeInBytes) ||
1858               (reg_size_in_bytes == kQRegSizeInBytes));
1859   bool started_annotation = false;
1860   // Indent to match the register field, the fixed formatting, and the value
1861   // prefix ("0x"): "# {name}: 0x"
1862   fprintf(stream_, "# %*s    ", kPrintRegisterNameFieldWidth, "");
1863   // First, annotate the lanes (byte by byte).
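       // In the annotation, '╙' marks the most significant byte accessed by this
       // line (the connector then runs right to the printed value), '╨' marks the
       // other bytes accessed by this line, '║' marks bytes that later lines will
       // annotate, and '─' simply extends the connector.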
1864   for (int lane = reg_size_in_bytes - 1; lane >= 0; lane--) {
1865     bool access = (access_mask & (1 << lane)) != 0;
1866     bool future = (future_access_mask & (1 << lane)) != 0;
1867     if (started_annotation) {
1868       // If we've started an annotation, draw a horizontal line in addition to
1869       // any other symbols.
1870       if (access) {
1871         fprintf(stream_, "─╨");
1872       } else if (future) {
1873         fprintf(stream_, "─║");
1874       } else {
1875         fprintf(stream_, "──");
1876       }
1877     } else {
1878       if (access) {
1879         started_annotation = true;
1880         fprintf(stream_, " ╙");
1881       } else if (future) {
1882         fprintf(stream_, " ║");
1883       } else {
1884         fprintf(stream_, "  ");
1885       }
1886     }
1887   }
1888   VIXL_ASSERT(started_annotation);
1889   fprintf(stream_, "─ 0x");
1890   int lane_size_in_nibbles = lane_size_in_bytes * 2;
1891   // Print the most-significant struct element first.
1892   const char* sep = "";
1893   for (int i = struct_element_count - 1; i >= 0; i--) {
1894     int offset = lane_size_in_bytes * i;
1895     auto nibble = MemReadUint(lane_size_in_bytes, address + offset);
1896     VIXL_ASSERT(nibble);
1897     fprintf(stream_, "%s%0*" PRIx64, sep, lane_size_in_nibbles, *nibble);
1898     sep = "'";
1899   }
1900   fprintf(stream_,
1901           " %s %s0x%016" PRIxPTR "%s\n",
1902           op,
1903           clr_memory_address,
1904           address,
1905           clr_normal);
1906   return future_access_mask & ~access_mask;
1907 }
1908 
1909 void Simulator::PrintAccess(int code,
1910                             PrintRegisterFormat format,
1911                             const char* op,
1912                             uintptr_t address) {
1913   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
1914   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1915   if ((format & kPrintRegPartial) == 0) {
1916     if (code != kZeroRegCode) {
1917       registers_[code].NotifyRegisterLogged();
1918     }
1919   }
1920   // Scalar-format accesses use a simple format:
1921   //   "# {reg}: 0x{value} -> {address}"
1922 
1923   // Suppress the newline, so the access annotation goes on the same line.
1924   PrintRegister(code, format, "");
1925   fprintf(stream_,
1926           " %s %s0x%016" PRIxPTR "%s\n",
1927           op,
1928           clr_memory_address,
1929           address,
1930           clr_normal);
1931 }
1932 
1933 void Simulator::PrintVAccess(int code,
1934                              PrintRegisterFormat format,
1935                              const char* op,
1936                              uintptr_t address) {
1937   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1938 
1939   // Scalar-format accesses use a simple format:
1940   //   "# v{code}: 0x{value} -> {address}"
1941 
1942   // Suppress the newline, so the access annotation goes on the same line.
1943   PrintVRegister(code, format, "");
1944   fprintf(stream_,
1945           " %s %s0x%016" PRIxPTR "%s\n",
1946           op,
1947           clr_memory_address,
1948           address,
1949           clr_normal);
1950 }
1951 
1952 void Simulator::PrintVStructAccess(int rt_code,
1953                                    int reg_count,
1954                                    PrintRegisterFormat format,
1955                                    const char* op,
1956                                    uintptr_t address) {
1957   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1958 
1959   // For example:
1960   //   "# v{code}: 0x{value}"
1961   //   "#     ...: 0x{value}"
1962   //   "#              ║   ╙─ {struct_value} -> {lowest_address}"
1963   //   "#              ╙───── {struct_value} -> {highest_address}"
1964 
1965   uint16_t lane_mask = GetPrintRegLaneMask(format);
1966   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
1967 
1968   int reg_size_in_bytes = GetPrintRegSizeInBytes(format);
1969   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
1970   for (int i = 0; i < reg_size_in_bytes; i += lane_size_in_bytes) {
1971     uint16_t access_mask = 1 << i;
1972     VIXL_ASSERT((lane_mask & access_mask) != 0);
1973     lane_mask = PrintPartialAccess(access_mask,
1974                                    lane_mask,
1975                                    reg_count,
1976                                    lane_size_in_bytes,
1977                                    op,
1978                                    address + (i * reg_count));
1979   }
1980 }
1981 
1982 void Simulator::PrintVSingleStructAccess(int rt_code,
1983                                          int reg_count,
1984                                          int lane,
1985                                          PrintRegisterFormat format,
1986                                          const char* op,
1987                                          uintptr_t address) {
1988   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
1989 
1990   // For example:
1991   //   "# v{code}: 0x{value}"
1992   //   "#     ...: 0x{value}"
1993   //   "#              ╙───── {struct_value} -> {address}"
1994 
1995   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
1996   uint16_t lane_mask = 1 << (lane * lane_size_in_bytes);
1997   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
1998   PrintPartialAccess(lane_mask, 0, reg_count, lane_size_in_bytes, op, address);
1999 }
2000 
2001 void Simulator::PrintVReplicatingStructAccess(int rt_code,
2002                                               int reg_count,
2003                                               PrintRegisterFormat format,
2004                                               const char* op,
2005                                               uintptr_t address) {
2006   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
2007 
2008   // For example:
2009   //   "# v{code}: 0x{value}"
2010   //   "#     ...: 0x{value}"
2011   //   "#            ╙─╨─╨─╨─ {struct_value} -> {address}"
2012 
2013   int lane_size_in_bytes = GetPrintRegLaneSizeInBytes(format);
2014   uint16_t lane_mask = GetPrintRegLaneMask(format);
2015   PrintVRegistersForStructuredAccess(rt_code, reg_count, lane_mask, format);
2016   PrintPartialAccess(lane_mask, 0, reg_count, lane_size_in_bytes, op, address);
2017 }
2018 
2019 void Simulator::PrintZAccess(int rt_code, const char* op, uintptr_t address) {
2020   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
2021 
2022   // Scalar-format accesses are split into separate chunks, each of which uses a
2023   // simple format:
2024   //   "#   z{code}<127:0>: 0x{value} -> {address}"
2025   //   "# z{code}<255:128>: 0x{value} -> {address + 16}"
2026   //   "# z{code}<383:256>: 0x{value} -> {address + 32}"
2027   // etc
2028 
2029   int vl = GetVectorLengthInBits();
2030   VIXL_ASSERT((vl % kQRegSize) == 0);
2031   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
2032     // Suppress the newline, so the access annotation goes on the same line.
2033     PrintPartialZRegister(rt_code, q_index, kPrintRegVnQPartial, "");
2034     fprintf(stream_,
2035             " %s %s0x%016" PRIxPTR "%s\n",
2036             op,
2037             clr_memory_address,
2038             address,
2039             clr_normal);
2040     address += kQRegSizeInBytes;
2041   }
2042 }
2043 
2044 void Simulator::PrintZStructAccess(int rt_code,
2045                                    int reg_count,
2046                                    const LogicPRegister& pg,
2047                                    PrintRegisterFormat format,
2048                                    int msize_in_bytes,
2049                                    const char* op,
2050                                    const LogicSVEAddressVector& addr) {
2051   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
2052 
2053   // For example:
2054   //   "# z{code}<255:128>: 0x{value}"
2055   //   "#     ...<255:128>: 0x{value}"
2056   //   "#                       ║   ╙─ {struct_value} -> {first_address}"
2057   //   "#                       ╙───── {struct_value} -> {last_address}"
2058 
2059   // We're going to print the register in parts, so force a partial format.
2060   bool skip_inactive_chunks = (format & kPrintRegPartial) != 0;
2061   format = GetPrintRegPartial(format);
2062 
2063   int esize_in_bytes = GetPrintRegLaneSizeInBytes(format);
2064   int vl = GetVectorLengthInBits();
2065   VIXL_ASSERT((vl % kQRegSize) == 0);
2066   int lanes_per_q = kQRegSizeInBytes / esize_in_bytes;
2067   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
2068     uint16_t pred =
2069         pg.GetActiveMask<uint16_t>(q_index) & GetPrintRegLaneMask(format);
2070     if ((pred == 0) && skip_inactive_chunks) continue;
2071 
2072     PrintZRegistersForStructuredAccess(rt_code,
2073                                        q_index,
2074                                        reg_count,
2075                                        pred,
2076                                        format);
2077     if (pred == 0) {
2078       // This register chunk has no active lanes. The loop below would print
2079       // nothing, so leave a blank line to keep structures grouped together.
2080       fprintf(stream_, "#\n");
2081       continue;
2082     }
2083     for (int i = 0; i < lanes_per_q; i++) {
2084       uint16_t access = 1 << (i * esize_in_bytes);
2085       int lane = (q_index * lanes_per_q) + i;
2086       // Skip inactive lanes.
2087       if ((pred & access) == 0) continue;
2088       pred = PrintPartialAccess(access,
2089                                 pred,
2090                                 reg_count,
2091                                 msize_in_bytes,
2092                                 op,
2093                                 addr.GetStructAddress(lane));
2094     }
2095   }
2096 
2097   // We print the whole register, even for stores.
2098   for (int i = 0; i < reg_count; i++) {
2099     vregisters_[(rt_code + i) % kNumberOfZRegisters].NotifyRegisterLogged();
2100   }
2101 }
2102 
2103 void Simulator::PrintPAccess(int code, const char* op, uintptr_t address) {
2104   VIXL_ASSERT((strcmp(op, "->") == 0) || (strcmp(op, "<-") == 0));
2105 
2106   // Scalar-format accesses are split into separate chunks, each of which uses a
2107   // simple format:
2108   //   "#  p{code}<15:0>: 0b{value} -> {address}"
2109   //   "# p{code}<31:16>: 0b{value} -> {address + 2}"
2110   //   "# p{code}<47:32>: 0b{value} -> {address + 4}"
2111   // etc
2112 
2113   int vl = GetVectorLengthInBits();
2114   VIXL_ASSERT((vl % kQRegSize) == 0);
2115   for (unsigned q_index = 0; q_index < (vl / kQRegSize); q_index++) {
2116     // Suppress the newline, so the access annotation goes on the same line.
2117     PrintPartialPRegister(code, q_index, kPrintRegVnQPartial, "");
2118     fprintf(stream_,
2119             " %s %s0x%016" PRIxPTR "%s\n",
2120             op,
2121             clr_memory_address,
2122             address,
2123             clr_normal);
2124     address += kQRegSizeInBytes;
2125   }
2126 }
2127 
2128 void Simulator::PrintMemTransfer(uintptr_t dst, uintptr_t src, uint8_t value) {
2129   fprintf(stream_,
2130           "#               %s: %s0x%016" PRIxPTR " %s<- %s0x%02x%s",
2131           clr_reg_name,
2132           clr_memory_address,
2133           dst,
2134           clr_normal,
2135           clr_reg_value,
2136           value,
2137           clr_normal);
2138 
2139   fprintf(stream_,
2140           " <- %s0x%016" PRIxPTR "%s\n",
2141           clr_memory_address,
2142           src,
2143           clr_normal);
2144 }
2145 
2146 void Simulator::PrintRead(int rt_code,
2147                           PrintRegisterFormat format,
2148                           uintptr_t address) {
2149   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
2150   if (rt_code != kZeroRegCode) {
2151     registers_[rt_code].NotifyRegisterLogged();
2152   }
2153   PrintAccess(rt_code, format, "<-", address);
2154 }
2155 
2156 void Simulator::PrintExtendingRead(int rt_code,
2157                                    PrintRegisterFormat format,
2158                                    int access_size_in_bytes,
2159                                    uintptr_t address) {
2160   int reg_size_in_bytes = GetPrintRegSizeInBytes(format);
2161   if (access_size_in_bytes == reg_size_in_bytes) {
2162     // There is no extension here, so print a simple load.
2163     PrintRead(rt_code, format, address);
2164     return;
2165   }
2166   VIXL_ASSERT(access_size_in_bytes < reg_size_in_bytes);
2167 
2168   // For sign- and zero-extension, make it clear that the resulting register
2169   // value is different from what is loaded from memory.
2170   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
2171   if (rt_code != kZeroRegCode) {
2172     registers_[rt_code].NotifyRegisterLogged();
2173   }
2174   PrintRegister(rt_code, format);
2175   PrintPartialAccess(1,
2176                      0,
2177                      1,
2178                      access_size_in_bytes,
2179                      "<-",
2180                      address,
2181                      kXRegSizeInBytes);
2182 }
2183 
2184 void Simulator::PrintVRead(int rt_code,
2185                            PrintRegisterFormat format,
2186                            uintptr_t address) {
2187   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
2188   vregisters_[rt_code].NotifyRegisterLogged();
2189   PrintVAccess(rt_code, format, "<-", address);
2190 }
2191 
2192 void Simulator::PrintWrite(int rt_code,
2193                            PrintRegisterFormat format,
2194                            uintptr_t address) {
2195   // Because this trace doesn't represent a change to the source register's
2196   // value, only print the relevant part of the value.
2197   format = GetPrintRegPartial(format);
2198   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
2199   if (rt_code != kZeroRegCode) {
2200     registers_[rt_code].NotifyRegisterLogged();
2201   }
2202   PrintAccess(rt_code, format, "->", address);
2203 }
2204 
2205 void Simulator::PrintVWrite(int rt_code,
2206                             PrintRegisterFormat format,
2207                             uintptr_t address) {
2208   // Because this trace doesn't represent a change to the source register's
2209   // value, only print the relevant part of the value.
2210   format = GetPrintRegPartial(format);
2211   // It only makes sense to write scalar values here. Vectors are handled by
2212   // PrintVStructAccess.
2213   VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
2214   PrintVAccess(rt_code, format, "->", address);
2215 }
2216 
2217 void Simulator::PrintTakenBranch(const Instruction* target) {
2218   fprintf(stream_,
2219           "# %sBranch%s to 0x%016" PRIx64 ".\n",
2220           clr_branch_marker,
2221           clr_normal,
2222           reinterpret_cast<uint64_t>(target));
2223 }
2224 
2225 // Visitors---------------------------------------------------------------------
2226 
2227 
2228 void Simulator::Visit(Metadata* metadata, const Instruction* instr) {
2229   VIXL_ASSERT(metadata->count("form") > 0);
2230   std::string form = (*metadata)["form"];
2231   form_hash_ = Hash(form.c_str());
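       // The hashed form name is used both to look up the visitor table and, in
       // the shared Simulate* visitors below, to select per-form behavior via
       // the "..."_h literals in their switch statements.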
2232   const FormToVisitorFnMap* fv = Simulator::GetFormToVisitorFnMap();
2233   FormToVisitorFnMap::const_iterator it = fv->find(form_hash_);
2234   if (it == fv->end()) {
2235     VisitUnimplemented(instr);
2236   } else {
2237     (it->second)(this, instr);
2238   }
2239 }
2240 
2241 void Simulator::Simulate_PdT_PgZ_ZnT_ZmT(const Instruction* instr) {
2242   VectorFormat vform = instr->GetSVEVectorFormat();
2243   SimPRegister& pd = ReadPRegister(instr->GetPd());
2244   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2245   SimVRegister& zm = ReadVRegister(instr->GetRm());
2246   SimVRegister& zn = ReadVRegister(instr->GetRn());
2247 
2248   switch (form_hash_) {
2249     case "match_p_p_zz"_h:
2250       match(vform, pd, zn, zm, /* negate_match = */ false);
2251       break;
2252     case "nmatch_p_p_zz"_h:
2253       match(vform, pd, zn, zm, /* negate_match = */ true);
2254       break;
2255     default:
2256       VIXL_UNIMPLEMENTED();
2257   }
2258   mov_zeroing(pd, pg, pd);
2259   PredTest(vform, pg, pd);
2260 }
2261 
2262 void Simulator::Simulate_PdT_Xn_Xm(const Instruction* instr) {
2263   VectorFormat vform = instr->GetSVEVectorFormat();
2264   SimPRegister& pd = ReadPRegister(instr->GetPd());
2265   uint64_t src1 = ReadXRegister(instr->GetRn());
2266   uint64_t src2 = ReadXRegister(instr->GetRm());
2267 
2268   uint64_t absdiff = (src1 > src2) ? (src1 - src2) : (src2 - src1);
2269   absdiff >>= LaneSizeInBytesLog2FromFormat(vform);
2270 
2271   bool no_conflict = false;
2272   switch (form_hash_) {
2273     case "whilerw_p_rr"_h:
2274       no_conflict = (absdiff == 0);
2275       break;
2276     case "whilewr_p_rr"_h:
2277       no_conflict = (absdiff == 0) || (src2 <= src1);
2278       break;
2279     default:
2280       VIXL_UNIMPLEMENTED();
2281   }
2282 
2283   LogicPRegister dst(pd);
2284   for (int i = 0; i < LaneCountFromFormat(vform); i++) {
2285     dst.SetActive(vform,
2286                   i,
2287                   no_conflict || (static_cast<uint64_t>(i) < absdiff));
2288   }
2289 
2290   PredTest(vform, GetPTrue(), pd);
2291 }
2292 
2293 void Simulator::Simulate_ZdB_Zn1B_Zn2B_imm(const Instruction* instr) {
2294   VIXL_ASSERT(form_hash_ == "ext_z_zi_con"_h);
2295 
2296   SimVRegister& zd = ReadVRegister(instr->GetRd());
2297   SimVRegister& zn = ReadVRegister(instr->GetRn());
2298   SimVRegister& zn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfZRegisters);
2299 
2300   int index = instr->GetSVEExtractImmediate();
2301   int vl = GetVectorLengthInBytes();
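       // An immediate index beyond the vector length is treated as zero, so the
       // result is then a copy of the first source vector.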
2302   index = (index >= vl) ? 0 : index;
2303 
2304   ext(kFormatVnB, zd, zn, zn2, index);
2305 }
2306 
2307 void Simulator::Simulate_ZdB_ZnB_ZmB(const Instruction* instr) {
2308   SimVRegister& zd = ReadVRegister(instr->GetRd());
2309   SimVRegister& zm = ReadVRegister(instr->GetRm());
2310   SimVRegister& zn = ReadVRegister(instr->GetRn());
2311 
2312   switch (form_hash_) {
2313     case "histseg_z_zz"_h:
2314       if (instr->GetSVEVectorFormat() == kFormatVnB) {
2315         histogram(kFormatVnB,
2316                   zd,
2317                   GetPTrue(),
2318                   zn,
2319                   zm,
2320                   /* do_segmented = */ true);
2321       } else {
2322         VIXL_UNIMPLEMENTED();
2323       }
2324       break;
2325     case "pmul_z_zz"_h:
2326       pmul(kFormatVnB, zd, zn, zm);
2327       break;
2328     default:
2329       VIXL_UNIMPLEMENTED();
2330   }
2331 }
2332 
2333 void Simulator::SimulateSVEMulIndex(const Instruction* instr) {
2334   VectorFormat vform = instr->GetSVEVectorFormat();
2335   SimVRegister& zd = ReadVRegister(instr->GetRd());
2336   SimVRegister& zn = ReadVRegister(instr->GetRn());
2337 
2338   // The encodings for B- and H-sized lanes are redefined so that the B-lane
2339   // encoding carries the most significant bit of the index for H-sized lanes.
2340   // B-sized lanes themselves are not supported.
2341   if (vform == kFormatVnB) vform = kFormatVnH;
2342 
2343   VIXL_ASSERT((form_hash_ == "mul_z_zzi_d"_h) ||
2344               (form_hash_ == "mul_z_zzi_h"_h) ||
2345               (form_hash_ == "mul_z_zzi_s"_h));
2346 
2347   SimVRegister temp;
2348   dup_elements_to_segments(vform, temp, instr->GetSVEMulZmAndIndex());
2349   mul(vform, zd, zn, temp);
2350 }
2351 
2352 void Simulator::SimulateSVEMlaMlsIndex(const Instruction* instr) {
2353   VectorFormat vform = instr->GetSVEVectorFormat();
2354   SimVRegister& zda = ReadVRegister(instr->GetRd());
2355   SimVRegister& zn = ReadVRegister(instr->GetRn());
2356 
2357   // The encodings for B- and H-sized lanes are redefined so that the B-lane
2358   // encoding carries the most significant bit of the index for H-sized lanes.
2359   // B-sized lanes themselves are not supported.
2360   if (vform == kFormatVnB) vform = kFormatVnH;
2361 
2362   VIXL_ASSERT(
2363       (form_hash_ == "mla_z_zzzi_d"_h) || (form_hash_ == "mla_z_zzzi_h"_h) ||
2364       (form_hash_ == "mla_z_zzzi_s"_h) || (form_hash_ == "mls_z_zzzi_d"_h) ||
2365       (form_hash_ == "mls_z_zzzi_h"_h) || (form_hash_ == "mls_z_zzzi_s"_h));
2366 
2367   SimVRegister temp;
2368   dup_elements_to_segments(vform, temp, instr->GetSVEMulZmAndIndex());
2369   if (instr->ExtractBit(10) == 0) {
2370     mla(vform, zda, zda, zn, temp);
2371   } else {
2372     mls(vform, zda, zda, zn, temp);
2373   }
2374 }
2375 
2376 void Simulator::SimulateSVESaturatingMulHighIndex(const Instruction* instr) {
2377   VectorFormat vform = instr->GetSVEVectorFormat();
2378   SimVRegister& zd = ReadVRegister(instr->GetRd());
2379   SimVRegister& zn = ReadVRegister(instr->GetRn());
2380 
2381   // The encodings for B- and H-sized lanes are redefined so that the B-lane
2382   // encoding carries the most significant bit of the index for H-sized lanes.
2383   // B-sized lanes themselves are not supported.
2384   if (vform == kFormatVnB) {
2385     vform = kFormatVnH;
2386   }
2387 
2388   SimVRegister temp;
2389   dup_elements_to_segments(vform, temp, instr->GetSVEMulZmAndIndex());
2390   switch (form_hash_) {
2391     case "sqdmulh_z_zzi_h"_h:
2392     case "sqdmulh_z_zzi_s"_h:
2393     case "sqdmulh_z_zzi_d"_h:
2394       sqdmulh(vform, zd, zn, temp);
2395       break;
2396     case "sqrdmulh_z_zzi_h"_h:
2397     case "sqrdmulh_z_zzi_s"_h:
2398     case "sqrdmulh_z_zzi_d"_h:
2399       sqrdmulh(vform, zd, zn, temp);
2400       break;
2401     default:
2402       VIXL_UNIMPLEMENTED();
2403   }
2404 }
2405 
2406 void Simulator::SimulateSVESaturatingIntMulLongIdx(const Instruction* instr) {
2407   VectorFormat vform = instr->GetSVEVectorFormat();
2408   SimVRegister& zd = ReadVRegister(instr->GetRd());
2409   SimVRegister& zn = ReadVRegister(instr->GetRn());
2410 
2411   SimVRegister temp, zm_idx, zn_b, zn_t;
2412   // Instead of calling the indexed form of the instruction logic, we call the
2413   // vector form, which can reuse existing function logic without modification.
2414   // Select the specified elements based on the index input and then pack them
2415   // into the corresponding positions.
2416   VectorFormat vform_half = VectorFormatHalfWidth(vform);
2417   dup_elements_to_segments(vform_half, temp, instr->GetSVEMulLongZmAndIndex());
2418   pack_even_elements(vform_half, zm_idx, temp);
2419 
2420   pack_even_elements(vform_half, zn_b, zn);
2421   pack_odd_elements(vform_half, zn_t, zn);
2422 
2423   switch (form_hash_) {
2424     case "smullb_z_zzi_s"_h:
2425     case "smullb_z_zzi_d"_h:
2426       smull(vform, zd, zn_b, zm_idx);
2427       break;
2428     case "smullt_z_zzi_s"_h:
2429     case "smullt_z_zzi_d"_h:
2430       smull(vform, zd, zn_t, zm_idx);
2431       break;
2432     case "sqdmullb_z_zzi_d"_h:
2433       sqdmull(vform, zd, zn_b, zm_idx);
2434       break;
2435     case "sqdmullt_z_zzi_d"_h:
2436       sqdmull(vform, zd, zn_t, zm_idx);
2437       break;
2438     case "umullb_z_zzi_s"_h:
2439     case "umullb_z_zzi_d"_h:
2440       umull(vform, zd, zn_b, zm_idx);
2441       break;
2442     case "umullt_z_zzi_s"_h:
2443     case "umullt_z_zzi_d"_h:
2444       umull(vform, zd, zn_t, zm_idx);
2445       break;
2446     case "sqdmullb_z_zzi_s"_h:
2447       sqdmull(vform, zd, zn_b, zm_idx);
2448       break;
2449     case "sqdmullt_z_zzi_s"_h:
2450       sqdmull(vform, zd, zn_t, zm_idx);
2451       break;
2452     case "smlalb_z_zzzi_s"_h:
2453     case "smlalb_z_zzzi_d"_h:
2454       smlal(vform, zd, zn_b, zm_idx);
2455       break;
2456     case "smlalt_z_zzzi_s"_h:
2457     case "smlalt_z_zzzi_d"_h:
2458       smlal(vform, zd, zn_t, zm_idx);
2459       break;
2460     case "smlslb_z_zzzi_s"_h:
2461     case "smlslb_z_zzzi_d"_h:
2462       smlsl(vform, zd, zn_b, zm_idx);
2463       break;
2464     case "smlslt_z_zzzi_s"_h:
2465     case "smlslt_z_zzzi_d"_h:
2466       smlsl(vform, zd, zn_t, zm_idx);
2467       break;
2468     case "umlalb_z_zzzi_s"_h:
2469     case "umlalb_z_zzzi_d"_h:
2470       umlal(vform, zd, zn_b, zm_idx);
2471       break;
2472     case "umlalt_z_zzzi_s"_h:
2473     case "umlalt_z_zzzi_d"_h:
2474       umlal(vform, zd, zn_t, zm_idx);
2475       break;
2476     case "umlslb_z_zzzi_s"_h:
2477     case "umlslb_z_zzzi_d"_h:
2478       umlsl(vform, zd, zn_b, zm_idx);
2479       break;
2480     case "umlslt_z_zzzi_s"_h:
2481     case "umlslt_z_zzzi_d"_h:
2482       umlsl(vform, zd, zn_t, zm_idx);
2483       break;
2484     default:
2485       VIXL_UNIMPLEMENTED();
2486   }
2487 }
2488 
2489 void Simulator::Simulate_ZdH_PgM_ZnS(const Instruction* instr) {
2490   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2491   SimVRegister& zd = ReadVRegister(instr->GetRd());
2492   SimVRegister& zn = ReadVRegister(instr->GetRn());
2493   SimVRegister result, zd_b;
2494 
2495   pack_even_elements(kFormatVnH, zd_b, zd);
2496 
2497   switch (form_hash_) {
2498     case "fcvtnt_z_p_z_s2h"_h:
2499       fcvt(kFormatVnH, kFormatVnS, result, pg, zn);
2500       pack_even_elements(kFormatVnH, result, result);
2501       zip1(kFormatVnH, result, zd_b, result);
2502       break;
2503     default:
2504       VIXL_UNIMPLEMENTED();
2505   }
2506   mov_merging(kFormatVnS, zd, pg, result);
2507 }
2508 
2509 void Simulator::Simulate_ZdS_PgM_ZnD(const Instruction* instr) {
2510   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2511   SimVRegister& zd = ReadVRegister(instr->GetRd());
2512   SimVRegister& zn = ReadVRegister(instr->GetRn());
2513   SimVRegister result, zero, zd_b;
2514 
2515   zero.Clear();
2516   pack_even_elements(kFormatVnS, zd_b, zd);
2517 
2518   switch (form_hash_) {
2519     case "fcvtnt_z_p_z_d2s"_h:
2520       fcvt(kFormatVnS, kFormatVnD, result, pg, zn);
2521       pack_even_elements(kFormatVnS, result, result);
2522       zip1(kFormatVnS, result, zd_b, result);
2523       break;
2524     case "fcvtx_z_p_z_d2s"_h:
2525       fcvtxn(kFormatVnS, result, zn);
2526       zip1(kFormatVnS, result, result, zero);
2527       break;
2528     case "fcvtxnt_z_p_z_d2s"_h:
2529       fcvtxn(kFormatVnS, result, zn);
2530       zip1(kFormatVnS, result, zd_b, result);
2531       break;
2532     default:
2533       VIXL_UNIMPLEMENTED();
2534   }
2535   mov_merging(kFormatVnD, zd, pg, result);
2536 }
2537 
2538 void Simulator::SimulateSVEFPConvertLong(const Instruction* instr) {
2539   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2540   SimVRegister& zd = ReadVRegister(instr->GetRd());
2541   SimVRegister& zn = ReadVRegister(instr->GetRn());
2542   SimVRegister result;
2543 
2544   switch (form_hash_) {
2545     case "fcvtlt_z_p_z_h2s"_h:
2546       ext(kFormatVnB, result, zn, zn, kHRegSizeInBytes);
2547       fcvt(kFormatVnS, kFormatVnH, zd, pg, result);
2548       break;
2549     case "fcvtlt_z_p_z_s2d"_h:
2550       ext(kFormatVnB, result, zn, zn, kSRegSizeInBytes);
2551       fcvt(kFormatVnD, kFormatVnS, zd, pg, result);
2552       break;
2553     default:
2554       VIXL_UNIMPLEMENTED();
2555   }
2556 }
2557 
2558 void Simulator::Simulate_ZdS_PgM_ZnS(const Instruction* instr) {
2559   VectorFormat vform = instr->GetSVEVectorFormat();
2560   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2561   SimVRegister& zd = ReadVRegister(instr->GetRd());
2562   SimVRegister& zn = ReadVRegister(instr->GetRn());
2563   SimVRegister result;
2564 
2565   if (vform != kFormatVnS) {
2566     VIXL_UNIMPLEMENTED();
2567   }
2568 
2569   switch (form_hash_) {
2570     case "urecpe_z_p_z"_h:
2571       urecpe(vform, result, zn);
2572       break;
2573     case "ursqrte_z_p_z"_h:
2574       ursqrte(vform, result, zn);
2575       break;
2576     default:
2577       VIXL_UNIMPLEMENTED();
2578   }
2579   mov_merging(vform, zd, pg, result);
2580 }
2581 
2582 void Simulator::Simulate_ZdT_PgM_ZnT(const Instruction* instr) {
2583   VectorFormat vform = instr->GetSVEVectorFormat();
2584   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2585   SimVRegister& zd = ReadVRegister(instr->GetRd());
2586   SimVRegister& zn = ReadVRegister(instr->GetRn());
2587   SimVRegister result;
2588 
2589   switch (form_hash_) {
2590     case "flogb_z_p_z"_h:
2591       vform = instr->GetSVEVectorFormat(17);
2592       flogb(vform, result, zn);
2593       break;
2594     case "sqabs_z_p_z"_h:
2595       abs(vform, result, zn).SignedSaturate(vform);
2596       break;
2597     case "sqneg_z_p_z"_h:
2598       neg(vform, result, zn).SignedSaturate(vform);
2599       break;
2600     default:
2601       VIXL_UNIMPLEMENTED();
2602   }
2603   mov_merging(vform, zd, pg, result);
2604 }
2605 
2606 void Simulator::Simulate_ZdT_PgZ_ZnT_ZmT(const Instruction* instr) {
2607   VectorFormat vform = instr->GetSVEVectorFormat();
2608   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
2609   SimVRegister& zd = ReadVRegister(instr->GetRd());
2610   SimVRegister& zm = ReadVRegister(instr->GetRm());
2611   SimVRegister& zn = ReadVRegister(instr->GetRn());
2612   SimVRegister result;
2613 
2614   VIXL_ASSERT(form_hash_ == "histcnt_z_p_zz"_h);
2615   if ((vform == kFormatVnS) || (vform == kFormatVnD)) {
2616     histogram(vform, result, pg, zn, zm);
2617     mov_zeroing(vform, zd, pg, result);
2618   } else {
2619     VIXL_UNIMPLEMENTED();
2620   }
2621 }
2622 
2623 void Simulator::Simulate_ZdT_ZnT_ZmT(const Instruction* instr) {
2624   VectorFormat vform = instr->GetSVEVectorFormat();
2625   SimVRegister& zd = ReadVRegister(instr->GetRd());
2626   SimVRegister& zm = ReadVRegister(instr->GetRm());
2627   SimVRegister& zn = ReadVRegister(instr->GetRn());
2628   SimVRegister result;
2629   bool do_bext = false;
2630 
2631   switch (form_hash_) {
2632     case "bdep_z_zz"_h:
2633       bdep(vform, zd, zn, zm);
2634       break;
2635     case "bext_z_zz"_h:
2636       do_bext = true;
2637       VIXL_FALLTHROUGH();
2638     case "bgrp_z_zz"_h:
2639       bgrp(vform, zd, zn, zm, do_bext);
2640       break;
2641     case "eorbt_z_zz"_h:
2642       rotate_elements_right(vform, result, zm, 1);
2643       SVEBitwiseLogicalUnpredicatedHelper(EOR, kFormatVnD, result, zn, result);
2644       mov_alternating(vform, zd, result, 0);
2645       break;
2646     case "eortb_z_zz"_h:
2647       rotate_elements_right(vform, result, zm, -1);
2648       SVEBitwiseLogicalUnpredicatedHelper(EOR, kFormatVnD, result, zn, result);
2649       mov_alternating(vform, zd, result, 1);
2650       break;
2651     case "mul_z_zz"_h:
2652       mul(vform, zd, zn, zm);
2653       break;
2654     case "smulh_z_zz"_h:
2655       smulh(vform, zd, zn, zm);
2656       break;
2657     case "sqdmulh_z_zz"_h:
2658       sqdmulh(vform, zd, zn, zm);
2659       break;
2660     case "sqrdmulh_z_zz"_h:
2661       sqrdmulh(vform, zd, zn, zm);
2662       break;
2663     case "umulh_z_zz"_h:
2664       umulh(vform, zd, zn, zm);
2665       break;
2666     default:
2667       VIXL_UNIMPLEMENTED();
2668   }
2669 }
2670 
2671 void Simulator::Simulate_ZdT_ZnT_ZmTb(const Instruction* instr) {
2672   VectorFormat vform = instr->GetSVEVectorFormat();
2673   SimVRegister& zd = ReadVRegister(instr->GetRd());
2674   SimVRegister& zm = ReadVRegister(instr->GetRm());
2675   SimVRegister& zn = ReadVRegister(instr->GetRn());
2676 
2677   SimVRegister zm_b, zm_t;
2678   VectorFormat vform_half = VectorFormatHalfWidth(vform);
2679   pack_even_elements(vform_half, zm_b, zm);
2680   pack_odd_elements(vform_half, zm_t, zm);
2681 
2682   switch (form_hash_) {
2683     case "saddwb_z_zz"_h:
2684       saddw(vform, zd, zn, zm_b);
2685       break;
2686     case "saddwt_z_zz"_h:
2687       saddw(vform, zd, zn, zm_t);
2688       break;
2689     case "ssubwb_z_zz"_h:
2690       ssubw(vform, zd, zn, zm_b);
2691       break;
2692     case "ssubwt_z_zz"_h:
2693       ssubw(vform, zd, zn, zm_t);
2694       break;
2695     case "uaddwb_z_zz"_h:
2696       uaddw(vform, zd, zn, zm_b);
2697       break;
2698     case "uaddwt_z_zz"_h:
2699       uaddw(vform, zd, zn, zm_t);
2700       break;
2701     case "usubwb_z_zz"_h:
2702       usubw(vform, zd, zn, zm_b);
2703       break;
2704     case "usubwt_z_zz"_h:
2705       usubw(vform, zd, zn, zm_t);
2706       break;
2707     default:
2708       VIXL_UNIMPLEMENTED();
2709   }
2710 }
2711 
2712 void Simulator::Simulate_ZdT_ZnT_const(const Instruction* instr) {
2713   SimVRegister& zd = ReadVRegister(instr->GetRd());
2714   SimVRegister& zn = ReadVRegister(instr->GetRn());
2715 
2716   std::pair<int, int> shift_and_lane_size =
2717       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
2718   int lane_size = shift_and_lane_size.second;
2719   VIXL_ASSERT((lane_size >= 0) &&
2720               (static_cast<unsigned>(lane_size) <= kDRegSizeInBytesLog2));
2721   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
2722   int shift_dist = shift_and_lane_size.first;
2723 
2724   switch (form_hash_) {
2725     case "sli_z_zzi"_h:
2726       // Shift distance is computed differently for left shifts. Convert the
2727       // result.
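      // Worked example (illustrative): for S-sized lanes, lane_size == 2, so
      // the lane width is 8 << 2 = 32 bits; a decoded distance of 20 (computed
      // as for a right shift) becomes a left shift of 32 - 20 = 12.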
2728       shift_dist = (8 << lane_size) - shift_dist;
2729       sli(vform, zd, zn, shift_dist);
2730       break;
2731     case "sri_z_zzi"_h:
2732       sri(vform, zd, zn, shift_dist);
2733       break;
2734     default:
2735       VIXL_UNIMPLEMENTED();
2736   }
2737 }
2738 
2739 void Simulator::SimulateSVENarrow(const Instruction* instr) {
2740   SimVRegister& zd = ReadVRegister(instr->GetRd());
2741   SimVRegister& zn = ReadVRegister(instr->GetRn());
2742   SimVRegister result;
2743 
2744   std::pair<int, int> shift_and_lane_size =
2745       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
2746   int lane_size = shift_and_lane_size.second;
2747   VIXL_ASSERT((lane_size >= static_cast<int>(kBRegSizeInBytesLog2)) &&
2748               (lane_size <= static_cast<int>(kSRegSizeInBytesLog2)));
2749   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
2750   int right_shift_dist = shift_and_lane_size.first;
2751   bool top = false;
2752 
2753   switch (form_hash_) {
2754     case "sqxtnt_z_zz"_h:
2755       top = true;
2756       VIXL_FALLTHROUGH();
2757     case "sqxtnb_z_zz"_h:
2758       sqxtn(vform, result, zn);
2759       break;
2760     case "sqxtunt_z_zz"_h:
2761       top = true;
2762       VIXL_FALLTHROUGH();
2763     case "sqxtunb_z_zz"_h:
2764       sqxtun(vform, result, zn);
2765       break;
2766     case "uqxtnt_z_zz"_h:
2767       top = true;
2768       VIXL_FALLTHROUGH();
2769     case "uqxtnb_z_zz"_h:
2770       uqxtn(vform, result, zn);
2771       break;
2772     case "rshrnt_z_zi"_h:
2773       top = true;
2774       VIXL_FALLTHROUGH();
2775     case "rshrnb_z_zi"_h:
2776       rshrn(vform, result, zn, right_shift_dist);
2777       break;
2778     case "shrnt_z_zi"_h:
2779       top = true;
2780       VIXL_FALLTHROUGH();
2781     case "shrnb_z_zi"_h:
2782       shrn(vform, result, zn, right_shift_dist);
2783       break;
2784     case "sqrshrnt_z_zi"_h:
2785       top = true;
2786       VIXL_FALLTHROUGH();
2787     case "sqrshrnb_z_zi"_h:
2788       sqrshrn(vform, result, zn, right_shift_dist);
2789       break;
2790     case "sqrshrunt_z_zi"_h:
2791       top = true;
2792       VIXL_FALLTHROUGH();
2793     case "sqrshrunb_z_zi"_h:
2794       sqrshrun(vform, result, zn, right_shift_dist);
2795       break;
2796     case "sqshrnt_z_zi"_h:
2797       top = true;
2798       VIXL_FALLTHROUGH();
2799     case "sqshrnb_z_zi"_h:
2800       sqshrn(vform, result, zn, right_shift_dist);
2801       break;
2802     case "sqshrunt_z_zi"_h:
2803       top = true;
2804       VIXL_FALLTHROUGH();
2805     case "sqshrunb_z_zi"_h:
2806       sqshrun(vform, result, zn, right_shift_dist);
2807       break;
2808     case "uqrshrnt_z_zi"_h:
2809       top = true;
2810       VIXL_FALLTHROUGH();
2811     case "uqrshrnb_z_zi"_h:
2812       uqrshrn(vform, result, zn, right_shift_dist);
2813       break;
2814     case "uqshrnt_z_zi"_h:
2815       top = true;
2816       VIXL_FALLTHROUGH();
2817     case "uqshrnb_z_zi"_h:
2818       uqshrn(vform, result, zn, right_shift_dist);
2819       break;
2820     default:
2821       VIXL_UNIMPLEMENTED();
2822   }
2823 
2824   if (top) {
2825     // Keep even elements, replace odd elements with the results.
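    // Illustrative note: after xtn() narrows the existing destination lanes,
    // zip1() interleaves them with the new results, producing
    // { zd[0], result[0], zd[1], result[1], ... }, so the results land in the
    // odd ("top") lanes while the even lanes are preserved.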
2826     xtn(vform, zd, zd);
2827     zip1(vform, zd, zd, result);
2828   } else {
2829     // Zero odd elements, replace even elements with the results.
2830     SimVRegister zero;
2831     zero.Clear();
2832     zip1(vform, zd, result, zero);
2833   }
2834 }
2835 
2836 void Simulator::SimulateSVEInterleavedArithLong(const Instruction* instr) {
2837   VectorFormat vform = instr->GetSVEVectorFormat();
2838   SimVRegister& zd = ReadVRegister(instr->GetRd());
2839   SimVRegister& zm = ReadVRegister(instr->GetRm());
2840   SimVRegister& zn = ReadVRegister(instr->GetRn());
2841   SimVRegister temp, zn_b, zm_b, zn_t, zm_t;
2842 
2843   // Construct temporary registers containing the even (bottom) and odd (top)
2844   // elements.
2845   VectorFormat vform_half = VectorFormatHalfWidth(vform);
2846   pack_even_elements(vform_half, zn_b, zn);
2847   pack_even_elements(vform_half, zm_b, zm);
2848   pack_odd_elements(vform_half, zn_t, zn);
2849   pack_odd_elements(vform_half, zm_t, zm);
2850 
2851   switch (form_hash_) {
2852     case "sabdlb_z_zz"_h:
2853       sabdl(vform, zd, zn_b, zm_b);
2854       break;
2855     case "sabdlt_z_zz"_h:
2856       sabdl(vform, zd, zn_t, zm_t);
2857       break;
2858     case "saddlb_z_zz"_h:
2859       saddl(vform, zd, zn_b, zm_b);
2860       break;
2861     case "saddlbt_z_zz"_h:
2862       saddl(vform, zd, zn_b, zm_t);
2863       break;
2864     case "saddlt_z_zz"_h:
2865       saddl(vform, zd, zn_t, zm_t);
2866       break;
2867     case "ssublb_z_zz"_h:
2868       ssubl(vform, zd, zn_b, zm_b);
2869       break;
2870     case "ssublbt_z_zz"_h:
2871       ssubl(vform, zd, zn_b, zm_t);
2872       break;
2873     case "ssublt_z_zz"_h:
2874       ssubl(vform, zd, zn_t, zm_t);
2875       break;
2876     case "ssubltb_z_zz"_h:
2877       ssubl(vform, zd, zn_t, zm_b);
2878       break;
2879     case "uabdlb_z_zz"_h:
2880       uabdl(vform, zd, zn_b, zm_b);
2881       break;
2882     case "uabdlt_z_zz"_h:
2883       uabdl(vform, zd, zn_t, zm_t);
2884       break;
2885     case "uaddlb_z_zz"_h:
2886       uaddl(vform, zd, zn_b, zm_b);
2887       break;
2888     case "uaddlt_z_zz"_h:
2889       uaddl(vform, zd, zn_t, zm_t);
2890       break;
2891     case "usublb_z_zz"_h:
2892       usubl(vform, zd, zn_b, zm_b);
2893       break;
2894     case "usublt_z_zz"_h:
2895       usubl(vform, zd, zn_t, zm_t);
2896       break;
2897     case "sabalb_z_zzz"_h:
2898       sabal(vform, zd, zn_b, zm_b);
2899       break;
2900     case "sabalt_z_zzz"_h:
2901       sabal(vform, zd, zn_t, zm_t);
2902       break;
2903     case "uabalb_z_zzz"_h:
2904       uabal(vform, zd, zn_b, zm_b);
2905       break;
2906     case "uabalt_z_zzz"_h:
2907       uabal(vform, zd, zn_t, zm_t);
2908       break;
2909     default:
2910       VIXL_UNIMPLEMENTED();
2911   }
2912 }
2913 
2914 void Simulator::SimulateSVEPmull128(const Instruction* instr) {
2915   SimVRegister& zd = ReadVRegister(instr->GetRd());
2916   SimVRegister& zm = ReadVRegister(instr->GetRm());
2917   SimVRegister& zn = ReadVRegister(instr->GetRn());
2918   SimVRegister zn_temp, zm_temp;
2919 
2920   if (form_hash_ == "pmullb_z_zz_q"_h) {
2921     pack_even_elements(kFormatVnD, zn_temp, zn);
2922     pack_even_elements(kFormatVnD, zm_temp, zm);
2923   } else {
2924     VIXL_ASSERT(form_hash_ == "pmullt_z_zz_q"_h);
2925     pack_odd_elements(kFormatVnD, zn_temp, zn);
2926     pack_odd_elements(kFormatVnD, zm_temp, zm);
2927   }
2928   pmull(kFormatVnQ, zd, zn_temp, zm_temp);
2929 }
2930 
2931 void Simulator::SimulateSVEIntMulLongVec(const Instruction* instr) {
2932   VectorFormat vform = instr->GetSVEVectorFormat();
2933   SimVRegister& zd = ReadVRegister(instr->GetRd());
2934   SimVRegister& zm = ReadVRegister(instr->GetRm());
2935   SimVRegister& zn = ReadVRegister(instr->GetRn());
2936   SimVRegister temp, zn_b, zm_b, zn_t, zm_t;
2937   VectorFormat vform_half = VectorFormatHalfWidth(vform);
2938   pack_even_elements(vform_half, zn_b, zn);
2939   pack_even_elements(vform_half, zm_b, zm);
2940   pack_odd_elements(vform_half, zn_t, zn);
2941   pack_odd_elements(vform_half, zm_t, zm);
2942 
2943   switch (form_hash_) {
2944     case "pmullb_z_zz"_h:
2945       // Size '10' is undefined.
2946       if (vform == kFormatVnS) {
2947         VIXL_UNIMPLEMENTED();
2948       }
2949       pmull(vform, zd, zn_b, zm_b);
2950       break;
2951     case "pmullt_z_zz"_h:
2952       // Size '10' is undefined.
2953       if (vform == kFormatVnS) {
2954         VIXL_UNIMPLEMENTED();
2955       }
2956       pmull(vform, zd, zn_t, zm_t);
2957       break;
2958     case "smullb_z_zz"_h:
2959       smull(vform, zd, zn_b, zm_b);
2960       break;
2961     case "smullt_z_zz"_h:
2962       smull(vform, zd, zn_t, zm_t);
2963       break;
2964     case "sqdmullb_z_zz"_h:
2965       sqdmull(vform, zd, zn_b, zm_b);
2966       break;
2967     case "sqdmullt_z_zz"_h:
2968       sqdmull(vform, zd, zn_t, zm_t);
2969       break;
2970     case "umullb_z_zz"_h:
2971       umull(vform, zd, zn_b, zm_b);
2972       break;
2973     case "umullt_z_zz"_h:
2974       umull(vform, zd, zn_t, zm_t);
2975       break;
2976     default:
2977       VIXL_UNIMPLEMENTED();
2978   }
2979 }
2980 
2981 void Simulator::SimulateSVEAddSubHigh(const Instruction* instr) {
2982   SimVRegister& zd = ReadVRegister(instr->GetRd());
2983   SimVRegister& zm = ReadVRegister(instr->GetRm());
2984   SimVRegister& zn = ReadVRegister(instr->GetRn());
2985   SimVRegister result;
2986   bool top = false;
2987 
2988   VectorFormat vform_src = instr->GetSVEVectorFormat();
2989   if (vform_src == kFormatVnB) {
2990     VIXL_UNIMPLEMENTED();
2991   }
2992   VectorFormat vform = VectorFormatHalfWidth(vform_src);
2993 
2994   switch (form_hash_) {
2995     case "addhnt_z_zz"_h:
2996       top = true;
2997       VIXL_FALLTHROUGH();
2998     case "addhnb_z_zz"_h:
2999       addhn(vform, result, zn, zm);
3000       break;
3001     case "raddhnt_z_zz"_h:
3002       top = true;
3003       VIXL_FALLTHROUGH();
3004     case "raddhnb_z_zz"_h:
3005       raddhn(vform, result, zn, zm);
3006       break;
3007     case "rsubhnt_z_zz"_h:
3008       top = true;
3009       VIXL_FALLTHROUGH();
3010     case "rsubhnb_z_zz"_h:
3011       rsubhn(vform, result, zn, zm);
3012       break;
3013     case "subhnt_z_zz"_h:
3014       top = true;
3015       VIXL_FALLTHROUGH();
3016     case "subhnb_z_zz"_h:
3017       subhn(vform, result, zn, zm);
3018       break;
3019     default:
3020       VIXL_UNIMPLEMENTED();
3021   }
3022 
3023   if (top) {
3024     // Keep even elements, replace odd elements with the results.
3025     xtn(vform, zd, zd);
3026     zip1(vform, zd, zd, result);
3027   } else {
3028     // Zero odd elements, replace even elements with the results.
3029     SimVRegister zero;
3030     zero.Clear();
3031     zip1(vform, zd, result, zero);
3032   }
3033 }
3034 
3035 void Simulator::SimulateSVEShiftLeftImm(const Instruction* instr) {
3036   SimVRegister& zd = ReadVRegister(instr->GetRd());
3037   SimVRegister& zn = ReadVRegister(instr->GetRn());
3038   SimVRegister zn_b, zn_t;
3039 
3040   std::pair<int, int> shift_and_lane_size =
3041       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
3042   int lane_size = shift_and_lane_size.second;
3043   VIXL_ASSERT((lane_size >= 0) &&
3044               (static_cast<unsigned>(lane_size) <= kDRegSizeInBytesLog2));
3045   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size + 1);
3046   int right_shift_dist = shift_and_lane_size.first;
3047   int left_shift_dist = (8 << lane_size) - right_shift_dist;
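  // Illustrative example: for sshllb/ushllb from H-sized source lanes,
  // lane_size == 1, so the lane width is 8 << 1 = 16 bits and a decoded
  // right-shift-style distance of 13 corresponds to a left shift of
  // 16 - 13 = 3.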
3048 
3049   // Construct temporary registers containing the even (bottom) and odd (top)
3050   // elements.
3051   VectorFormat vform_half = VectorFormatHalfWidth(vform);
3052   pack_even_elements(vform_half, zn_b, zn);
3053   pack_odd_elements(vform_half, zn_t, zn);
3054 
3055   switch (form_hash_) {
3056     case "sshllb_z_zi"_h:
3057       sshll(vform, zd, zn_b, left_shift_dist);
3058       break;
3059     case "sshllt_z_zi"_h:
3060       sshll(vform, zd, zn_t, left_shift_dist);
3061       break;
3062     case "ushllb_z_zi"_h:
3063       ushll(vform, zd, zn_b, left_shift_dist);
3064       break;
3065     case "ushllt_z_zi"_h:
3066       ushll(vform, zd, zn_t, left_shift_dist);
3067       break;
3068     default:
3069       VIXL_UNIMPLEMENTED();
3070   }
3071 }
3072 
3073 void Simulator::SimulateSVESaturatingMulAddHigh(const Instruction* instr) {
3074   VectorFormat vform = instr->GetSVEVectorFormat();
3075   SimVRegister& zda = ReadVRegister(instr->GetRd());
3076   SimVRegister& zn = ReadVRegister(instr->GetRn());
3077   unsigned zm_code = instr->GetRm();
3078   int index = -1;
3079   bool is_mla = false;
3080 
3081   switch (form_hash_) {
3082     case "sqrdmlah_z_zzz"_h:
3083       is_mla = true;
3084       VIXL_FALLTHROUGH();
3085     case "sqrdmlsh_z_zzz"_h:
3086       // Nothing to do.
3087       break;
3088     case "sqrdmlah_z_zzzi_h"_h:
3089       is_mla = true;
3090       VIXL_FALLTHROUGH();
3091     case "sqrdmlsh_z_zzzi_h"_h:
3092       vform = kFormatVnH;
3093       index = (instr->ExtractBit(22) << 2) | instr->ExtractBits(20, 19);
3094       zm_code = instr->ExtractBits(18, 16);
3095       break;
3096     case "sqrdmlah_z_zzzi_s"_h:
3097       is_mla = true;
3098       VIXL_FALLTHROUGH();
3099     case "sqrdmlsh_z_zzzi_s"_h:
3100       vform = kFormatVnS;
3101       index = instr->ExtractBits(20, 19);
3102       zm_code = instr->ExtractBits(18, 16);
3103       break;
3104     case "sqrdmlah_z_zzzi_d"_h:
3105       is_mla = true;
3106       VIXL_FALLTHROUGH();
3107     case "sqrdmlsh_z_zzzi_d"_h:
3108       vform = kFormatVnD;
3109       index = instr->ExtractBit(20);
3110       zm_code = instr->ExtractBits(19, 16);
3111       break;
3112     default:
3113       VIXL_UNIMPLEMENTED();
3114   }
3115 
3116   SimVRegister& zm = ReadVRegister(zm_code);
3117   SimVRegister zm_idx;
3118   if (index >= 0) {
3119     dup_elements_to_segments(vform, zm_idx, zm, index);
3120   }
3121 
3122   if (is_mla) {
3123     sqrdmlah(vform, zda, zn, (index >= 0) ? zm_idx : zm);
3124   } else {
3125     sqrdmlsh(vform, zda, zn, (index >= 0) ? zm_idx : zm);
3126   }
3127 }
3128 
3129 void Simulator::Simulate_ZdaD_ZnS_ZmS_imm(const Instruction* instr) {
3130   SimVRegister& zda = ReadVRegister(instr->GetRd());
3131   SimVRegister& zn = ReadVRegister(instr->GetRn());
3132   SimVRegister& zm = ReadVRegister(instr->ExtractBits(19, 16));
3133 
3134   SimVRegister temp, zm_idx, zn_b, zn_t;
3135   Instr index = (instr->ExtractBit(20) << 1) | instr->ExtractBit(11);
3136   dup_elements_to_segments(kFormatVnS, temp, zm, index);
3137   pack_even_elements(kFormatVnS, zm_idx, temp);
3138   pack_even_elements(kFormatVnS, zn_b, zn);
3139   pack_odd_elements(kFormatVnS, zn_t, zn);
3140 
3141   switch (form_hash_) {
3142     case "sqdmlalb_z_zzzi_d"_h:
3143       sqdmlal(kFormatVnD, zda, zn_b, zm_idx);
3144       break;
3145     case "sqdmlalt_z_zzzi_d"_h:
3146       sqdmlal(kFormatVnD, zda, zn_t, zm_idx);
3147       break;
3148     case "sqdmlslb_z_zzzi_d"_h:
3149       sqdmlsl(kFormatVnD, zda, zn_b, zm_idx);
3150       break;
3151     case "sqdmlslt_z_zzzi_d"_h:
3152       sqdmlsl(kFormatVnD, zda, zn_t, zm_idx);
3153       break;
3154     default:
3155       VIXL_UNIMPLEMENTED();
3156   }
3157 }
3158 
3159 void Simulator::Simulate_ZdaS_ZnH_ZmH(const Instruction* instr) {
3160   SimVRegister& zda = ReadVRegister(instr->GetRd());
3161   SimVRegister& zm = ReadVRegister(instr->GetRm());
3162   SimVRegister& zn = ReadVRegister(instr->GetRn());
3163 
3164   SimVRegister temp, zn_b, zm_b, zn_t, zm_t;
3165   pack_even_elements(kFormatVnH, zn_b, zn);
3166   pack_even_elements(kFormatVnH, zm_b, zm);
3167   pack_odd_elements(kFormatVnH, zn_t, zn);
3168   pack_odd_elements(kFormatVnH, zm_t, zm);
3169 
3170   switch (form_hash_) {
3171     case "fmlalb_z_zzz"_h:
3172       fmlal(kFormatVnS, zda, zn_b, zm_b);
3173       break;
3174     case "fmlalt_z_zzz"_h:
3175       fmlal(kFormatVnS, zda, zn_t, zm_t);
3176       break;
3177     case "fmlslb_z_zzz"_h:
3178       fmlsl(kFormatVnS, zda, zn_b, zm_b);
3179       break;
3180     case "fmlslt_z_zzz"_h:
3181       fmlsl(kFormatVnS, zda, zn_t, zm_t);
3182       break;
3183     default:
3184       VIXL_UNIMPLEMENTED();
3185   }
3186 }
3187 
3188 void Simulator::Simulate_ZdaS_ZnH_ZmH_imm(const Instruction* instr) {
3189   SimVRegister& zda = ReadVRegister(instr->GetRd());
3190   SimVRegister& zn = ReadVRegister(instr->GetRn());
3191   SimVRegister& zm = ReadVRegister(instr->ExtractBits(18, 16));
3192 
3193   SimVRegister temp, zm_idx, zn_b, zn_t;
3194   Instr index = (instr->ExtractBits(20, 19) << 1) | instr->ExtractBit(11);
3195   dup_elements_to_segments(kFormatVnH, temp, zm, index);
3196   pack_even_elements(kFormatVnH, zm_idx, temp);
3197   pack_even_elements(kFormatVnH, zn_b, zn);
3198   pack_odd_elements(kFormatVnH, zn_t, zn);
3199 
3200   switch (form_hash_) {
3201     case "fmlalb_z_zzzi_s"_h:
3202       fmlal(kFormatVnS, zda, zn_b, zm_idx);
3203       break;
3204     case "fmlalt_z_zzzi_s"_h:
3205       fmlal(kFormatVnS, zda, zn_t, zm_idx);
3206       break;
3207     case "fmlslb_z_zzzi_s"_h:
3208       fmlsl(kFormatVnS, zda, zn_b, zm_idx);
3209       break;
3210     case "fmlslt_z_zzzi_s"_h:
3211       fmlsl(kFormatVnS, zda, zn_t, zm_idx);
3212       break;
3213     case "sqdmlalb_z_zzzi_s"_h:
3214       sqdmlal(kFormatVnS, zda, zn_b, zm_idx);
3215       break;
3216     case "sqdmlalt_z_zzzi_s"_h:
3217       sqdmlal(kFormatVnS, zda, zn_t, zm_idx);
3218       break;
3219     case "sqdmlslb_z_zzzi_s"_h:
3220       sqdmlsl(kFormatVnS, zda, zn_b, zm_idx);
3221       break;
3222     case "sqdmlslt_z_zzzi_s"_h:
3223       sqdmlsl(kFormatVnS, zda, zn_t, zm_idx);
3224       break;
3225     default:
3226       VIXL_UNIMPLEMENTED();
3227   }
3228 }
3229 
3230 void Simulator::Simulate_ZdaT_PgM_ZnTb(const Instruction* instr) {
3231   VectorFormat vform = instr->GetSVEVectorFormat();
3232   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3233   SimVRegister& zda = ReadVRegister(instr->GetRd());
3234   SimVRegister& zn = ReadVRegister(instr->GetRn());
3235   SimVRegister result;
3236 
3237   switch (form_hash_) {
3238     case "sadalp_z_p_z"_h:
3239       sadalp(vform, result, zn);
3240       break;
3241     case "uadalp_z_p_z"_h:
3242       uadalp(vform, result, zn);
3243       break;
3244     default:
3245       VIXL_UNIMPLEMENTED();
3246   }
3247   mov_merging(vform, zda, pg, result);
3248 }
3249 
3250 void Simulator::SimulateSVEAddSubCarry(const Instruction* instr) {
3251   VectorFormat vform = (instr->ExtractBit(22) == 0) ? kFormatVnS : kFormatVnD;
3252   SimVRegister& zda = ReadVRegister(instr->GetRd());
3253   SimVRegister& zm = ReadVRegister(instr->GetRm());
3254   SimVRegister& zn = ReadVRegister(instr->GetRn());
3255 
3256   SimVRegister not_zn;
3257   not_(vform, not_zn, zn);
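  // Illustrative note: the subtract-with-carry forms (sbclb/sbclt) are
  // modelled by complementing zn and reusing the add-with-carry logic,
  // i.e. zda + ~zn + carry, the usual two's-complement subtraction.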
3258 
3259   switch (form_hash_) {
3260     case "adclb_z_zzz"_h:
3261       adcl(vform, zda, zn, zm, /* top = */ false);
3262       break;
3263     case "adclt_z_zzz"_h:
3264       adcl(vform, zda, zn, zm, /* top = */ true);
3265       break;
3266     case "sbclb_z_zzz"_h:
3267       adcl(vform, zda, not_zn, zm, /* top = */ false);
3268       break;
3269     case "sbclt_z_zzz"_h:
3270       adcl(vform, zda, not_zn, zm, /* top = */ true);
3271       break;
3272     default:
3273       VIXL_UNIMPLEMENTED();
3274   }
3275 }
3276 
3277 void Simulator::Simulate_ZdaT_ZnT_ZmT(const Instruction* instr) {
3278   VectorFormat vform = instr->GetSVEVectorFormat();
3279   SimVRegister& zda = ReadVRegister(instr->GetRd());
3280   SimVRegister& zm = ReadVRegister(instr->GetRm());
3281   SimVRegister& zn = ReadVRegister(instr->GetRn());
3282 
3283   switch (form_hash_) {
3284     case "saba_z_zzz"_h:
3285       saba(vform, zda, zn, zm);
3286       break;
3287     case "uaba_z_zzz"_h:
3288       uaba(vform, zda, zn, zm);
3289       break;
3290     default:
3291       VIXL_UNIMPLEMENTED();
3292   }
3293 }
3294 
3295 void Simulator::SimulateSVEComplexIntMulAdd(const Instruction* instr) {
3296   SimVRegister& zda = ReadVRegister(instr->GetRd());
3297   SimVRegister& zn = ReadVRegister(instr->GetRn());
3298   int rot = instr->ExtractBits(11, 10) * 90;
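  // The two-bit rot field selects a rotation of 0, 90, 180 or 270 degrees.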
3299   // vform and zm are only valid for the vector form of the instruction.
3300   VectorFormat vform = instr->GetSVEVectorFormat();
3301   SimVRegister& zm = ReadVRegister(instr->GetRm());
3302 
3303   // Inputs for the indexed form of the instruction.
3304   SimVRegister& zm_h = ReadVRegister(instr->ExtractBits(18, 16));
3305   SimVRegister& zm_s = ReadVRegister(instr->ExtractBits(19, 16));
3306   int idx_h = instr->ExtractBits(20, 19);
3307   int idx_s = instr->ExtractBit(20);
3308 
3309   switch (form_hash_) {
3310     case "cmla_z_zzz"_h:
3311       cmla(vform, zda, zda, zn, zm, rot);
3312       break;
3313     case "cmla_z_zzzi_h"_h:
3314       cmla(kFormatVnH, zda, zda, zn, zm_h, idx_h, rot);
3315       break;
3316     case "cmla_z_zzzi_s"_h:
3317       cmla(kFormatVnS, zda, zda, zn, zm_s, idx_s, rot);
3318       break;
3319     case "sqrdcmlah_z_zzz"_h:
3320       sqrdcmlah(vform, zda, zda, zn, zm, rot);
3321       break;
3322     case "sqrdcmlah_z_zzzi_h"_h:
3323       sqrdcmlah(kFormatVnH, zda, zda, zn, zm_h, idx_h, rot);
3324       break;
3325     case "sqrdcmlah_z_zzzi_s"_h:
3326       sqrdcmlah(kFormatVnS, zda, zda, zn, zm_s, idx_s, rot);
3327       break;
3328     default:
3329       VIXL_UNIMPLEMENTED();
3330   }
3331 }
3332 
3333 void Simulator::Simulate_ZdaT_ZnT_const(const Instruction* instr) {
3334   SimVRegister& zd = ReadVRegister(instr->GetRd());
3335   SimVRegister& zn = ReadVRegister(instr->GetRn());
3336 
3337   std::pair<int, int> shift_and_lane_size =
3338       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
3339   int lane_size = shift_and_lane_size.second;
3340   VIXL_ASSERT((lane_size >= 0) &&
3341               (static_cast<unsigned>(lane_size) <= kDRegSizeInBytesLog2));
3342   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
3343   int shift_dist = shift_and_lane_size.first;
3344 
3345   switch (form_hash_) {
3346     case "srsra_z_zi"_h:
3347       srsra(vform, zd, zn, shift_dist);
3348       break;
3349     case "ssra_z_zi"_h:
3350       ssra(vform, zd, zn, shift_dist);
3351       break;
3352     case "ursra_z_zi"_h:
3353       ursra(vform, zd, zn, shift_dist);
3354       break;
3355     case "usra_z_zi"_h:
3356       usra(vform, zd, zn, shift_dist);
3357       break;
3358     default:
3359       VIXL_UNIMPLEMENTED();
3360   }
3361 }
3362 
3363 void Simulator::Simulate_ZdaT_ZnTb_ZmTb(const Instruction* instr) {
3364   VectorFormat vform = instr->GetSVEVectorFormat();
3365   SimVRegister& zda = ReadVRegister(instr->GetRd());
3366   SimVRegister& zm = ReadVRegister(instr->GetRm());
3367   SimVRegister& zn = ReadVRegister(instr->GetRn());
3368 
3369   SimVRegister zero, zn_b, zm_b, zn_t, zm_t;
3370   zero.Clear();
3371 
3372   VectorFormat vform_half = VectorFormatHalfWidth(vform);
3373   uzp1(vform_half, zn_b, zn, zero);
3374   uzp1(vform_half, zm_b, zm, zero);
3375   uzp2(vform_half, zn_t, zn, zero);
3376   uzp2(vform_half, zm_t, zm, zero);
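  // Note: with a zero second operand, uzp1/uzp2 gather the even/odd
  // half-width elements into the low half of the destination, which is
  // equivalent in effect to the pack_even_elements/pack_odd_elements helpers
  // used elsewhere in this file.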
3377 
3378   switch (form_hash_) {
3379     case "smlalb_z_zzz"_h:
3380       smlal(vform, zda, zn_b, zm_b);
3381       break;
3382     case "smlalt_z_zzz"_h:
3383       smlal(vform, zda, zn_t, zm_t);
3384       break;
3385     case "smlslb_z_zzz"_h:
3386       smlsl(vform, zda, zn_b, zm_b);
3387       break;
3388     case "smlslt_z_zzz"_h:
3389       smlsl(vform, zda, zn_t, zm_t);
3390       break;
3391     case "sqdmlalb_z_zzz"_h:
3392       sqdmlal(vform, zda, zn_b, zm_b);
3393       break;
3394     case "sqdmlalbt_z_zzz"_h:
3395       sqdmlal(vform, zda, zn_b, zm_t);
3396       break;
3397     case "sqdmlalt_z_zzz"_h:
3398       sqdmlal(vform, zda, zn_t, zm_t);
3399       break;
3400     case "sqdmlslb_z_zzz"_h:
3401       sqdmlsl(vform, zda, zn_b, zm_b);
3402       break;
3403     case "sqdmlslbt_z_zzz"_h:
3404       sqdmlsl(vform, zda, zn_b, zm_t);
3405       break;
3406     case "sqdmlslt_z_zzz"_h:
3407       sqdmlsl(vform, zda, zn_t, zm_t);
3408       break;
3409     case "umlalb_z_zzz"_h:
3410       umlal(vform, zda, zn_b, zm_b);
3411       break;
3412     case "umlalt_z_zzz"_h:
3413       umlal(vform, zda, zn_t, zm_t);
3414       break;
3415     case "umlslb_z_zzz"_h:
3416       umlsl(vform, zda, zn_b, zm_b);
3417       break;
3418     case "umlslt_z_zzz"_h:
3419       umlsl(vform, zda, zn_t, zm_t);
3420       break;
3421     default:
3422       VIXL_UNIMPLEMENTED();
3423   }
3424 }
3425 
3426 void Simulator::SimulateSVEComplexDotProduct(const Instruction* instr) {
3427   VectorFormat vform = instr->GetSVEVectorFormat();
3428   SimVRegister& zda = ReadVRegister(instr->GetRd());
3429   SimVRegister& zn = ReadVRegister(instr->GetRn());
3430   int rot = instr->ExtractBits(11, 10) * 90;
3431   unsigned zm_code = instr->GetRm();
3432   int index = -1;
3433 
3434   switch (form_hash_) {
3435     case "cdot_z_zzz"_h:
3436       // Nothing to do.
3437       break;
3438     case "cdot_z_zzzi_s"_h:
3439       index = zm_code >> 3;
3440       zm_code &= 0x7;
3441       break;
3442     case "cdot_z_zzzi_d"_h:
3443       index = zm_code >> 4;
3444       zm_code &= 0xf;
3445       break;
3446     default:
3447       VIXL_UNIMPLEMENTED();
3448   }
3449 
3450   SimVRegister temp;
3451   SimVRegister& zm = ReadVRegister(zm_code);
3452   if (index >= 0) dup_elements_to_segments(vform, temp, zm, index);
3453   cdot(vform, zda, zda, zn, (index >= 0) ? temp : zm, rot);
3454 }
3455 
3456 void Simulator::SimulateSVEBitwiseTernary(const Instruction* instr) {
3457   VectorFormat vform = kFormatVnD;
3458   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3459   SimVRegister& zm = ReadVRegister(instr->GetRm());
3460   SimVRegister& zk = ReadVRegister(instr->GetRn());
3461   SimVRegister temp;
3462 
3463   switch (form_hash_) {
3464     case "bcax_z_zzz"_h:
3465       bic(vform, temp, zm, zk);
3466       eor(vform, zdn, temp, zdn);
3467       break;
3468     case "bsl1n_z_zzz"_h:
3469       not_(vform, temp, zdn);
3470       bsl(vform, zdn, zk, temp, zm);
3471       break;
3472     case "bsl2n_z_zzz"_h:
3473       not_(vform, temp, zm);
3474       bsl(vform, zdn, zk, zdn, temp);
3475       break;
3476     case "bsl_z_zzz"_h:
3477       bsl(vform, zdn, zk, zdn, zm);
3478       break;
3479     case "eor3_z_zzz"_h:
3480       eor(vform, temp, zdn, zm);
3481       eor(vform, zdn, temp, zk);
3482       break;
3483     case "nbsl_z_zzz"_h:
3484       bsl(vform, zdn, zk, zdn, zm);
3485       not_(vform, zdn, zdn);
3486       break;
3487     default:
3488       VIXL_UNIMPLEMENTED();
3489   }
3490 }
3491 
3492 void Simulator::SimulateSVEHalvingAddSub(const Instruction* instr) {
3493   VectorFormat vform = instr->GetSVEVectorFormat();
3494   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3495   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3496   SimVRegister& zm = ReadVRegister(instr->GetRn());
3497   SimVRegister result;
3498 
3499   switch (form_hash_) {
3500     case "shadd_z_p_zz"_h:
3501       add(vform, result, zdn, zm).Halve(vform);
3502       break;
3503     case "shsub_z_p_zz"_h:
3504       sub(vform, result, zdn, zm).Halve(vform);
3505       break;
3506     case "shsubr_z_p_zz"_h:
3507       sub(vform, result, zm, zdn).Halve(vform);
3508       break;
3509     case "srhadd_z_p_zz"_h:
3510       add(vform, result, zdn, zm).Halve(vform).Round(vform);
3511       break;
3512     case "uhadd_z_p_zz"_h:
3513       add(vform, result, zdn, zm).Uhalve(vform);
3514       break;
3515     case "uhsub_z_p_zz"_h:
3516       sub(vform, result, zdn, zm).Uhalve(vform);
3517       break;
3518     case "uhsubr_z_p_zz"_h:
3519       sub(vform, result, zm, zdn).Uhalve(vform);
3520       break;
3521     case "urhadd_z_p_zz"_h:
3522       add(vform, result, zdn, zm).Uhalve(vform).Round(vform);
3523       break;
3524     default:
3525       VIXL_UNIMPLEMENTED();
3526       break;
3527   }
3528   mov_merging(vform, zdn, pg, result);
3529 }
3530 
3531 void Simulator::SimulateSVESaturatingArithmetic(const Instruction* instr) {
3532   VectorFormat vform = instr->GetSVEVectorFormat();
3533   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3534   SimVRegister& zm = ReadVRegister(instr->GetRn());
3535   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3536   SimVRegister result;
3537 
3538   switch (form_hash_) {
3539     case "sqadd_z_p_zz"_h:
3540       add(vform, result, zdn, zm).SignedSaturate(vform);
3541       break;
3542     case "sqsub_z_p_zz"_h:
3543       sub(vform, result, zdn, zm).SignedSaturate(vform);
3544       break;
3545     case "sqsubr_z_p_zz"_h:
3546       sub(vform, result, zm, zdn).SignedSaturate(vform);
3547       break;
3548     case "suqadd_z_p_zz"_h:
3549       suqadd(vform, result, zdn, zm);
3550       break;
3551     case "uqadd_z_p_zz"_h:
3552       add(vform, result, zdn, zm).UnsignedSaturate(vform);
3553       break;
3554     case "uqsub_z_p_zz"_h:
3555       sub(vform, result, zdn, zm).UnsignedSaturate(vform);
3556       break;
3557     case "uqsubr_z_p_zz"_h:
3558       sub(vform, result, zm, zdn).UnsignedSaturate(vform);
3559       break;
3560     case "usqadd_z_p_zz"_h:
3561       usqadd(vform, result, zdn, zm);
3562       break;
3563     default:
3564       VIXL_UNIMPLEMENTED();
3565       break;
3566   }
3567   mov_merging(vform, zdn, pg, result);
3568 }
3569 
3570 void Simulator::SimulateSVEIntArithPair(const Instruction* instr) {
3571   VectorFormat vform = instr->GetSVEVectorFormat();
3572   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3573   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3574   SimVRegister& zm = ReadVRegister(instr->GetRn());
3575   SimVRegister result;
3576 
3577   switch (form_hash_) {
3578     case "addp_z_p_zz"_h:
3579       addp(vform, result, zdn, zm);
3580       break;
3581     case "smaxp_z_p_zz"_h:
3582       smaxp(vform, result, zdn, zm);
3583       break;
3584     case "sminp_z_p_zz"_h:
3585       sminp(vform, result, zdn, zm);
3586       break;
3587     case "umaxp_z_p_zz"_h:
3588       umaxp(vform, result, zdn, zm);
3589       break;
3590     case "uminp_z_p_zz"_h:
3591       uminp(vform, result, zdn, zm);
3592       break;
3593     default:
3594       VIXL_UNIMPLEMENTED();
3595       break;
3596   }
3597   mov_merging(vform, zdn, pg, result);
3598 }
3599 
3600 void Simulator::Simulate_ZdnT_PgM_ZdnT_ZmT(const Instruction* instr) {
3601   VectorFormat vform = instr->GetSVEVectorFormat();
3602   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3603   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3604   SimVRegister& zm = ReadVRegister(instr->GetRn());
3605   SimVRegister result;
3606 
3607   switch (form_hash_) {
3608     case "faddp_z_p_zz"_h:
3609       faddp(vform, result, zdn, zm);
3610       break;
3611     case "fmaxnmp_z_p_zz"_h:
3612       fmaxnmp(vform, result, zdn, zm);
3613       break;
3614     case "fmaxp_z_p_zz"_h:
3615       fmaxp(vform, result, zdn, zm);
3616       break;
3617     case "fminnmp_z_p_zz"_h:
3618       fminnmp(vform, result, zdn, zm);
3619       break;
3620     case "fminp_z_p_zz"_h:
3621       fminp(vform, result, zdn, zm);
3622       break;
3623     default:
3624       VIXL_UNIMPLEMENTED();
3625   }
3626   mov_merging(vform, zdn, pg, result);
3627 }
3628 
3629 void Simulator::Simulate_ZdnT_PgM_ZdnT_const(const Instruction* instr) {
3630   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3631   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3632 
3633   std::pair<int, int> shift_and_lane_size =
3634       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ true);
3635   unsigned lane_size = shift_and_lane_size.second;
3636   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
3637   int right_shift_dist = shift_and_lane_size.first;
3638   int left_shift_dist = (8 << lane_size) - right_shift_dist;
3639   SimVRegister result;
3640 
3641   switch (form_hash_) {
3642     case "sqshl_z_p_zi"_h:
3643       sqshl(vform, result, zdn, left_shift_dist);
3644       break;
3645     case "sqshlu_z_p_zi"_h:
3646       sqshlu(vform, result, zdn, left_shift_dist);
3647       break;
3648     case "srshr_z_p_zi"_h:
3649       sshr(vform, result, zdn, right_shift_dist).Round(vform);
3650       break;
3651     case "uqshl_z_p_zi"_h:
3652       uqshl(vform, result, zdn, left_shift_dist);
3653       break;
3654     case "urshr_z_p_zi"_h:
3655       ushr(vform, result, zdn, right_shift_dist).Round(vform);
3656       break;
3657     default:
3658       VIXL_UNIMPLEMENTED();
3659   }
3660   mov_merging(vform, zdn, pg, result);
3661 }
3662 
3663 void Simulator::SimulateSVEExclusiveOrRotate(const Instruction* instr) {
3664   VIXL_ASSERT(form_hash_ == "xar_z_zzi"_h);
3665 
3666   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3667   SimVRegister& zm = ReadVRegister(instr->GetRn());
3668 
3669   std::pair<int, int> shift_and_lane_size =
3670       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
3671   unsigned lane_size = shift_and_lane_size.second;
3672   VIXL_ASSERT(lane_size <= kDRegSizeInBytesLog2);
3673   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
3674   int shift_dist = shift_and_lane_size.first;
3675   eor(vform, zdn, zdn, zm);
3676   ror(vform, zdn, zdn, shift_dist);
3677 }
3678 
3679 void Simulator::Simulate_ZdnT_ZdnT_ZmT_const(const Instruction* instr) {
3680   VectorFormat vform = instr->GetSVEVectorFormat();
3681   SimVRegister& zdn = ReadVRegister(instr->GetRd());
3682   SimVRegister& zm = ReadVRegister(instr->GetRn());
3683   int rot = (instr->ExtractBit(10) == 0) ? 90 : 270;
3684 
3685   switch (form_hash_) {
3686     case "cadd_z_zz"_h:
3687       cadd(vform, zdn, zdn, zm, rot);
3688       break;
3689     case "sqcadd_z_zz"_h:
3690       cadd(vform, zdn, zdn, zm, rot, /* saturate = */ true);
3691       break;
3692     default:
3693       VIXL_UNIMPLEMENTED();
3694   }
3695 }
3696 
3697 void Simulator::Simulate_ZtD_PgZ_ZnD_Xm(const Instruction* instr) {
3698   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3699   SimVRegister& zn = ReadVRegister(instr->GetRn());
3700   uint64_t xm = ReadXRegister(instr->GetRm());
3701 
3702   LogicSVEAddressVector addr(xm, &zn, kFormatVnD);
3703   int msize = -1;
3704   bool is_signed = false;
3705 
3706   switch (form_hash_) {
3707     case "ldnt1b_z_p_ar_d_64_unscaled"_h:
3708       msize = 0;
3709       break;
3710     case "ldnt1d_z_p_ar_d_64_unscaled"_h:
3711       msize = 3;
3712       break;
3713     case "ldnt1h_z_p_ar_d_64_unscaled"_h:
3714       msize = 1;
3715       break;
3716     case "ldnt1sb_z_p_ar_d_64_unscaled"_h:
3717       msize = 0;
3718       is_signed = true;
3719       break;
3720     case "ldnt1sh_z_p_ar_d_64_unscaled"_h:
3721       msize = 1;
3722       is_signed = true;
3723       break;
3724     case "ldnt1sw_z_p_ar_d_64_unscaled"_h:
3725       msize = 2;
3726       is_signed = true;
3727       break;
3728     case "ldnt1w_z_p_ar_d_64_unscaled"_h:
3729       msize = 2;
3730       break;
3731     default:
3732       VIXL_UNIMPLEMENTED();
3733   }
3734   addr.SetMsizeInBytesLog2(msize);
3735   SVEStructuredLoadHelper(kFormatVnD, pg, instr->GetRt(), addr, is_signed);
3736 }
3737 
3738 void Simulator::Simulate_ZtD_Pg_ZnD_Xm(const Instruction* instr) {
3739   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3740   SimVRegister& zn = ReadVRegister(instr->GetRn());
3741   uint64_t xm = ReadXRegister(instr->GetRm());
3742 
3743   LogicSVEAddressVector addr(xm, &zn, kFormatVnD);
3744   VIXL_ASSERT((form_hash_ == "stnt1b_z_p_ar_d_64_unscaled"_h) ||
3745               (form_hash_ == "stnt1d_z_p_ar_d_64_unscaled"_h) ||
3746               (form_hash_ == "stnt1h_z_p_ar_d_64_unscaled"_h) ||
3747               (form_hash_ == "stnt1w_z_p_ar_d_64_unscaled"_h));
3748 
3749   addr.SetMsizeInBytesLog2(
3750       instr->GetSVEMsizeFromDtype(/* is_signed = */ false));
3751   SVEStructuredStoreHelper(kFormatVnD, pg, instr->GetRt(), addr);
3752 }
3753 
3754 void Simulator::Simulate_ZtS_PgZ_ZnS_Xm(const Instruction* instr) {
3755   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3756   SimVRegister& zn = ReadVRegister(instr->GetRn());
3757   uint64_t xm = ReadXRegister(instr->GetRm());
3758 
3759   LogicSVEAddressVector addr(xm, &zn, kFormatVnS);
3760   int msize = -1;
3761   bool is_signed = false;
3762 
3763   switch (form_hash_) {
3764     case "ldnt1b_z_p_ar_s_x32_unscaled"_h:
3765       msize = 0;
3766       break;
3767     case "ldnt1h_z_p_ar_s_x32_unscaled"_h:
3768       msize = 1;
3769       break;
3770     case "ldnt1sb_z_p_ar_s_x32_unscaled"_h:
3771       msize = 0;
3772       is_signed = true;
3773       break;
3774     case "ldnt1sh_z_p_ar_s_x32_unscaled"_h:
3775       msize = 1;
3776       is_signed = true;
3777       break;
3778     case "ldnt1w_z_p_ar_s_x32_unscaled"_h:
3779       msize = 2;
3780       break;
3781     default:
3782       VIXL_UNIMPLEMENTED();
3783   }
3784   addr.SetMsizeInBytesLog2(msize);
3785   SVEStructuredLoadHelper(kFormatVnS, pg, instr->GetRt(), addr, is_signed);
3786 }
3787 
3788 void Simulator::Simulate_ZtS_Pg_ZnS_Xm(const Instruction* instr) {
3789   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
3790   SimVRegister& zn = ReadVRegister(instr->GetRn());
3791   uint64_t xm = ReadXRegister(instr->GetRm());
3792 
3793   LogicSVEAddressVector addr(xm, &zn, kFormatVnS);
3794   VIXL_ASSERT((form_hash_ == "stnt1b_z_p_ar_s_x32_unscaled"_h) ||
3795               (form_hash_ == "stnt1h_z_p_ar_s_x32_unscaled"_h) ||
3796               (form_hash_ == "stnt1w_z_p_ar_s_x32_unscaled"_h));
3797 
3798   addr.SetMsizeInBytesLog2(
3799       instr->GetSVEMsizeFromDtype(/* is_signed = */ false));
3800   SVEStructuredStoreHelper(kFormatVnS, pg, instr->GetRt(), addr);
3801 }
3802 
3803 void Simulator::VisitReserved(const Instruction* instr) {
3804   // UDF is the only instruction in this group, and the Decoder is precise here.
3805   VIXL_ASSERT(instr->Mask(ReservedMask) == UDF);
3806 
3807   printf("UDF (permanently undefined) instruction at %p: 0x%08" PRIx32 "\n",
3808          reinterpret_cast<const void*>(instr),
3809          instr->GetInstructionBits());
3810   VIXL_ABORT_WITH_MSG("UNDEFINED (UDF)\n");
3811 }
3812 
3813 
3814 void Simulator::VisitUnimplemented(const Instruction* instr) {
3815   printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
3816          reinterpret_cast<const void*>(instr),
3817          instr->GetInstructionBits());
3818   VIXL_UNIMPLEMENTED();
3819 }
3820 
3821 
3822 void Simulator::VisitUnallocated(const Instruction* instr) {
3823   printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
3824          reinterpret_cast<const void*>(instr),
3825          instr->GetInstructionBits());
3826   VIXL_UNIMPLEMENTED();
3827 }
3828 
3829 
3830 void Simulator::VisitPCRelAddressing(const Instruction* instr) {
3831   VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
3832               (instr->Mask(PCRelAddressingMask) == ADRP));
3833 
3834   WriteRegister(instr->GetRd(), instr->GetImmPCOffsetTarget());
3835 }
3836 
3837 
3838 void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
3839   switch (instr->Mask(UnconditionalBranchMask)) {
3840     case BL:
3841       WriteLr(instr->GetNextInstruction());
3842       GCSPush(reinterpret_cast<uint64_t>(instr->GetNextInstruction()));
3843       VIXL_FALLTHROUGH();
3844     case B:
3845       WritePc(instr->GetImmPCOffsetTarget());
3846       break;
3847     default:
3848       VIXL_UNREACHABLE();
3849   }
3850 }
3851 
3852 
3853 void Simulator::VisitConditionalBranch(const Instruction* instr) {
3854   VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
3855   if (ConditionPassed(instr->GetConditionBranch())) {
3856     WritePc(instr->GetImmPCOffsetTarget());
3857   }
3858 }
3859 
3860 BType Simulator::GetBTypeFromInstruction(const Instruction* instr) const {
3861   switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
3862     case BLR:
3863     case BLRAA:
3864     case BLRAB:
3865     case BLRAAZ:
3866     case BLRABZ:
3867       return BranchAndLink;
3868     case BR:
3869     case BRAA:
3870     case BRAB:
3871     case BRAAZ:
3872     case BRABZ:
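      // x16 and x17 are the AArch64 intra-procedure-call scratch registers;
      // indirect branches that use them, or that originate from an unguarded
      // page, are classified as BranchFromUnguardedOrToIP (the BTI-related
      // distinction made here).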
3873       if ((instr->GetRn() == 16) || (instr->GetRn() == 17) ||
3874           !PcIsInGuardedPage()) {
3875         return BranchFromUnguardedOrToIP;
3876       }
3877       return BranchFromGuardedNotToIP;
3878   }
3879   return DefaultBType;
3880 }
3881 
3882 void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
3883   bool authenticate = false;
3884   bool link = false;
3885   bool ret = false;
3886   bool compare_gcs = false;
3887   uint64_t addr = ReadXRegister(instr->GetRn());
3888   uint64_t context = 0;
3889 
3890   switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
3891     case BLR:
3892       link = true;
3893       VIXL_FALLTHROUGH();
3894     case BR:
3895       break;
3896 
3897     case BLRAAZ:
3898     case BLRABZ:
3899       link = true;
3900       VIXL_FALLTHROUGH();
3901     case BRAAZ:
3902     case BRABZ:
3903       authenticate = true;
3904       break;
3905 
3906     case BLRAA:
3907     case BLRAB:
3908       link = true;
3909       VIXL_FALLTHROUGH();
3910     case BRAA:
3911     case BRAB:
3912       authenticate = true;
3913       context = ReadXRegister(instr->GetRd());
3914       break;
3915 
3916     case RETAA:
3917     case RETAB:
3918       authenticate = true;
3919       addr = ReadXRegister(kLinkRegCode);
3920       context = ReadXRegister(31, Reg31IsStackPointer);
3921       VIXL_FALLTHROUGH();
3922     case RET:
3923       compare_gcs = true;
3924       ret = true;
3925       break;
3926     default:
3927       VIXL_UNREACHABLE();
3928   }
3929 
3930   if (authenticate) {
3931     PACKey key = (instr->ExtractBit(10) == 0) ? kPACKeyIA : kPACKeyIB;
3932     addr = AuthPAC(addr, context, key, kInstructionPointer);
3933 
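    // Descriptive note: on an authentication failure, the simulated AuthPAC is
    // expected to plant a non-zero error code in the two bits just below the
    // top PAC bit, so a non-zero field at error_lsb means the pointer did not
    // authenticate (see AuthPAC for the exact behaviour).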
3934     int error_lsb = GetTopPACBit(addr, kInstructionPointer) - 2;
3935     if (((addr >> error_lsb) & 0x3) != 0x0) {
3936       VIXL_ABORT_WITH_MSG("Failed to authenticate pointer.");
3937     }
3938   }
3939 
3940   if (compare_gcs) {
3941     uint64_t expected_lr = GCSPeek();
3942     char msg[128];
3943     if (expected_lr != 0) {
3944       if ((expected_lr & 0x3) != 0) {
3945         snprintf(msg,
3946                  sizeof(msg),
3947                  "GCS contains misaligned return address: 0x%016" PRIx64 "\n",
3948                  expected_lr);
3949         ReportGCSFailure(msg);
3950       } else if ((addr != 0) && (addr != expected_lr)) {
3951         snprintf(msg,
3952                  sizeof(msg),
3953                  "GCS mismatch: lr = 0x%016" PRIx64 ", gcs = 0x%016" PRIx64
3954                  "\n",
3955                  addr,
3956                  expected_lr);
3957         ReportGCSFailure(msg);
3958       }
3959       GCSPop();
3960     }
3961   }
3962 
3963   if (link) {
3964     WriteLr(instr->GetNextInstruction());
3965     GCSPush(reinterpret_cast<uint64_t>(instr->GetNextInstruction()));
3966   }
3967 
3968   if (!ret) {
3969     // Check for interceptions to the target address; if one is found, call it.
3970     MetaDataDepot::BranchInterceptionAbstract* interception =
3971         meta_data_.FindBranchInterception(addr);
3972 
3973     if (interception != nullptr) {
3974       // Instead of writing the function's address to the PC, call the
3975       // function's interception directly. We change the address that will be
3976       // branched to so that execution continues from the address in the LR
3977       // afterwards. Note: the interception may modify the LR, so save it
3978       // before calling the interception.
3979       addr = ReadRegister<uint64_t>(kLinkRegCode);
3980       (*interception)(this);
3981     }
3982   }
3983 
3984   WriteNextBType(GetBTypeFromInstruction(instr));
3985   WritePc(Instruction::Cast(addr));
3986 }
3987 
3988 
3989 void Simulator::VisitTestBranch(const Instruction* instr) {
3990   unsigned bit_pos =
3991       (instr->GetImmTestBranchBit5() << 5) | instr->GetImmTestBranchBit40();
3992   bool bit_zero = ((ReadXRegister(instr->GetRt()) >> bit_pos) & 1) == 0;
3993   bool take_branch = false;
3994   switch (instr->Mask(TestBranchMask)) {
3995     case TBZ:
3996       take_branch = bit_zero;
3997       break;
3998     case TBNZ:
3999       take_branch = !bit_zero;
4000       break;
4001     default:
4002       VIXL_UNIMPLEMENTED();
4003   }
4004   if (take_branch) {
4005     WritePc(instr->GetImmPCOffsetTarget());
4006   }
4007 }
4008 
4009 
4010 void Simulator::VisitCompareBranch(const Instruction* instr) {
4011   unsigned rt = instr->GetRt();
4012   bool take_branch = false;
4013   switch (instr->Mask(CompareBranchMask)) {
4014     case CBZ_w:
4015       take_branch = (ReadWRegister(rt) == 0);
4016       break;
4017     case CBZ_x:
4018       take_branch = (ReadXRegister(rt) == 0);
4019       break;
4020     case CBNZ_w:
4021       take_branch = (ReadWRegister(rt) != 0);
4022       break;
4023     case CBNZ_x:
4024       take_branch = (ReadXRegister(rt) != 0);
4025       break;
4026     default:
4027       VIXL_UNIMPLEMENTED();
4028   }
4029   if (take_branch) {
4030     WritePc(instr->GetImmPCOffsetTarget());
4031   }
4032 }
4033 
4034 
4035 void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) {
4036   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4037   bool set_flags = instr->GetFlagsUpdate();
4038   int64_t new_val = 0;
4039   Instr operation = instr->Mask(AddSubOpMask);
4040 
4041   switch (operation) {
4042     case ADD:
4043     case ADDS: {
4044       new_val = AddWithCarry(reg_size,
4045                              set_flags,
4046                              ReadRegister(reg_size,
4047                                           instr->GetRn(),
4048                                           instr->GetRnMode()),
4049                              op2);
4050       break;
4051     }
4052     case SUB:
4053     case SUBS: {
4054       new_val = AddWithCarry(reg_size,
4055                              set_flags,
4056                              ReadRegister(reg_size,
4057                                           instr->GetRn(),
4058                                           instr->GetRnMode()),
4059                              ~op2,
4060                              1);
4061       break;
4062     }
4063     default:
4064       VIXL_UNREACHABLE();
4065   }
4066 
4067   WriteRegister(reg_size,
4068                 instr->GetRd(),
4069                 new_val,
4070                 LogRegWrites,
4071                 instr->GetRdMode());
4072 }
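// Note that SUB/SUBS above reuse AddWithCarry via the two's complement
// identity a - b == a + ~b + 1; for example, 5 - 3 == 5 + ~3 + 1 ==
// 5 + 0xff...fc + 1 == 2 (mod 2^64).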
4073 
4074 
4075 void Simulator::VisitAddSubShifted(const Instruction* instr) {
4076   // Add/sub/adds/subs don't allow ROR as a shift mode.
4077   VIXL_ASSERT(instr->GetShiftDP() != ROR);
4078 
4079   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4080   int64_t op2 = ShiftOperand(reg_size,
4081                              ReadRegister(reg_size, instr->GetRm()),
4082                              static_cast<Shift>(instr->GetShiftDP()),
4083                              instr->GetImmDPShift());
4084   AddSubHelper(instr, op2);
4085 }
4086 
4087 
4088 void Simulator::VisitAddSubImmediate(const Instruction* instr) {
4089   int64_t op2 = instr->GetImmAddSub()
4090                 << ((instr->GetImmAddSubShift() == 1) ? 12 : 0);
4091   AddSubHelper(instr, op2);
4092 }
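// The optional shift means that, for example, `add x0, x1, #1, lsl #12` adds
// 4096: op2 is the 12-bit immediate either used as-is or shifted left by
// twelve bits.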
4093 
4094 
4095 void Simulator::VisitAddSubExtended(const Instruction* instr) {
4096   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4097   int64_t op2 = ExtendValue(reg_size,
4098                             ReadRegister(reg_size, instr->GetRm()),
4099                             static_cast<Extend>(instr->GetExtendMode()),
4100                             instr->GetImmExtendShift());
4101   AddSubHelper(instr, op2);
4102 }
4103 
4104 
4105 void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
4106   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4107   int64_t op2 = ReadRegister(reg_size, instr->GetRm());
4108   int64_t new_val;
4109 
4110   if ((instr->Mask(AddSubOpMask) == SUB) ||
4111       (instr->Mask(AddSubOpMask) == SUBS)) {
4112     op2 = ~op2;
4113   }
4114 
4115   new_val = AddWithCarry(reg_size,
4116                          instr->GetFlagsUpdate(),
4117                          ReadRegister(reg_size, instr->GetRn()),
4118                          op2,
4119                          ReadC());
4120 
4121   WriteRegister(reg_size, instr->GetRd(), new_val);
4122 }
4123 
4124 
4125 void Simulator::VisitRotateRightIntoFlags(const Instruction* instr) {
4126   switch (instr->Mask(RotateRightIntoFlagsMask)) {
4127     case RMIF: {
4128       uint64_t value = ReadRegister<uint64_t>(instr->GetRn());
4129       unsigned shift = instr->GetImmRMIFRotation();
4130       unsigned mask = instr->GetNzcv();
4131       uint64_t rotated = RotateRight(value, shift, kXRegSize);
4132 
4133       ReadNzcv().SetFlags((rotated & mask) | (ReadNzcv().GetFlags() & ~mask));
4134       break;
4135     }
4136   }
4137 }
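// For example, `rmif x0, #60, #8` rotates x0 right by 60 so that bit 63 lands
// in bit position 3, then copies only that bit into N (mask 0b1000), leaving
// Z, C and V unchanged.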
4138 
4139 
4140 void Simulator::VisitEvaluateIntoFlags(const Instruction* instr) {
4141   uint32_t value = ReadRegister<uint32_t>(instr->GetRn());
4142   unsigned msb = (instr->Mask(EvaluateIntoFlagsMask) == SETF16) ? 15 : 7;
4143 
4144   unsigned sign_bit = (value >> msb) & 1;
4145   unsigned overflow_bit = (value >> (msb + 1)) & 1;
4146   ReadNzcv().SetN(sign_bit);
4147   ReadNzcv().SetZ((value << (31 - msb)) == 0);
4148   ReadNzcv().SetV(sign_bit ^ overflow_bit);
4149 }
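// Worked example for SETF8: with w0 = 0x80, msb is bit 7, so N = 1 (bit 7),
// Z = 0 (the low byte is non-zero) and V = 1 (bit 7 XOR bit 8); C is left
// unchanged.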
4150 
4151 
4152 void Simulator::VisitLogicalShifted(const Instruction* instr) {
4153   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4154   Shift shift_type = static_cast<Shift>(instr->GetShiftDP());
4155   unsigned shift_amount = instr->GetImmDPShift();
4156   int64_t op2 = ShiftOperand(reg_size,
4157                              ReadRegister(reg_size, instr->GetRm()),
4158                              shift_type,
4159                              shift_amount);
4160   if (instr->Mask(NOT) == NOT) {
4161     op2 = ~op2;
4162   }
4163   LogicalHelper(instr, op2);
4164 }
4165 
4166 
4167 void Simulator::VisitLogicalImmediate(const Instruction* instr) {
4168   if (instr->GetImmLogical() == 0) {
4169     VIXL_UNIMPLEMENTED();
4170   } else {
4171     LogicalHelper(instr, instr->GetImmLogical());
4172   }
4173 }
4174 
4175 
4176 void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
4177   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4178   int64_t op1 = ReadRegister(reg_size, instr->GetRn());
4179   int64_t result = 0;
4180   bool update_flags = false;
4181 
4182   // Switch on the logical operation, stripping out the NOT bit, as it has a
4183   // different meaning for logical immediate instructions.
4184   switch (instr->Mask(LogicalOpMask & ~NOT)) {
4185     case ANDS:
4186       update_flags = true;
4187       VIXL_FALLTHROUGH();
4188     case AND:
4189       result = op1 & op2;
4190       break;
4191     case ORR:
4192       result = op1 | op2;
4193       break;
4194     case EOR:
4195       result = op1 ^ op2;
4196       break;
4197     default:
4198       VIXL_UNIMPLEMENTED();
4199   }
4200 
4201   if (update_flags) {
4202     ReadNzcv().SetN(CalcNFlag(result, reg_size));
4203     ReadNzcv().SetZ(CalcZFlag(result));
4204     ReadNzcv().SetC(0);
4205     ReadNzcv().SetV(0);
4206     LogSystemRegister(NZCV);
4207   }
4208 
4209   WriteRegister(reg_size,
4210                 instr->GetRd(),
4211                 result,
4212                 LogRegWrites,
4213                 instr->GetRdMode());
4214 }
4215 
4216 
4217 void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
4218   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4219   ConditionalCompareHelper(instr, ReadRegister(reg_size, instr->GetRm()));
4220 }
4221 
4222 
4223 void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
4224   ConditionalCompareHelper(instr, instr->GetImmCondCmp());
4225 }
4226 
4227 
4228 void Simulator::ConditionalCompareHelper(const Instruction* instr,
4229                                          int64_t op2) {
4230   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
4231   int64_t op1 = ReadRegister(reg_size, instr->GetRn());
4232 
4233   if (ConditionPassed(instr->GetCondition())) {
4234     // If the condition passes, set the status flags to the result of comparing
4235     // the operands.
4236     if (instr->Mask(ConditionalCompareMask) == CCMP) {
4237       AddWithCarry(reg_size, true, op1, ~op2, 1);
4238     } else {
4239       VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
4240       AddWithCarry(reg_size, true, op1, op2, 0);
4241     }
4242   } else {
4243     // If the condition fails, set the status flags to the nzcv immediate.
4244     ReadNzcv().SetFlags(instr->GetNzcv());
4245     LogSystemRegister(NZCV);
4246   }
4247 }
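// For example, `ccmp x0, x1, #0, eq` behaves like `cmp x0, x1` when the EQ
// condition holds and otherwise writes the immediate 0b0000 directly into
// NZCV, which is how chained conditional comparisons are built.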
4248 
4249 
4250 void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
4251   int offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
4252   LoadStoreHelper(instr, offset, Offset);
4253 }
4254 
4255 
4256 void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
4257   LoadStoreHelper(instr, instr->GetImmLS(), Offset);
4258 }
4259 
4260 
4261 void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
4262   LoadStoreHelper(instr, instr->GetImmLS(), PreIndex);
4263 }
4264 
4265 
4266 void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
4267   LoadStoreHelper(instr, instr->GetImmLS(), PostIndex);
4268 }
4269 
4270 
4271 template <typename T1, typename T2>
4272 void Simulator::LoadAcquireRCpcUnscaledOffsetHelper(const Instruction* instr) {
4273   unsigned rt = instr->GetRt();
4274   unsigned rn = instr->GetRn();
4275 
4276   unsigned element_size = sizeof(T2);
4277   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
4278   int offset = instr->GetImmLS();
4279   address += offset;
4280 
4281   // Verify that the address is available to the host.
4282   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
4283 
4284   // Check the alignment of `address`.
4285   if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) {
4286     VIXL_ALIGNMENT_EXCEPTION();
4287   }
4288 
4289   VIXL_DEFINE_OR_RETURN(value, MemRead<T2>(address));
4290 
4291   WriteRegister<T1>(rt, static_cast<T1>(value));
4292 
4293   // Approximate load-acquire by issuing a full barrier after the load.
4294   VIXL_SYNC();
4295 
4296   LogRead(rt, GetPrintRegisterFormat(element_size), address);
4297 }
4298 
4299 
4300 template <typename T>
4301 void Simulator::StoreReleaseUnscaledOffsetHelper(const Instruction* instr) {
4302   unsigned rt = instr->GetRt();
4303   unsigned rn = instr->GetRn();
4304 
4305   unsigned element_size = sizeof(T);
4306   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
4307   int offset = instr->GetImmLS();
4308   address += offset;
4309 
4310   // Verify that the address is available to the host.
4311   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
4312 
4313   // Check the alignment of `address`.
4314   if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) {
4315     VIXL_ALIGNMENT_EXCEPTION();
4316   }
4317 
4318   // Approximate store-release by issuing a full barrier before the store.
4319   VIXL_SYNC();
4320 
4321   if (!MemWrite<T>(address, ReadRegister<T>(rt))) return;
4322 
4323   LogWrite(rt, GetPrintRegisterFormat(element_size), address);
4324 }
4325 
4326 
4327 void Simulator::VisitLoadStoreRCpcUnscaledOffset(const Instruction* instr) {
4328   switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) {
4329     case LDAPURB:
4330       LoadAcquireRCpcUnscaledOffsetHelper<uint8_t, uint8_t>(instr);
4331       break;
4332     case LDAPURH:
4333       LoadAcquireRCpcUnscaledOffsetHelper<uint16_t, uint16_t>(instr);
4334       break;
4335     case LDAPUR_w:
4336       LoadAcquireRCpcUnscaledOffsetHelper<uint32_t, uint32_t>(instr);
4337       break;
4338     case LDAPUR_x:
4339       LoadAcquireRCpcUnscaledOffsetHelper<uint64_t, uint64_t>(instr);
4340       break;
4341     case LDAPURSB_w:
4342       LoadAcquireRCpcUnscaledOffsetHelper<int32_t, int8_t>(instr);
4343       break;
4344     case LDAPURSB_x:
4345       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int8_t>(instr);
4346       break;
4347     case LDAPURSH_w:
4348       LoadAcquireRCpcUnscaledOffsetHelper<int32_t, int16_t>(instr);
4349       break;
4350     case LDAPURSH_x:
4351       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int16_t>(instr);
4352       break;
4353     case LDAPURSW:
4354       LoadAcquireRCpcUnscaledOffsetHelper<int64_t, int32_t>(instr);
4355       break;
4356     case STLURB:
4357       StoreReleaseUnscaledOffsetHelper<uint8_t>(instr);
4358       break;
4359     case STLURH:
4360       StoreReleaseUnscaledOffsetHelper<uint16_t>(instr);
4361       break;
4362     case STLUR_w:
4363       StoreReleaseUnscaledOffsetHelper<uint32_t>(instr);
4364       break;
4365     case STLUR_x:
4366       StoreReleaseUnscaledOffsetHelper<uint64_t>(instr);
4367       break;
4368   }
4369 }
4370 
4371 
4372 void Simulator::VisitLoadStorePAC(const Instruction* instr) {
4373   unsigned dst = instr->GetRt();
4374   unsigned addr_reg = instr->GetRn();
4375 
4376   uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer);
4377 
4378   PACKey key = (instr->ExtractBit(23) == 0) ? kPACKeyDA : kPACKeyDB;
4379   address = AuthPAC(address, 0, key, kDataPointer);
4380 
4381   int error_lsb = GetTopPACBit(address, kInstructionPointer) - 2;
4382   if (((address >> error_lsb) & 0x3) != 0x0) {
4383     VIXL_ABORT_WITH_MSG("Failed to authenticate pointer.");
4384   }
4385 
4386 
4387   if ((addr_reg == 31) && ((address % 16) != 0)) {
4388     // When the base register is SP the stack pointer is required to be
4389     // quadword aligned prior to the address calculation and write-backs.
4390     // Misalignment will cause a stack alignment fault.
4391     VIXL_ALIGNMENT_EXCEPTION();
4392   }
4393 
4394   int64_t offset = instr->GetImmLSPAC();
4395   address += offset;
4396 
4397   if (instr->Mask(LoadStorePACPreBit) == LoadStorePACPreBit) {
4398     // Pre-index mode.
4399     VIXL_ASSERT(offset != 0);
4400     WriteXRegister(addr_reg, address, LogRegWrites, Reg31IsStackPointer);
4401   }
4402 
4403   uintptr_t addr_ptr = static_cast<uintptr_t>(address);
4404 
4405   // Verify that the calculated address is available to the host.
4406   VIXL_ASSERT(address == addr_ptr);
4407 
4408   VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(addr_ptr));
4409 
4410   WriteXRegister(dst, value, NoRegLog);
4411   unsigned access_size = 1 << 3;
4412   LogRead(dst, GetPrintRegisterFormatForSize(access_size), addr_ptr);
4413 }
4414 
4415 
4416 void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
4417   Extend ext = static_cast<Extend>(instr->GetExtendMode());
4418   VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
4419   unsigned shift_amount = instr->GetImmShiftLS() * instr->GetSizeLS();
4420 
4421   int64_t offset =
4422       ExtendValue(kXRegSize, ReadXRegister(instr->GetRm()), ext, shift_amount);
4423   LoadStoreHelper(instr, offset, Offset);
4424 }
4425 
4426 
4427 void Simulator::LoadStoreHelper(const Instruction* instr,
4428                                 int64_t offset,
4429                                 AddrMode addrmode) {
4430   unsigned srcdst = instr->GetRt();
4431   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
4432 
4433   bool rt_is_vreg = false;
4434   int extend_to_size = 0;
4435   LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
4436   switch (op) {
4437     case LDRB_w: {
4438       VIXL_DEFINE_OR_RETURN(value, MemRead<uint8_t>(address));
4439       WriteWRegister(srcdst, value, NoRegLog);
4440       extend_to_size = kWRegSizeInBytes;
4441       break;
4442     }
4443     case LDRH_w: {
4444       VIXL_DEFINE_OR_RETURN(value, MemRead<uint16_t>(address));
4445       WriteWRegister(srcdst, value, NoRegLog);
4446       extend_to_size = kWRegSizeInBytes;
4447       break;
4448     }
4449     case LDR_w: {
4450       VIXL_DEFINE_OR_RETURN(value, MemRead<uint32_t>(address));
4451       WriteWRegister(srcdst, value, NoRegLog);
4452       extend_to_size = kWRegSizeInBytes;
4453       break;
4454     }
4455     case LDR_x: {
4456       VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(address));
4457       WriteXRegister(srcdst, value, NoRegLog);
4458       extend_to_size = kXRegSizeInBytes;
4459       break;
4460     }
4461     case LDRSB_w: {
4462       VIXL_DEFINE_OR_RETURN(value, MemRead<int8_t>(address));
4463       WriteWRegister(srcdst, value, NoRegLog);
4464       extend_to_size = kWRegSizeInBytes;
4465       break;
4466     }
4467     case LDRSH_w: {
4468       VIXL_DEFINE_OR_RETURN(value, MemRead<int16_t>(address));
4469       WriteWRegister(srcdst, value, NoRegLog);
4470       extend_to_size = kWRegSizeInBytes;
4471       break;
4472     }
4473     case LDRSB_x: {
4474       VIXL_DEFINE_OR_RETURN(value, MemRead<int8_t>(address));
4475       WriteXRegister(srcdst, value, NoRegLog);
4476       extend_to_size = kXRegSizeInBytes;
4477       break;
4478     }
4479     case LDRSH_x: {
4480       VIXL_DEFINE_OR_RETURN(value, MemRead<int16_t>(address));
4481       WriteXRegister(srcdst, value, NoRegLog);
4482       extend_to_size = kXRegSizeInBytes;
4483       break;
4484     }
4485     case LDRSW_x: {
4486       VIXL_DEFINE_OR_RETURN(value, MemRead<int32_t>(address));
4487       WriteXRegister(srcdst, value, NoRegLog);
4488       extend_to_size = kXRegSizeInBytes;
4489       break;
4490     }
4491     case LDR_b: {
4492       VIXL_DEFINE_OR_RETURN(value, MemRead<uint8_t>(address));
4493       WriteBRegister(srcdst, value, NoRegLog);
4494       rt_is_vreg = true;
4495       break;
4496     }
4497     case LDR_h: {
4498       VIXL_DEFINE_OR_RETURN(value, MemRead<uint16_t>(address));
4499       WriteHRegister(srcdst, value, NoRegLog);
4500       rt_is_vreg = true;
4501       break;
4502     }
4503     case LDR_s: {
4504       VIXL_DEFINE_OR_RETURN(value, MemRead<float>(address));
4505       WriteSRegister(srcdst, value, NoRegLog);
4506       rt_is_vreg = true;
4507       break;
4508     }
4509     case LDR_d: {
4510       VIXL_DEFINE_OR_RETURN(value, MemRead<double>(address));
4511       WriteDRegister(srcdst, value, NoRegLog);
4512       rt_is_vreg = true;
4513       break;
4514     }
4515     case LDR_q: {
4516       VIXL_DEFINE_OR_RETURN(value, MemRead<qreg_t>(address));
4517       WriteQRegister(srcdst, value, NoRegLog);
4518       rt_is_vreg = true;
4519       break;
4520     }
4521 
4522     case STRB_w:
4523       if (!MemWrite<uint8_t>(address, ReadWRegister(srcdst))) return;
4524       break;
4525     case STRH_w:
4526       if (!MemWrite<uint16_t>(address, ReadWRegister(srcdst))) return;
4527       break;
4528     case STR_w:
4529       if (!MemWrite<uint32_t>(address, ReadWRegister(srcdst))) return;
4530       break;
4531     case STR_x:
4532       if (!MemWrite<uint64_t>(address, ReadXRegister(srcdst))) return;
4533       break;
4534     case STR_b:
4535       if (!MemWrite<uint8_t>(address, ReadBRegister(srcdst))) return;
4536       rt_is_vreg = true;
4537       break;
4538     case STR_h:
4539       if (!MemWrite<uint16_t>(address, ReadHRegisterBits(srcdst))) return;
4540       rt_is_vreg = true;
4541       break;
4542     case STR_s:
4543       if (!MemWrite<float>(address, ReadSRegister(srcdst))) return;
4544       rt_is_vreg = true;
4545       break;
4546     case STR_d:
4547       if (!MemWrite<double>(address, ReadDRegister(srcdst))) return;
4548       rt_is_vreg = true;
4549       break;
4550     case STR_q:
4551       if (!MemWrite<qreg_t>(address, ReadQRegister(srcdst))) return;
4552       rt_is_vreg = true;
4553       break;
4554 
4555     // Ignore prfm hint instructions.
4556     case PRFM:
4557       break;
4558 
4559     default:
4560       VIXL_UNIMPLEMENTED();
4561   }
4562 
4563   // Print a detailed trace (including the memory address).
4564   bool extend = (extend_to_size != 0);
4565   unsigned access_size = 1 << instr->GetSizeLS();
4566   unsigned result_size = extend ? extend_to_size : access_size;
4567   PrintRegisterFormat print_format =
4568       rt_is_vreg ? GetPrintRegisterFormatForSizeTryFP(result_size)
4569                  : GetPrintRegisterFormatForSize(result_size);
4570 
4571   if (instr->IsLoad()) {
4572     if (rt_is_vreg) {
4573       LogVRead(srcdst, print_format, address);
4574     } else {
4575       LogExtendingRead(srcdst, print_format, access_size, address);
4576     }
4577   } else if (instr->IsStore()) {
4578     if (rt_is_vreg) {
4579       LogVWrite(srcdst, print_format, address);
4580     } else {
4581       LogWrite(srcdst, GetPrintRegisterFormatForSize(result_size), address);
4582     }
4583   } else {
4584     VIXL_ASSERT(op == PRFM);
4585   }
4586 
4587   local_monitor_.MaybeClear();
4588 }
4589 
4590 
4591 void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
4592   LoadStorePairHelper(instr, Offset);
4593 }
4594 
4595 
4596 void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
4597   LoadStorePairHelper(instr, PreIndex);
4598 }
4599 
4600 
4601 void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
4602   LoadStorePairHelper(instr, PostIndex);
4603 }
4604 
4605 
4606 void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
4607   LoadStorePairHelper(instr, Offset);
4608 }
4609 
4610 
4611 void Simulator::LoadStorePairHelper(const Instruction* instr,
4612                                     AddrMode addrmode) {
4613   unsigned rt = instr->GetRt();
4614   unsigned rt2 = instr->GetRt2();
4615   int element_size = 1 << instr->GetSizeLSPair();
4616   int64_t offset = instr->GetImmLSPair() * element_size;
4617   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
4618   uintptr_t address2 = address + element_size;
4619 
4620   LoadStorePairOp op =
4621       static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
4622 
4623   // 'rt' and 'rt2' can only be aliased for stores.
4624   VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
4625 
4626   bool rt_is_vreg = false;
4627   bool sign_extend = false;
4628   switch (op) {
4629     // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
4630     // will print a more detailed log.
4631     case LDP_w: {
4632       VIXL_DEFINE_OR_RETURN(value, MemRead<uint32_t>(address));
4633       VIXL_DEFINE_OR_RETURN(value2, MemRead<uint32_t>(address2));
4634       WriteWRegister(rt, value, NoRegLog);
4635       WriteWRegister(rt2, value2, NoRegLog);
4636       break;
4637     }
4638     case LDP_s: {
4639       VIXL_DEFINE_OR_RETURN(value, MemRead<float>(address));
4640       VIXL_DEFINE_OR_RETURN(value2, MemRead<float>(address2));
4641       WriteSRegister(rt, value, NoRegLog);
4642       WriteSRegister(rt2, value2, NoRegLog);
4643       rt_is_vreg = true;
4644       break;
4645     }
4646     case LDP_x: {
4647       VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(address));
4648       VIXL_DEFINE_OR_RETURN(value2, MemRead<uint64_t>(address2));
4649       WriteXRegister(rt, value, NoRegLog);
4650       WriteXRegister(rt2, value2, NoRegLog);
4651       break;
4652     }
4653     case LDP_d: {
4654       VIXL_DEFINE_OR_RETURN(value, MemRead<double>(address));
4655       VIXL_DEFINE_OR_RETURN(value2, MemRead<double>(address2));
4656       WriteDRegister(rt, value, NoRegLog);
4657       WriteDRegister(rt2, value2, NoRegLog);
4658       rt_is_vreg = true;
4659       break;
4660     }
4661     case LDP_q: {
4662       VIXL_DEFINE_OR_RETURN(value, MemRead<qreg_t>(address));
4663       VIXL_DEFINE_OR_RETURN(value2, MemRead<qreg_t>(address2));
4664       WriteQRegister(rt, value, NoRegLog);
4665       WriteQRegister(rt2, value2, NoRegLog);
4666       rt_is_vreg = true;
4667       break;
4668     }
4669     case LDPSW_x: {
4670       VIXL_DEFINE_OR_RETURN(value, MemRead<int32_t>(address));
4671       VIXL_DEFINE_OR_RETURN(value2, MemRead<int32_t>(address2));
4672       WriteXRegister(rt, value, NoRegLog);
4673       WriteXRegister(rt2, value2, NoRegLog);
4674       sign_extend = true;
4675       break;
4676     }
4677     case STP_w: {
4678       if (!MemWrite<uint32_t>(address, ReadWRegister(rt))) return;
4679       if (!MemWrite<uint32_t>(address2, ReadWRegister(rt2))) return;
4680       break;
4681     }
4682     case STP_s: {
4683       if (!MemWrite<float>(address, ReadSRegister(rt))) return;
4684       if (!MemWrite<float>(address2, ReadSRegister(rt2))) return;
4685       rt_is_vreg = true;
4686       break;
4687     }
4688     case STP_x: {
4689       if (!MemWrite<uint64_t>(address, ReadXRegister(rt))) return;
4690       if (!MemWrite<uint64_t>(address2, ReadXRegister(rt2))) return;
4691       break;
4692     }
4693     case STP_d: {
4694       if (!MemWrite<double>(address, ReadDRegister(rt))) return;
4695       if (!MemWrite<double>(address2, ReadDRegister(rt2))) return;
4696       rt_is_vreg = true;
4697       break;
4698     }
4699     case STP_q: {
4700       if (!MemWrite<qreg_t>(address, ReadQRegister(rt))) return;
4701       if (!MemWrite<qreg_t>(address2, ReadQRegister(rt2))) return;
4702       rt_is_vreg = true;
4703       break;
4704     }
4705     default:
4706       VIXL_UNREACHABLE();
4707   }
4708 
4709   // Print a detailed trace (including the memory address).
4710   unsigned result_size = sign_extend ? kXRegSizeInBytes : element_size;
4711   PrintRegisterFormat print_format =
4712       rt_is_vreg ? GetPrintRegisterFormatForSizeTryFP(result_size)
4713                  : GetPrintRegisterFormatForSize(result_size);
4714 
4715   if (instr->IsLoad()) {
4716     if (rt_is_vreg) {
4717       LogVRead(rt, print_format, address);
4718       LogVRead(rt2, print_format, address2);
4719     } else if (sign_extend) {
4720       LogExtendingRead(rt, print_format, element_size, address);
4721       LogExtendingRead(rt2, print_format, element_size, address2);
4722     } else {
4723       LogRead(rt, print_format, address);
4724       LogRead(rt2, print_format, address2);
4725     }
4726   } else {
4727     if (rt_is_vreg) {
4728       LogVWrite(rt, print_format, address);
4729       LogVWrite(rt2, print_format, address2);
4730     } else {
4731       LogWrite(rt, print_format, address);
4732       LogWrite(rt2, print_format, address2);
4733     }
4734   }
4735 
4736   local_monitor_.MaybeClear();
4737 }
4738 
4739 
4740 template <typename T>
4741 void Simulator::CompareAndSwapHelper(const Instruction* instr) {
4742   unsigned rs = instr->GetRs();
4743   unsigned rt = instr->GetRt();
4744   unsigned rn = instr->GetRn();
4745 
4746   unsigned element_size = sizeof(T);
4747   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
4748 
4749   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
4750 
4751   bool is_acquire = instr->ExtractBit(22) == 1;
4752   bool is_release = instr->ExtractBit(15) == 1;
4753 
4754   T comparevalue = ReadRegister<T>(rs);
4755   T newvalue = ReadRegister<T>(rt);
4756 
4757   // The architecture permits that the data read clears any exclusive monitors
4758   // associated with that location, even if the compare subsequently fails.
4759   local_monitor_.Clear();
4760 
4761   VIXL_DEFINE_OR_RETURN(data, MemRead<T>(address));
4762 
4763   if (is_acquire) {
4764     // Approximate load-acquire by issuing a full barrier after the load.
4765     VIXL_SYNC();
4766   }
4767 
4768   if (data == comparevalue) {
4769     if (is_release) {
4770       // Approximate store-release by issuing a full barrier before the store.
4771       VIXL_SYNC();
4772     }
4773     if (!MemWrite<T>(address, newvalue)) return;
4774     LogWrite(rt, GetPrintRegisterFormatForSize(element_size), address);
4775   }
4776   WriteRegister<T>(rs, data, NoRegLog);
4777   LogRead(rs, GetPrintRegisterFormatForSize(element_size), address);
4778 }
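// In other words, for `cas w0, w1, [x2]`: the old value at [x2] is always
// returned in w0 (rs), and w1 (rt) is stored only if that old value matched
// the original contents of w0.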
4779 
4780 
4781 template <typename T>
4782 void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
4783   VIXL_ASSERT((sizeof(T) == 4) || (sizeof(T) == 8));
4784   unsigned rs = instr->GetRs();
4785   unsigned rt = instr->GetRt();
4786   unsigned rn = instr->GetRn();
4787 
4788   VIXL_ASSERT((rs % 2 == 0) && (rt % 2 == 0));
4789 
4790   unsigned element_size = sizeof(T);
4791   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
4792 
4793   CheckIsValidUnalignedAtomicAccess(rn, address, element_size * 2);
4794 
4795   uint64_t address2 = address + element_size;
4796 
4797   bool is_acquire = instr->ExtractBit(22) == 1;
4798   bool is_release = instr->ExtractBit(15) == 1;
4799 
4800   T comparevalue_high = ReadRegister<T>(rs + 1);
4801   T comparevalue_low = ReadRegister<T>(rs);
4802   T newvalue_high = ReadRegister<T>(rt + 1);
4803   T newvalue_low = ReadRegister<T>(rt);
4804 
4805   // The architecture permits that the data read clears any exclusive monitors
4806   // associated with that location, even if the compare subsequently fails.
4807   local_monitor_.Clear();
4808 
4809   VIXL_DEFINE_OR_RETURN(data_low, MemRead<T>(address));
4810   VIXL_DEFINE_OR_RETURN(data_high, MemRead<T>(address2));
4811 
4812   if (is_acquire) {
4813     // Approximate load-acquire by issuing a full barrier after the load.
4814     VIXL_SYNC();
4815   }
4816 
4817   bool same =
4818       (data_high == comparevalue_high) && (data_low == comparevalue_low);
4819   if (same) {
4820     if (is_release) {
4821       // Approximate store-release by issuing a full barrier before the store.
4822       VIXL_SYNC();
4823     }
4824 
4825     if (!MemWrite<T>(address, newvalue_low)) return;
4826     if (!MemWrite<T>(address2, newvalue_high)) return;
4827   }
4828 
4829   WriteRegister<T>(rs + 1, data_high, NoRegLog);
4830   WriteRegister<T>(rs, data_low, NoRegLog);
4831 
4832   PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
4833   LogRead(rs, format, address);
4834   LogRead(rs + 1, format, address2);
4835 
4836   if (same) {
4837     LogWrite(rt, format, address);
4838     LogWrite(rt + 1, format, address2);
4839   }
4840 }
4841 
4842 bool Simulator::CanReadMemory(uintptr_t address, size_t size) {
4843 #ifndef _WIN32
4844   // To simulate fault-tolerant loads, we need to know what host addresses we
4845   // can access without generating a real fault. One way to do that is to
4846   // attempt to `write()` the memory to a placeholder pipe[1]. This is more
4847   // portable and less intrusive than using (global) signal handlers.
4848   //
4849   // [1]: https://stackoverflow.com/questions/7134590
4850 
4851   size_t written = 0;
4852   bool can_read = true;
4853   // `write` will normally return after one invocation, but it is allowed to
4854   // handle only part of the operation, so wrap it in a loop.
4855   while (can_read && (written < size)) {
4856     ssize_t result = write(placeholder_pipe_fd_[1],
4857                            reinterpret_cast<void*>(address + written),
4858                            size - written);
4859     if (result > 0) {
4860       written += result;
4861     } else {
4862       switch (result) {
4863         case -EPERM:
4864         case -EFAULT:
4865           // The address range is not accessible.
4866           // `write` is supposed to return -EFAULT in this case, but in practice
4867           // it seems to return -EPERM, so we accept that too.
4868           can_read = false;
4869           break;
4870         case -EINTR:
4871           // The call was interrupted by a signal. Just try again.
4872           break;
4873         default:
4874           // Any other error is fatal.
4875           VIXL_ABORT();
4876       }
4877     }
4878   }
4879   // Drain the read side of the pipe. If we don't do this, we'll leak memory as
4880   // the placeholder data is buffered. As before, we expect to drain the whole
4881   // write in one invocation, but cannot guarantee that, so we wrap it in a
4882   // loop. This function is primarily intended to implement SVE fault-tolerant
4883   // loads, so the maximum Z register size is a good default buffer size.
4884   char buffer[kZRegMaxSizeInBytes];
4885   while (written > 0) {
4886     ssize_t result = read(placeholder_pipe_fd_[0],
4887                           reinterpret_cast<void*>(buffer),
4888                           sizeof(buffer));
4889     // `read` blocks, and returns 0 only at EOF. We should not hit EOF until
4890     // we've read everything that was written, so treat 0 as an error.
4891     if (result > 0) {
4892       VIXL_ASSERT(static_cast<size_t>(result) <= written);
4893       written -= result;
4894     } else {
4895       // For -EINTR, just try again. We can't handle any other error.
4896       VIXL_CHECK(result == -EINTR);
4897     }
4898   }
4899 
4900   return can_read;
4901 #else
4902   // To simulate fault-tolerant loads, we need to know what host addresses we
4903   // can access without generating a real fault.
4904   // The pipe code above is almost, but not fully, compatible with Windows.
4905   // Instead, use the platform-specific API VirtualQuery()[2].
4906   //
4907   // [2]: https://stackoverflow.com/a/18395247/9109981
4908 
4909   bool can_read = true;
4910   MEMORY_BASIC_INFORMATION pageInfo;
4911 
4912   size_t checked = 0;
4913   while (can_read && (checked < size)) {
4914     size_t result = VirtualQuery(reinterpret_cast<void*>(address + checked),
4915                                  &pageInfo,
4916                                  sizeof(pageInfo));
4917 
4918     if (result == 0) {  // VirtualQuery returns 0 on failure.
4919       can_read = false;
4920       break;
4921     }
4922 
4923     if (pageInfo.State != MEM_COMMIT) {
4924       can_read = false;
4925       break;
4926     }
4927 
4928     if (pageInfo.Protect == PAGE_NOACCESS || pageInfo.Protect == PAGE_EXECUTE) {
4929       can_read = false;
4930       break;
4931     }
4932     checked += pageInfo.RegionSize -
4933                ((address + checked) -
4934                 reinterpret_cast<uintptr_t>(pageInfo.BaseAddress));
4935   }
4936 
4937   return can_read;
4938 #endif
4939 }
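// A minimal, self-contained sketch of the POSIX probe used above, with a
// hypothetical `fds` pipe and `addr` pointer rather than the simulator's
// placeholder_pipe_fd_ member (illustrative only):
//
//   int fds[2];
//   if (pipe(fds) == 0) {
//     ssize_t r = write(fds[1], reinterpret_cast<void*>(addr), 1);
//     bool readable = (r == 1);  // An unreadable address surfaces as a write
//                                // error rather than a fatal signal.
//     close(fds[0]);
//     close(fds[1]);
//   }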
4940 
4941 void Simulator::PrintExclusiveAccessWarning() {
4942   if (print_exclusive_access_warning_) {
4943     fprintf(stderr,
4944             "%sWARNING:%s VIXL simulator support for "
4945             "load-/store-/clear-exclusive "
4946             "instructions is limited. Refer to the README for details.%s\n",
4947             clr_warning,
4948             clr_warning_message,
4949             clr_normal);
4950     print_exclusive_access_warning_ = false;
4951   }
4952 }
4953 
4954 void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
4955   LoadStoreExclusive op =
4956       static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
4957 
4958   switch (op) {
4959     case CAS_w:
4960     case CASA_w:
4961     case CASL_w:
4962     case CASAL_w:
4963       CompareAndSwapHelper<uint32_t>(instr);
4964       break;
4965     case CAS_x:
4966     case CASA_x:
4967     case CASL_x:
4968     case CASAL_x:
4969       CompareAndSwapHelper<uint64_t>(instr);
4970       break;
4971     case CASB:
4972     case CASAB:
4973     case CASLB:
4974     case CASALB:
4975       CompareAndSwapHelper<uint8_t>(instr);
4976       break;
4977     case CASH:
4978     case CASAH:
4979     case CASLH:
4980     case CASALH:
4981       CompareAndSwapHelper<uint16_t>(instr);
4982       break;
4983     case CASP_w:
4984     case CASPA_w:
4985     case CASPL_w:
4986     case CASPAL_w:
4987       CompareAndSwapPairHelper<uint32_t>(instr);
4988       break;
4989     case CASP_x:
4990     case CASPA_x:
4991     case CASPL_x:
4992     case CASPAL_x:
4993       CompareAndSwapPairHelper<uint64_t>(instr);
4994       break;
4995     default:
4996       PrintExclusiveAccessWarning();
4997 
4998       unsigned rs = instr->GetRs();
4999       unsigned rt = instr->GetRt();
5000       unsigned rt2 = instr->GetRt2();
5001       unsigned rn = instr->GetRn();
5002 
5003       bool is_exclusive = !instr->GetLdStXNotExclusive();
5004       bool is_acquire_release =
5005           !is_exclusive || instr->GetLdStXAcquireRelease();
5006       bool is_load = instr->GetLdStXLoad();
5007       bool is_pair = instr->GetLdStXPair();
5008 
5009       unsigned element_size = 1 << instr->GetLdStXSizeLog2();
5010       unsigned access_size = is_pair ? element_size * 2 : element_size;
5011       uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
5012 
5013       CheckIsValidUnalignedAtomicAccess(rn, address, access_size);
5014 
5015       if (is_load) {
5016         if (is_exclusive) {
5017           local_monitor_.MarkExclusive(address, access_size);
5018         } else {
5019           // Any non-exclusive load can clear the local monitor as a side
5020           // effect. We don't need to do this, but it is useful to stress the
5021           // simulated code.
5022           local_monitor_.Clear();
5023         }
5024 
5025         // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS).
5026         // We will print a more detailed log.
5027         unsigned reg_size = 0;
5028         switch (op) {
5029           case LDXRB_w:
5030           case LDAXRB_w:
5031           case LDARB_w:
5032           case LDLARB: {
5033             VIXL_DEFINE_OR_RETURN(value, MemRead<uint8_t>(address));
5034             WriteWRegister(rt, value, NoRegLog);
5035             reg_size = kWRegSizeInBytes;
5036             break;
5037           }
5038           case LDXRH_w:
5039           case LDAXRH_w:
5040           case LDARH_w:
5041           case LDLARH: {
5042             VIXL_DEFINE_OR_RETURN(value, MemRead<uint16_t>(address));
5043             WriteWRegister(rt, value, NoRegLog);
5044             reg_size = kWRegSizeInBytes;
5045             break;
5046           }
5047           case LDXR_w:
5048           case LDAXR_w:
5049           case LDAR_w:
5050           case LDLAR_w: {
5051             VIXL_DEFINE_OR_RETURN(value, MemRead<uint32_t>(address));
5052             WriteWRegister(rt, value, NoRegLog);
5053             reg_size = kWRegSizeInBytes;
5054             break;
5055           }
5056           case LDXR_x:
5057           case LDAXR_x:
5058           case LDAR_x:
5059           case LDLAR_x: {
5060             VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(address));
5061             WriteXRegister(rt, value, NoRegLog);
5062             reg_size = kXRegSizeInBytes;
5063             break;
5064           }
5065           case LDXP_w:
5066           case LDAXP_w: {
5067             VIXL_DEFINE_OR_RETURN(value, MemRead<uint32_t>(address));
5068             VIXL_DEFINE_OR_RETURN(value2,
5069                                   MemRead<uint32_t>(address + element_size));
5070             WriteWRegister(rt, value, NoRegLog);
5071             WriteWRegister(rt2, value2, NoRegLog);
5072             reg_size = kWRegSizeInBytes;
5073             break;
5074           }
5075           case LDXP_x:
5076           case LDAXP_x: {
5077             VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(address));
5078             VIXL_DEFINE_OR_RETURN(value2,
5079                                   MemRead<uint64_t>(address + element_size));
5080             WriteXRegister(rt, value, NoRegLog);
5081             WriteXRegister(rt2, value2, NoRegLog);
5082             reg_size = kXRegSizeInBytes;
5083             break;
5084           }
5085           default:
5086             VIXL_UNREACHABLE();
5087         }
5088 
5089         if (is_acquire_release) {
5090           // Approximate load-acquire by issuing a full barrier after the load.
5091           VIXL_SYNC();
5092         }
5093 
5094         PrintRegisterFormat format = GetPrintRegisterFormatForSize(reg_size);
5095         LogExtendingRead(rt, format, element_size, address);
5096         if (is_pair) {
5097           LogExtendingRead(rt2, format, element_size, address + element_size);
5098         }
5099       } else {
5100         if (is_acquire_release) {
5101           // Approximate store-release by issuing a full barrier before the
5102           // store.
5103           VIXL_SYNC();
5104         }
5105 
5106         bool do_store = true;
5107         if (is_exclusive) {
5108           do_store = local_monitor_.IsExclusive(address, access_size) &&
5109                      global_monitor_.IsExclusive(address, access_size);
5110           WriteWRegister(rs, do_store ? 0 : 1);
5111 
5112           //  - All exclusive stores explicitly clear the local monitor.
5113           local_monitor_.Clear();
5114         } else {
5115           //  - Any other store can clear the local monitor as a side effect.
5116           local_monitor_.MaybeClear();
5117         }
5118 
5119         if (do_store) {
5120           switch (op) {
5121             case STXRB_w:
5122             case STLXRB_w:
5123             case STLRB_w:
5124             case STLLRB:
5125               if (!MemWrite<uint8_t>(address, ReadWRegister(rt))) return;
5126               break;
5127             case STXRH_w:
5128             case STLXRH_w:
5129             case STLRH_w:
5130             case STLLRH:
5131               if (!MemWrite<uint16_t>(address, ReadWRegister(rt))) return;
5132               break;
5133             case STXR_w:
5134             case STLXR_w:
5135             case STLR_w:
5136             case STLLR_w:
5137               if (!MemWrite<uint32_t>(address, ReadWRegister(rt))) return;
5138               break;
5139             case STXR_x:
5140             case STLXR_x:
5141             case STLR_x:
5142             case STLLR_x:
5143               if (!MemWrite<uint64_t>(address, ReadXRegister(rt))) return;
5144               break;
5145             case STXP_w:
5146             case STLXP_w:
5147               if (!MemWrite<uint32_t>(address, ReadWRegister(rt))) return;
5148               if (!MemWrite<uint32_t>(address + element_size,
5149                                       ReadWRegister(rt2))) {
5150                 return;
5151               }
5152               break;
5153             case STXP_x:
5154             case STLXP_x:
5155               if (!MemWrite<uint64_t>(address, ReadXRegister(rt))) return;
5156               if (!MemWrite<uint64_t>(address + element_size,
5157                                       ReadXRegister(rt2))) {
5158                 return;
5159               }
5160               break;
5161             default:
5162               VIXL_UNREACHABLE();
5163           }
5164 
5165           PrintRegisterFormat format =
5166               GetPrintRegisterFormatForSize(element_size);
5167           LogWrite(rt, format, address);
5168           if (is_pair) {
5169             LogWrite(rt2, format, address + element_size);
5170           }
5171         }
5172       }
5173   }
5174 }
5175 
5176 template <typename T>
5177 void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
5178   unsigned rs = instr->GetRs();
5179   unsigned rt = instr->GetRt();
5180   unsigned rn = instr->GetRn();
5181 
5182   bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
5183   bool is_release = instr->ExtractBit(22) == 1;
5184 
5185   unsigned element_size = sizeof(T);
5186   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
5187 
5188   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
5189 
5190   T value = ReadRegister<T>(rs);
5191 
5192   VIXL_DEFINE_OR_RETURN(data, MemRead<T>(address));
5193 
5194   if (is_acquire) {
5195     // Approximate load-acquire by issuing a full barrier after the load.
5196     VIXL_SYNC();
5197   }
5198 
5199   T result = 0;
5200   switch (instr->Mask(AtomicMemorySimpleOpMask)) {
5201     case LDADDOp:
5202       result = data + value;
5203       break;
5204     case LDCLROp:
5205       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
5206       result = data & ~value;
5207       break;
5208     case LDEOROp:
5209       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
5210       result = data ^ value;
5211       break;
5212     case LDSETOp:
5213       VIXL_ASSERT(!std::numeric_limits<T>::is_signed);
5214       result = data | value;
5215       break;
5216 
5217     // Signed/Unsigned difference is done via the templated type T.
5218     case LDSMAXOp:
5219     case LDUMAXOp:
5220       result = (data > value) ? data : value;
5221       break;
5222     case LDSMINOp:
5223     case LDUMINOp:
5224       result = (data > value) ? value : data;
5225       break;
5226   }
5227 
5228   if (is_release) {
5229     // Approximate store-release by issuing a full barrier before the store.
5230     VIXL_SYNC();
5231   }
5232 
5233   WriteRegister<T>(rt, data, NoRegLog);
5234 
5235   unsigned register_size = element_size;
5236   if (element_size < kXRegSizeInBytes) {
5237     register_size = kWRegSizeInBytes;
5238   }
5239   PrintRegisterFormat format = GetPrintRegisterFormatForSize(register_size);
5240   LogExtendingRead(rt, format, element_size, address);
5241 
5242   if (!MemWrite<T>(address, result)) return;
5243   format = GetPrintRegisterFormatForSize(element_size);
5244   LogWrite(rs, format, address);
5245 }
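// For example, `ldadd w0, w1, [x2]` returns the old value at [x2] in w1 and
// stores old + w0 back; the signed min/max variants differ only in the
// signedness of the template parameter T.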
5246 
5247 template <typename T>
5248 void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
5249   unsigned rs = instr->GetRs();
5250   unsigned rt = instr->GetRt();
5251   unsigned rn = instr->GetRn();
5252 
5253   bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode);
5254   bool is_release = instr->ExtractBit(22) == 1;
5255 
5256   unsigned element_size = sizeof(T);
5257   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
5258 
5259   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
5260 
5261   VIXL_DEFINE_OR_RETURN(data, MemRead<T>(address));
5262 
5263   if (is_acquire) {
5264     // Approximate load-acquire by issuing a full barrier after the load.
5265     VIXL_SYNC();
5266   }
5267 
5268   if (is_release) {
5269     // Approximate store-release by issuing a full barrier before the store.
5270     VIXL_SYNC();
5271   }
5272   if (!MemWrite<T>(address, ReadRegister<T>(rs))) return;
5273 
5274   WriteRegister<T>(rt, data);
5275 
5276   PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
5277   LogRead(rt, format, address);
5278   LogWrite(rs, format, address);
5279 }
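// For example, `swpal w0, w1, [x2]` atomically stores w0 to [x2] and returns
// the previous contents in w1, with both acquire and release semantics.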
5280 
5281 template <typename T>
5282 void Simulator::LoadAcquireRCpcHelper(const Instruction* instr) {
5283   unsigned rt = instr->GetRt();
5284   unsigned rn = instr->GetRn();
5285 
5286   unsigned element_size = sizeof(T);
5287   uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);
5288 
5289   CheckIsValidUnalignedAtomicAccess(rn, address, element_size);
5290 
5291   VIXL_DEFINE_OR_RETURN(value, MemRead<T>(address));
5292 
5293   WriteRegister<T>(rt, value);
5294 
5295   // Approximate load-acquire by issuing a full barrier after the load.
5296   VIXL_SYNC();
5297 
5298   LogRead(rt, GetPrintRegisterFormatForSize(element_size), address);
5299 }
5300 
5301 #define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
5302   V(LDADD)                                \
5303   V(LDCLR)                                \
5304   V(LDEOR)                                \
5305   V(LDSET)                                \
5306   V(LDUMAX)                               \
5307   V(LDUMIN)
5308 
5309 #define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
5310   V(LDSMAX)                              \
5311   V(LDSMIN)
5312 
5313 void Simulator::VisitAtomicMemory(const Instruction* instr) {
5314   switch (instr->Mask(AtomicMemoryMask)) {
5315 // clang-format off
5316 #define SIM_FUNC_B(A) \
5317     case A##B:        \
5318     case A##AB:       \
5319     case A##LB:       \
5320     case A##ALB:
5321 #define SIM_FUNC_H(A) \
5322     case A##H:        \
5323     case A##AH:       \
5324     case A##LH:       \
5325     case A##ALH:
5326 #define SIM_FUNC_w(A) \
5327     case A##_w:       \
5328     case A##A_w:      \
5329     case A##L_w:      \
5330     case A##AL_w:
5331 #define SIM_FUNC_x(A) \
5332     case A##_x:       \
5333     case A##A_x:      \
5334     case A##L_x:      \
5335     case A##AL_x:
5336 
5337     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
5338       AtomicMemorySimpleHelper<uint8_t>(instr);
5339       break;
5340     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
5341       AtomicMemorySimpleHelper<int8_t>(instr);
5342       break;
5343     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
5344       AtomicMemorySimpleHelper<uint16_t>(instr);
5345       break;
5346     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
5347       AtomicMemorySimpleHelper<int16_t>(instr);
5348       break;
5349     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
5350       AtomicMemorySimpleHelper<uint32_t>(instr);
5351       break;
5352     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
5353       AtomicMemorySimpleHelper<int32_t>(instr);
5354       break;
5355     ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
5356       AtomicMemorySimpleHelper<uint64_t>(instr);
5357       break;
5358     ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
5359       AtomicMemorySimpleHelper<int64_t>(instr);
5360       break;
5361       // clang-format on
5362 
5363     case SWPB:
5364     case SWPAB:
5365     case SWPLB:
5366     case SWPALB:
5367       AtomicMemorySwapHelper<uint8_t>(instr);
5368       break;
5369     case SWPH:
5370     case SWPAH:
5371     case SWPLH:
5372     case SWPALH:
5373       AtomicMemorySwapHelper<uint16_t>(instr);
5374       break;
5375     case SWP_w:
5376     case SWPA_w:
5377     case SWPL_w:
5378     case SWPAL_w:
5379       AtomicMemorySwapHelper<uint32_t>(instr);
5380       break;
5381     case SWP_x:
5382     case SWPA_x:
5383     case SWPL_x:
5384     case SWPAL_x:
5385       AtomicMemorySwapHelper<uint64_t>(instr);
5386       break;
5387     case LDAPRB:
5388       LoadAcquireRCpcHelper<uint8_t>(instr);
5389       break;
5390     case LDAPRH:
5391       LoadAcquireRCpcHelper<uint16_t>(instr);
5392       break;
5393     case LDAPR_w:
5394       LoadAcquireRCpcHelper<uint32_t>(instr);
5395       break;
5396     case LDAPR_x:
5397       LoadAcquireRCpcHelper<uint64_t>(instr);
5398       break;
5399   }
5400 }
5401 
5402 
5403 void Simulator::VisitLoadLiteral(const Instruction* instr) {
5404   unsigned rt = instr->GetRt();
5405   uint64_t address = instr->GetLiteralAddress<uint64_t>();
5406 
5407   // Verify that the calculated address is available to the host.
5408   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
5409 
5410   switch (instr->Mask(LoadLiteralMask)) {
5411     // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
5412     // print a more detailed log.
5413     case LDR_w_lit: {
5414       VIXL_DEFINE_OR_RETURN(value, MemRead<uint32_t>(address));
5415       WriteWRegister(rt, value, NoRegLog);
5416       LogRead(rt, kPrintWReg, address);
5417       break;
5418     }
5419     case LDR_x_lit: {
5420       VIXL_DEFINE_OR_RETURN(value, MemRead<uint64_t>(address));
5421       WriteXRegister(rt, value, NoRegLog);
5422       LogRead(rt, kPrintXReg, address);
5423       break;
5424     }
5425     case LDR_s_lit: {
5426       VIXL_DEFINE_OR_RETURN(value, MemRead<float>(address));
5427       WriteSRegister(rt, value, NoRegLog);
5428       LogVRead(rt, kPrintSRegFP, address);
5429       break;
5430     }
5431     case LDR_d_lit: {
5432       VIXL_DEFINE_OR_RETURN(value, MemRead<double>(address));
5433       WriteDRegister(rt, value, NoRegLog);
5434       LogVRead(rt, kPrintDRegFP, address);
5435       break;
5436     }
5437     case LDR_q_lit: {
5438       VIXL_DEFINE_OR_RETURN(value, MemRead<qreg_t>(address));
5439       WriteQRegister(rt, value, NoRegLog);
5440       LogVRead(rt, kPrintReg1Q, address);
5441       break;
5442     }
5443     case LDRSW_x_lit: {
5444       VIXL_DEFINE_OR_RETURN(value, MemRead<int32_t>(address));
5445       WriteXRegister(rt, value, NoRegLog);
5446       LogExtendingRead(rt, kPrintXReg, kWRegSizeInBytes, address);
5447       break;
5448     }
5449 
5450     // Ignore prfm hint instructions.
5451     case PRFM_lit:
5452       break;
5453 
5454     default:
5455       VIXL_UNREACHABLE();
5456   }
5457 
5458   local_monitor_.MaybeClear();
5459 }
5460 
5461 
5462 uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
5463                                        int64_t offset,
5464                                        AddrMode addrmode) {
5465   uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer);
5466 
5467   if ((addr_reg == 31) && ((address % 16) != 0)) {
5468     // When the base register is SP the stack pointer is required to be
5469     // quadword aligned prior to the address calculation and write-backs.
5470     // Misalignment will cause a stack alignment fault.
5471     VIXL_ALIGNMENT_EXCEPTION();
5472   }
5473 
5474   if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
5475     VIXL_ASSERT(offset != 0);
5476     // Only preindex should log the register update here. For Postindex, the
5477     // update will be printed automatically by LogWrittenRegisters _after_ the
5478     // memory access itself is logged.
5479     RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog;
5480     WriteXRegister(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
5481   }
5482 
5483   if ((addrmode == Offset) || (addrmode == PreIndex)) {
5484     address += offset;
5485   }
5486 
5487   // Verify that the calculated address is available to the host.
5488   VIXL_ASSERT(address == static_cast<uintptr_t>(address));
5489 
5490   return static_cast<uintptr_t>(address);
5491 }
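// For example, `ldr x0, [x1, #8]!` (pre-index) updates x1 by 8 and loads from
// the new address, `ldr x0, [x1], #8` (post-index) loads from the original x1
// and then updates it, and plain `ldr x0, [x1, #8]` leaves x1 untouched.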
5492 
5493 
5494 void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
5495   MoveWideImmediateOp mov_op =
5496       static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
5497   int64_t new_xn_val = 0;
5498 
5499   bool is_64_bits = instr->GetSixtyFourBits() == 1;
5500   // Shift is limited for W operations.
5501   VIXL_ASSERT(is_64_bits || (instr->GetShiftMoveWide() < 2));
5502 
5503   // Get the shifted immediate.
5504   int64_t shift = instr->GetShiftMoveWide() * 16;
5505   int64_t shifted_imm16 = static_cast<int64_t>(instr->GetImmMoveWide())
5506                           << shift;
5507 
5508   // Compute the new value.
5509   switch (mov_op) {
5510     case MOVN_w:
5511     case MOVN_x: {
5512       new_xn_val = ~shifted_imm16;
5513       if (!is_64_bits) new_xn_val &= kWRegMask;
5514       break;
5515     }
5516     case MOVK_w:
5517     case MOVK_x: {
5518       unsigned reg_code = instr->GetRd();
5519       int64_t prev_xn_val =
5520           is_64_bits ? ReadXRegister(reg_code) : ReadWRegister(reg_code);
5521       new_xn_val = (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
5522       break;
5523     }
5524     case MOVZ_w:
5525     case MOVZ_x: {
5526       new_xn_val = shifted_imm16;
5527       break;
5528     }
5529     default:
5530       VIXL_UNREACHABLE();
5531   }
5532 
5533   // Update the destination register.
5534   WriteXRegister(instr->GetRd(), new_xn_val);
5535 }
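// For example, the constant 0x0000123400005678 can be materialised as
// `movz x0, #0x5678` followed by `movk x0, #0x1234, lsl #32`: MOVZ clears the
// register and MOVK patches one 16-bit slice while preserving the rest.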
5536 
5537 
5538 void Simulator::VisitConditionalSelect(const Instruction* instr) {
5539   uint64_t new_val = ReadXRegister(instr->GetRn());
5540 
5541   if (ConditionFailed(static_cast<Condition>(instr->GetCondition()))) {
5542     new_val = ReadXRegister(instr->GetRm());
5543     switch (instr->Mask(ConditionalSelectMask)) {
5544       case CSEL_w:
5545       case CSEL_x:
5546         break;
5547       case CSINC_w:
5548       case CSINC_x:
5549         new_val++;
5550         break;
5551       case CSINV_w:
5552       case CSINV_x:
5553         new_val = ~new_val;
5554         break;
5555       case CSNEG_w:
5556       case CSNEG_x:
5557         new_val = UnsignedNegate(new_val);
5558         break;
5559       default:
5560         VIXL_UNIMPLEMENTED();
5561     }
5562   }
5563   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
5564   WriteRegister(reg_size, instr->GetRd(), new_val);
5565 }
5566 
5567 
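     // Each V(...) entry below expands (via DEFINE_PAUTH_FUNCS) into four handlers
     // for the given key: the PAC and AUT forms taking a modifier in Rn (which may
     // be SP), and the Z forms, which require Rn == XZR and use a zero modifier.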
5568 #define PAUTH_MODES_REGISTER_CONTEXT(V)   \
5569   V(i, a, kPACKeyIA, kInstructionPointer) \
5570   V(i, b, kPACKeyIB, kInstructionPointer) \
5571   V(d, a, kPACKeyDA, kDataPointer)        \
5572   V(d, b, kPACKeyDB, kDataPointer)
5573 
5574 void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
5575   unsigned dst = instr->GetRd();
5576   unsigned src = instr->GetRn();
5577   Reg31Mode r31_pac = Reg31IsStackPointer;
5578 
5579   switch (form_hash_) {
5580 #define DEFINE_PAUTH_FUNCS(SUF0, SUF1, KEY, D)      \
5581   case "pac" #SUF0 "z" #SUF1 "_64z_dp_1src"_h:      \
5582     VIXL_ASSERT(src == kZeroRegCode);               \
5583     r31_pac = Reg31IsZeroRegister;                  \
5584     VIXL_FALLTHROUGH();                             \
5585   case "pac" #SUF0 #SUF1 "_64p_dp_1src"_h: {        \
5586     uint64_t mod = ReadXRegister(src, r31_pac);     \
5587     uint64_t ptr = ReadXRegister(dst);              \
5588     WriteXRegister(dst, AddPAC(ptr, mod, KEY, D));  \
5589     break;                                          \
5590   }                                                 \
5591   case "aut" #SUF0 "z" #SUF1 "_64z_dp_1src"_h:      \
5592     VIXL_ASSERT(src == kZeroRegCode);               \
5593     r31_pac = Reg31IsZeroRegister;                  \
5594     VIXL_FALLTHROUGH();                             \
5595   case "aut" #SUF0 #SUF1 "_64p_dp_1src"_h: {        \
5596     uint64_t mod = ReadXRegister(src, r31_pac);     \
5597     uint64_t ptr = ReadXRegister(dst);              \
5598     WriteXRegister(dst, AuthPAC(ptr, mod, KEY, D)); \
5599     break;                                          \
5600   }
5601     PAUTH_MODES_REGISTER_CONTEXT(DEFINE_PAUTH_FUNCS)
5602 #undef DEFINE_PAUTH_FUNCS
5603 
5604     case "xpaci_64z_dp_1src"_h:
5605       WriteXRegister(dst, StripPAC(ReadXRegister(dst), kInstructionPointer));
5606       break;
5607     case "xpacd_64z_dp_1src"_h:
5608       WriteXRegister(dst, StripPAC(ReadXRegister(dst), kDataPointer));
5609       break;
5610     case "rbit_32_dp_1src"_h:
5611       WriteWRegister(dst, ReverseBits(ReadWRegister(src)));
5612       break;
5613     case "rbit_64_dp_1src"_h:
5614       WriteXRegister(dst, ReverseBits(ReadXRegister(src)));
5615       break;
5616     case "rev16_32_dp_1src"_h:
5617       WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 1));
5618       break;
5619     case "rev16_64_dp_1src"_h:
5620       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 1));
5621       break;
5622     case "rev_32_dp_1src"_h:
5623       WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 2));
5624       break;
5625     case "rev32_64_dp_1src"_h:
5626       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 2));
5627       break;
5628     case "rev_64_dp_1src"_h:
5629       WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 3));
5630       break;
5631     case "clz_32_dp_1src"_h:
5632       WriteWRegister(dst, CountLeadingZeros(ReadWRegister(src)));
5633       break;
5634     case "clz_64_dp_1src"_h:
5635       WriteXRegister(dst, CountLeadingZeros(ReadXRegister(src)));
5636       break;
5637     case "cls_32_dp_1src"_h:
5638       WriteWRegister(dst, CountLeadingSignBits(ReadWRegister(src)));
5639       break;
5640     case "cls_64_dp_1src"_h:
5641       WriteXRegister(dst, CountLeadingSignBits(ReadXRegister(src)));
5642       break;
5643     case "abs_32_dp_1src"_h:
5644       WriteWRegister(dst, Abs(ReadWRegister(src)));
5645       break;
5646     case "abs_64_dp_1src"_h:
5647       WriteXRegister(dst, Abs(ReadXRegister(src)));
5648       break;
5649     case "cnt_32_dp_1src"_h:
5650       WriteWRegister(dst, CountSetBits(ReadWRegister(src)));
5651       break;
5652     case "cnt_64_dp_1src"_h:
5653       WriteXRegister(dst, CountSetBits(ReadXRegister(src)));
5654       break;
5655     case "ctz_32_dp_1src"_h:
5656       WriteWRegister(dst, CountTrailingZeros(ReadWRegister(src)));
5657       break;
5658     case "ctz_64_dp_1src"_h:
5659       WriteXRegister(dst, CountTrailingZeros(ReadXRegister(src)));
5660       break;
5661   }
5662 }
5663 
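     // Poly32Mod2 reduces the n-bit value `data`, viewed as a polynomial over
     // GF(2), modulo the 33-bit generator x^32 + poly: wherever a bit at or above
     // position 32 is set, the correspondingly shifted generator is XORed in. For
     // example, data == (UINT64_C(1) << 32) leaves a remainder of exactly `poly`.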
5664 uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
5665   VIXL_ASSERT((n > 32) && (n <= 64));
5666   for (unsigned i = (n - 1); i >= 32; i--) {
5667     if (((data >> i) & 1) != 0) {
5668       uint64_t polysh32 = (uint64_t)poly << (i - 32);
5669       uint64_t mask = (UINT64_C(1) << i) - 1;
5670       data = ((data & mask) ^ polysh32);
5671     }
5672   }
5673   return data & 0xffffffff;
5674 }
5675 
5676 
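     // CRC32 and CRC32C are reflected (LSB-first) CRCs, so the accumulator and the
     // new data are bit-reversed before the MSB-first division in Poly32Mod2, and
     // the 32-bit remainder is reversed back afterwards.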
5677 template <typename T>
5678 uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
5679   unsigned size = sizeof(val) * 8;  // Number of bits in type T.
5680   VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
5681   uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
5682   uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
5683   return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
5684 }
5685 
5686 
5687 uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
5688   // Poly32Mod2 cannot handle a value operand wider than 32 bits, so compute
5689   // the CRC of each 32-bit word sequentially.
5690   acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
5691   return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
5692 }
5693 
5694 
5695 void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
5696   Shift shift_op = NO_SHIFT;
5697   int64_t result = 0;
5698   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
5699 
5700   switch (instr->Mask(DataProcessing2SourceMask)) {
5701     case SDIV_w: {
5702       int32_t rn = ReadWRegister(instr->GetRn());
5703       int32_t rm = ReadWRegister(instr->GetRm());
5704       if ((rn == kWMinInt) && (rm == -1)) {
5705         result = kWMinInt;
5706       } else if (rm == 0) {
5707         // Division by zero can be trapped, but not on A-class processors.
5708         result = 0;
5709       } else {
5710         result = rn / rm;
5711       }
5712       break;
5713     }
5714     case SDIV_x: {
5715       int64_t rn = ReadXRegister(instr->GetRn());
5716       int64_t rm = ReadXRegister(instr->GetRm());
5717       if ((rn == kXMinInt) && (rm == -1)) {
5718         result = kXMinInt;
5719       } else if (rm == 0) {
5720         // Division by zero can be trapped, but not on A-class processors.
5721         result = 0;
5722       } else {
5723         result = rn / rm;
5724       }
5725       break;
5726     }
5727     case UDIV_w: {
5728       uint32_t rn = static_cast<uint32_t>(ReadWRegister(instr->GetRn()));
5729       uint32_t rm = static_cast<uint32_t>(ReadWRegister(instr->GetRm()));
5730       if (rm == 0) {
5731         // Division by zero can be trapped, but not on A-class processors.
5732         result = 0;
5733       } else {
5734         result = rn / rm;
5735       }
5736       break;
5737     }
5738     case UDIV_x: {
5739       uint64_t rn = static_cast<uint64_t>(ReadXRegister(instr->GetRn()));
5740       uint64_t rm = static_cast<uint64_t>(ReadXRegister(instr->GetRm()));
5741       if (rm == 0) {
5742         // Division by zero can be trapped, but not on A-class processors.
5743         result = 0;
5744       } else {
5745         result = rn / rm;
5746       }
5747       break;
5748     }
5749     case LSLV_w:
5750     case LSLV_x:
5751       shift_op = LSL;
5752       break;
5753     case LSRV_w:
5754     case LSRV_x:
5755       shift_op = LSR;
5756       break;
5757     case ASRV_w:
5758     case ASRV_x:
5759       shift_op = ASR;
5760       break;
5761     case RORV_w:
5762     case RORV_x:
5763       shift_op = ROR;
5764       break;
5765     case PACGA: {
5766       uint64_t dst = static_cast<uint64_t>(ReadXRegister(instr->GetRn()));
5767       uint64_t src = static_cast<uint64_t>(
5768           ReadXRegister(instr->GetRm(), Reg31IsStackPointer));
5769       uint64_t code = ComputePAC(dst, src, kPACKeyGA);
5770       result = code & 0xffffffff00000000;
5771       break;
5772     }
5773     case CRC32B: {
5774       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5775       uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
5776       result = Crc32Checksum(acc, val, CRC32_POLY);
5777       break;
5778     }
5779     case CRC32H: {
5780       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5781       uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
5782       result = Crc32Checksum(acc, val, CRC32_POLY);
5783       break;
5784     }
5785     case CRC32W: {
5786       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5787       uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
5788       result = Crc32Checksum(acc, val, CRC32_POLY);
5789       break;
5790     }
5791     case CRC32X: {
5792       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5793       uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
5794       result = Crc32Checksum(acc, val, CRC32_POLY);
5795       reg_size = kWRegSize;
5796       break;
5797     }
5798     case CRC32CB: {
5799       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5800       uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
5801       result = Crc32Checksum(acc, val, CRC32C_POLY);
5802       break;
5803     }
5804     case CRC32CH: {
5805       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5806       uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
5807       result = Crc32Checksum(acc, val, CRC32C_POLY);
5808       break;
5809     }
5810     case CRC32CW: {
5811       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5812       uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
5813       result = Crc32Checksum(acc, val, CRC32C_POLY);
5814       break;
5815     }
5816     case CRC32CX: {
5817       uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
5818       uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
5819       result = Crc32Checksum(acc, val, CRC32C_POLY);
5820       reg_size = kWRegSize;
5821       break;
5822     }
5823     default:
5824       VIXL_UNIMPLEMENTED();
5825   }
5826 
5827   if (shift_op != NO_SHIFT) {
5828     // The shift distance is encoded in the least-significant five (W-form) or
5829     // six (X-form) bits of the register.
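         // For example, a W-form LSLV with 68 (0x44) in Rm shifts by 68 & 0x1f == 4.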
5830     int mask = (instr->GetSixtyFourBits() == 1) ? 0x3f : 0x1f;
5831     unsigned shift = ReadWRegister(instr->GetRm()) & mask;
5832     result = ShiftOperand(reg_size,
5833                           ReadRegister(reg_size, instr->GetRn()),
5834                           shift_op,
5835                           shift);
5836   }
5837   WriteRegister(reg_size, instr->GetRd(), result);
5838 }
5839 
5840 void Simulator::SimulateSignedMinMax(const Instruction* instr) {
5841   int32_t wn = ReadWRegister(instr->GetRn());
5842   int32_t wm = ReadWRegister(instr->GetRm());
5843   int64_t xn = ReadXRegister(instr->GetRn());
5844   int64_t xm = ReadXRegister(instr->GetRm());
5845   int32_t imm = instr->ExtractSignedBits(17, 10);
5846   int dst = instr->GetRd();
5847 
5848   switch (form_hash_) {
5849     case "smax_64_minmax_imm"_h:
5850     case "smin_64_minmax_imm"_h:
5851       xm = imm;
5852       break;
5853     case "smax_32_minmax_imm"_h:
5854     case "smin_32_minmax_imm"_h:
5855       wm = imm;
5856       break;
5857   }
5858 
5859   switch (form_hash_) {
5860     case "smax_32_minmax_imm"_h:
5861     case "smax_32_dp_2src"_h:
5862       WriteWRegister(dst, std::max(wn, wm));
5863       break;
5864     case "smax_64_minmax_imm"_h:
5865     case "smax_64_dp_2src"_h:
5866       WriteXRegister(dst, std::max(xn, xm));
5867       break;
5868     case "smin_32_minmax_imm"_h:
5869     case "smin_32_dp_2src"_h:
5870       WriteWRegister(dst, std::min(wn, wm));
5871       break;
5872     case "smin_64_minmax_imm"_h:
5873     case "smin_64_dp_2src"_h:
5874       WriteXRegister(dst, std::min(xn, xm));
5875       break;
5876   }
5877 }
5878 
5879 void Simulator::SimulateUnsignedMinMax(const Instruction* instr) {
5880   uint64_t xn = ReadXRegister(instr->GetRn());
5881   uint64_t xm = ReadXRegister(instr->GetRm());
5882   uint32_t imm = instr->ExtractBits(17, 10);
5883   int dst = instr->GetRd();
5884 
5885   switch (form_hash_) {
5886     case "umax_64u_minmax_imm"_h:
5887     case "umax_32u_minmax_imm"_h:
5888     case "umin_64u_minmax_imm"_h:
5889     case "umin_32u_minmax_imm"_h:
5890       xm = imm;
5891       break;
5892   }
5893 
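       // The 32-bit forms below mask both operands to 32 bits and fall through to
       // the 64-bit handler; the result then also fits in 32 bits, so writing the
       // full X register gives the same zero-extension as a W-register write.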
5894   switch (form_hash_) {
5895     case "umax_32u_minmax_imm"_h:
5896     case "umax_32_dp_2src"_h:
5897       xn &= 0xffff'ffff;
5898       xm &= 0xffff'ffff;
5899       VIXL_FALLTHROUGH();
5900     case "umax_64u_minmax_imm"_h:
5901     case "umax_64_dp_2src"_h:
5902       WriteXRegister(dst, std::max(xn, xm));
5903       break;
5904     case "umin_32u_minmax_imm"_h:
5905     case "umin_32_dp_2src"_h:
5906       xn &= 0xffff'ffff;
5907       xm &= 0xffff'ffff;
5908       VIXL_FALLTHROUGH();
5909     case "umin_64u_minmax_imm"_h:
5910     case "umin_64_dp_2src"_h:
5911       WriteXRegister(dst, std::min(xn, xm));
5912       break;
5913   }
5914 }
5915 
5916 void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
5917   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
5918 
5919   uint64_t result = 0;
5920   // Extract and sign- or zero-extend 32-bit arguments for widening operations.
5921   uint64_t rn_u32 = ReadRegister<uint32_t>(instr->GetRn());
5922   uint64_t rm_u32 = ReadRegister<uint32_t>(instr->GetRm());
5923   int64_t rn_s32 = ReadRegister<int32_t>(instr->GetRn());
5924   int64_t rm_s32 = ReadRegister<int32_t>(instr->GetRm());
5925   uint64_t rn_u64 = ReadXRegister(instr->GetRn());
5926   uint64_t rm_u64 = ReadXRegister(instr->GetRm());
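       // All products below are formed in 64-bit arithmetic; for the W forms,
       // WriteRegister() with kWRegSize keeps only the low 32 bits, which is the
       // architecturally defined result.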
5927   switch (instr->Mask(DataProcessing3SourceMask)) {
5928     case MADD_w:
5929     case MADD_x:
5930       result = ReadXRegister(instr->GetRa()) + (rn_u64 * rm_u64);
5931       break;
5932     case MSUB_w:
5933     case MSUB_x:
5934       result = ReadXRegister(instr->GetRa()) - (rn_u64 * rm_u64);
5935       break;
5936     case SMADDL_x:
5937       result = ReadXRegister(instr->GetRa()) +
5938                static_cast<uint64_t>(rn_s32 * rm_s32);
5939       break;
5940     case SMSUBL_x:
5941       result = ReadXRegister(instr->GetRa()) -
5942                static_cast<uint64_t>(rn_s32 * rm_s32);
5943       break;
5944     case UMADDL_x:
5945       result = ReadXRegister(instr->GetRa()) + (rn_u32 * rm_u32);
5946       break;
5947     case UMSUBL_x:
5948       result = ReadXRegister(instr->GetRa()) - (rn_u32 * rm_u32);
5949       break;
5950     case UMULH_x:
5951       result =
5952           internal::MultiplyHigh<64>(ReadRegister<uint64_t>(instr->GetRn()),
5953                                      ReadRegister<uint64_t>(instr->GetRm()));
5954       break;
5955     case SMULH_x:
5956       result = internal::MultiplyHigh<64>(ReadXRegister(instr->GetRn()),
5957                                           ReadXRegister(instr->GetRm()));
5958       break;
5959     default:
5960       VIXL_UNIMPLEMENTED();
5961   }
5962   WriteRegister(reg_size, instr->GetRd(), result);
5963 }
5964 
5965 
5966 void Simulator::VisitBitfield(const Instruction* instr) {
5967   unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
5968   int64_t reg_mask = instr->GetSixtyFourBits() ? kXRegMask : kWRegMask;
5969   int R = instr->GetImmR();
5970   int S = instr->GetImmS();
5971 
5972   if (instr->GetSixtyFourBits() != instr->GetBitN()) {
5973     VisitUnallocated(instr);
5974   }
5975 
5976   if ((instr->GetSixtyFourBits() == 0) && ((S > 31) || (R > 31))) {
5977     VisitUnallocated(instr);
5978   }
5979 
5980   int diff = S - R;
5981   uint64_t mask;
5982   if (diff >= 0) {
5983     mask = ~UINT64_C(0) >> (64 - (diff + 1));
5984     mask = (static_cast<unsigned>(diff) < (reg_size - 1)) ? mask : reg_mask;
5985   } else {
5986     mask = ~UINT64_C(0) >> (64 - (S + 1));
5987     mask = RotateRight(mask, R, reg_size);
5988     diff += reg_size;
5989   }
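       // Illustrative example: "ubfx x0, x1, #4, #8" is UBFM with R == 4 and
       // S == 11, so diff == 7 and mask == 0xff; rotating the source right by R
       // brings bits [11:4] down to [7:0], where the mask selects just that field.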
5990 
5991   // inzero indicates whether the extracted bitfield is inserted into the
5992   // existing destination register value or into zero.
5993   // If extend is true, the extracted bitfield is sign-extended.
5994   bool inzero = false;
5995   bool extend = false;
5996   switch (instr->Mask(BitfieldMask)) {
5997     case BFM_x:
5998     case BFM_w:
5999       break;
6000     case SBFM_x:
6001     case SBFM_w:
6002       inzero = true;
6003       extend = true;
6004       break;
6005     case UBFM_x:
6006     case UBFM_w:
6007       inzero = true;
6008       break;
6009     default:
6010       VIXL_UNIMPLEMENTED();
6011   }
6012 
6013   uint64_t dst = inzero ? 0 : ReadRegister(reg_size, instr->GetRd());
6014   uint64_t src = ReadRegister(reg_size, instr->GetRn());
6015   // Rotate source bitfield into place.
6016   uint64_t result = RotateRight(src, R, reg_size);
6017   // Determine the sign extension.
6018   uint64_t topbits = (diff == 63) ? 0 : (~UINT64_C(0) << (diff + 1));
6019   uint64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
6020 
6021   // Merge sign extension, dest/zero and bitfield.
6022   result = signbits | (result & mask) | (dst & ~mask);
6023 
6024   WriteRegister(reg_size, instr->GetRd(), result);
6025 }
6026 
6027 
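     // EXTR extracts a register-sized window from the concatenation Rn:Rm starting
     // at bit <lsb> of Rm; for example, "extr x0, x1, x2, #8" computes
     // (x1 << 56) | (x2 >> 8). With Rn == Rm this implements the ROR-immediate
     // alias.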
6028 void Simulator::VisitExtract(const Instruction* instr) {
6029   unsigned lsb = instr->GetImmS();
6030   unsigned reg_size = (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize;
6031   uint64_t low_res =
6032       static_cast<uint64_t>(ReadRegister(reg_size, instr->GetRm())) >> lsb;
6033   uint64_t high_res = (lsb == 0)
6034                           ? 0
6035                           : ReadRegister<uint64_t>(reg_size, instr->GetRn())
6036                                 << (reg_size - lsb);
6037   WriteRegister(reg_size, instr->GetRd(), low_res | high_res);
6038 }
6039 
6040 
6041 void Simulator::VisitFPImmediate(const Instruction* instr) {
6042   AssertSupportedFPCR();
6043   unsigned dest = instr->GetRd();
6044   switch (instr->Mask(FPImmediateMask)) {
6045     case FMOV_h_imm:
6046       WriteHRegister(dest, Float16ToRawbits(instr->GetImmFP16()));
6047       break;
6048     case FMOV_s_imm:
6049       WriteSRegister(dest, instr->GetImmFP32());
6050       break;
6051     case FMOV_d_imm:
6052       WriteDRegister(dest, instr->GetImmFP64());
6053       break;
6054     default:
6055       VIXL_UNREACHABLE();
6056   }
6057 }
6058 
6059 
6060 void Simulator::VisitFPIntegerConvert(const Instruction* instr) {
6061   AssertSupportedFPCR();
6062 
6063   unsigned dst = instr->GetRd();
6064   unsigned src = instr->GetRn();
6065 
6066   FPRounding round = ReadRMode();
6067 
6068   switch (instr->Mask(FPIntegerConvertMask)) {
6069     case FCVTAS_wh:
6070       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieAway));
6071       break;
6072     case FCVTAS_xh:
6073       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieAway));
6074       break;
6075     case FCVTAS_ws:
6076       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieAway));
6077       break;
6078     case FCVTAS_xs:
6079       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieAway));
6080       break;
6081     case FCVTAS_wd:
6082       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieAway));
6083       break;
6084     case FCVTAS_xd:
6085       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieAway));
6086       break;
6087     case FCVTAU_wh:
6088       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieAway));
6089       break;
6090     case FCVTAU_xh:
6091       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieAway));
6092       break;
6093     case FCVTAU_ws:
6094       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieAway));
6095       break;
6096     case FCVTAU_xs:
6097       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieAway));
6098       break;
6099     case FCVTAU_wd:
6100       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieAway));
6101       break;
6102     case FCVTAU_xd:
6103       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieAway));
6104       break;
6105     case FCVTMS_wh:
6106       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPNegativeInfinity));
6107       break;
6108     case FCVTMS_xh:
6109       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPNegativeInfinity));
6110       break;
6111     case FCVTMS_ws:
6112       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPNegativeInfinity));
6113       break;
6114     case FCVTMS_xs:
6115       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPNegativeInfinity));
6116       break;
6117     case FCVTMS_wd:
6118       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPNegativeInfinity));
6119       break;
6120     case FCVTMS_xd:
6121       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPNegativeInfinity));
6122       break;
6123     case FCVTMU_wh:
6124       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPNegativeInfinity));
6125       break;
6126     case FCVTMU_xh:
6127       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPNegativeInfinity));
6128       break;
6129     case FCVTMU_ws:
6130       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPNegativeInfinity));
6131       break;
6132     case FCVTMU_xs:
6133       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPNegativeInfinity));
6134       break;
6135     case FCVTMU_wd:
6136       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPNegativeInfinity));
6137       break;
6138     case FCVTMU_xd:
6139       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPNegativeInfinity));
6140       break;
6141     case FCVTPS_wh:
6142       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPPositiveInfinity));
6143       break;
6144     case FCVTPS_xh:
6145       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPPositiveInfinity));
6146       break;
6147     case FCVTPS_ws:
6148       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPPositiveInfinity));
6149       break;
6150     case FCVTPS_xs:
6151       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPPositiveInfinity));
6152       break;
6153     case FCVTPS_wd:
6154       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPPositiveInfinity));
6155       break;
6156     case FCVTPS_xd:
6157       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPPositiveInfinity));
6158       break;
6159     case FCVTPU_wh:
6160       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPPositiveInfinity));
6161       break;
6162     case FCVTPU_xh:
6163       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPPositiveInfinity));
6164       break;
6165     case FCVTPU_ws:
6166       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPPositiveInfinity));
6167       break;
6168     case FCVTPU_xs:
6169       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPPositiveInfinity));
6170       break;
6171     case FCVTPU_wd:
6172       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPPositiveInfinity));
6173       break;
6174     case FCVTPU_xd:
6175       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPPositiveInfinity));
6176       break;
6177     case FCVTNS_wh:
6178       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieEven));
6179       break;
6180     case FCVTNS_xh:
6181       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieEven));
6182       break;
6183     case FCVTNS_ws:
6184       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieEven));
6185       break;
6186     case FCVTNS_xs:
6187       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieEven));
6188       break;
6189     case FCVTNS_wd:
6190       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieEven));
6191       break;
6192     case FCVTNS_xd:
6193       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieEven));
6194       break;
6195     case FCVTNU_wh:
6196       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieEven));
6197       break;
6198     case FCVTNU_xh:
6199       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieEven));
6200       break;
6201     case FCVTNU_ws:
6202       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieEven));
6203       break;
6204     case FCVTNU_xs:
6205       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieEven));
6206       break;
6207     case FCVTNU_wd:
6208       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieEven));
6209       break;
6210     case FCVTNU_xd:
6211       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieEven));
6212       break;
6213     case FCVTZS_wh:
6214       WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPZero));
6215       break;
6216     case FCVTZS_xh:
6217       WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPZero));
6218       break;
6219     case FCVTZS_ws:
6220       WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPZero));
6221       break;
6222     case FCVTZS_xs:
6223       WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPZero));
6224       break;
6225     case FCVTZS_wd:
6226       WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPZero));
6227       break;
6228     case FCVTZS_xd:
6229       WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPZero));
6230       break;
6231     case FCVTZU_wh:
6232       WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPZero));
6233       break;
6234     case FCVTZU_xh:
6235       WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPZero));
6236       break;
6237     case FCVTZU_ws:
6238       WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPZero));
6239       break;
6240     case FCVTZU_xs:
6241       WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPZero));
6242       break;
6243     case FCVTZU_wd:
6244       WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPZero));
6245       break;
6246     case FCVTZU_xd:
6247       WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPZero));
6248       break;
6249     case FJCVTZS:
6250       WriteWRegister(dst, FPToFixedJS(ReadDRegister(src)));
6251       break;
6252     case FMOV_hw:
6253       WriteHRegister(dst, ReadWRegister(src) & kHRegMask);
6254       break;
6255     case FMOV_wh:
6256       WriteWRegister(dst, ReadHRegisterBits(src));
6257       break;
6258     case FMOV_xh:
6259       WriteXRegister(dst, ReadHRegisterBits(src));
6260       break;
6261     case FMOV_hx:
6262       WriteHRegister(dst, ReadXRegister(src) & kHRegMask);
6263       break;
6264     case FMOV_ws:
6265       WriteWRegister(dst, ReadSRegisterBits(src));
6266       break;
6267     case FMOV_xd:
6268       WriteXRegister(dst, ReadDRegisterBits(src));
6269       break;
6270     case FMOV_sw:
6271       WriteSRegisterBits(dst, ReadWRegister(src));
6272       break;
6273     case FMOV_dx:
6274       WriteDRegisterBits(dst, ReadXRegister(src));
6275       break;
6276     case FMOV_d1_x:
6277       // Zero bits beyond the MSB of a Q register.
6278       mov(kFormat16B, ReadVRegister(dst), ReadVRegister(dst));
6279       LogicVRegister(ReadVRegister(dst))
6280           .SetUint(kFormatD, 1, ReadXRegister(src));
6281       break;
6282     case FMOV_x_d1:
6283       WriteXRegister(dst, LogicVRegister(ReadVRegister(src)).Uint(kFormatD, 1));
6284       break;
6285 
6286     // A 32-bit input can be handled in the same way as a 64-bit input, since
6287     // the sign- or zero-extension will not affect the conversion.
6288     case SCVTF_dx:
6289       WriteDRegister(dst, FixedToDouble(ReadXRegister(src), 0, round));
6290       break;
6291     case SCVTF_dw:
6292       WriteDRegister(dst, FixedToDouble(ReadWRegister(src), 0, round));
6293       break;
6294     case UCVTF_dx:
6295       WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), 0, round));
6296       break;
6297     case UCVTF_dw: {
6298       WriteDRegister(dst,
6299                      UFixedToDouble(ReadRegister<uint32_t>(src), 0, round));
6300       break;
6301     }
6302     case SCVTF_sx:
6303       WriteSRegister(dst, FixedToFloat(ReadXRegister(src), 0, round));
6304       break;
6305     case SCVTF_sw:
6306       WriteSRegister(dst, FixedToFloat(ReadWRegister(src), 0, round));
6307       break;
6308     case UCVTF_sx:
6309       WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), 0, round));
6310       break;
6311     case UCVTF_sw: {
6312       WriteSRegister(dst, UFixedToFloat(ReadRegister<uint32_t>(src), 0, round));
6313       break;
6314     }
6315     case SCVTF_hx:
6316       WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), 0, round));
6317       break;
6318     case SCVTF_hw:
6319       WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), 0, round));
6320       break;
6321     case UCVTF_hx:
6322       WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), 0, round));
6323       break;
6324     case UCVTF_hw: {
6325       WriteHRegister(dst,
6326                      UFixedToFloat16(ReadRegister<uint32_t>(src), 0, round));
6327       break;
6328     }
6329 
6330     default:
6331       VIXL_UNREACHABLE();
6332   }
6333 }
6334 
6335 
6336 void Simulator::VisitFPFixedPointConvert(const Instruction* instr) {
6337   AssertSupportedFPCR();
6338 
6339   unsigned dst = instr->GetRd();
6340   unsigned src = instr->GetRn();
6341   int fbits = 64 - instr->GetFPScale();
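       // The scale field encodes 64 - fbits; for example, a scale of 56 gives
       // fbits == 8, i.e. the fixed-point operand has 8 fractional bits.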
6342 
6343   FPRounding round = ReadRMode();
6344 
6345   switch (instr->Mask(FPFixedPointConvertMask)) {
6346     // A 32-bit input can be handled in the same way as a 64-bit input, since
6347     // the sign- or zero-extension will not affect the conversion.
6348     case SCVTF_dx_fixed:
6349       WriteDRegister(dst, FixedToDouble(ReadXRegister(src), fbits, round));
6350       break;
6351     case SCVTF_dw_fixed:
6352       WriteDRegister(dst, FixedToDouble(ReadWRegister(src), fbits, round));
6353       break;
6354     case UCVTF_dx_fixed:
6355       WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), fbits, round));
6356       break;
6357     case UCVTF_dw_fixed: {
6358       WriteDRegister(dst,
6359                      UFixedToDouble(ReadRegister<uint32_t>(src), fbits, round));
6360       break;
6361     }
6362     case SCVTF_sx_fixed:
6363       WriteSRegister(dst, FixedToFloat(ReadXRegister(src), fbits, round));
6364       break;
6365     case SCVTF_sw_fixed:
6366       WriteSRegister(dst, FixedToFloat(ReadWRegister(src), fbits, round));
6367       break;
6368     case UCVTF_sx_fixed:
6369       WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), fbits, round));
6370       break;
6371     case UCVTF_sw_fixed: {
6372       WriteSRegister(dst,
6373                      UFixedToFloat(ReadRegister<uint32_t>(src), fbits, round));
6374       break;
6375     }
6376     case SCVTF_hx_fixed:
6377       WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), fbits, round));
6378       break;
6379     case SCVTF_hw_fixed:
6380       WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), fbits, round));
6381       break;
6382     case UCVTF_hx_fixed:
6383       WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), fbits, round));
6384       break;
6385     case UCVTF_hw_fixed: {
6386       WriteHRegister(dst,
6387                      UFixedToFloat16(ReadRegister<uint32_t>(src),
6388                                      fbits,
6389                                      round));
6390       break;
6391     }
6392     case FCVTZS_xd_fixed:
6393       WriteXRegister(dst,
6394                      FPToInt64(ReadDRegister(src) * std::pow(2.0, fbits),
6395                                FPZero));
6396       break;
6397     case FCVTZS_wd_fixed:
6398       WriteWRegister(dst,
6399                      FPToInt32(ReadDRegister(src) * std::pow(2.0, fbits),
6400                                FPZero));
6401       break;
6402     case FCVTZU_xd_fixed:
6403       WriteXRegister(dst,
6404                      FPToUInt64(ReadDRegister(src) * std::pow(2.0, fbits),
6405                                 FPZero));
6406       break;
6407     case FCVTZU_wd_fixed:
6408       WriteWRegister(dst,
6409                      FPToUInt32(ReadDRegister(src) * std::pow(2.0, fbits),
6410                                 FPZero));
6411       break;
6412     case FCVTZS_xs_fixed:
6413       WriteXRegister(dst,
6414                      FPToInt64(ReadSRegister(src) * std::pow(2.0f, fbits),
6415                                FPZero));
6416       break;
6417     case FCVTZS_ws_fixed:
6418       WriteWRegister(dst,
6419                      FPToInt32(ReadSRegister(src) * std::pow(2.0f, fbits),
6420                                FPZero));
6421       break;
6422     case FCVTZU_xs_fixed:
6423       WriteXRegister(dst,
6424                      FPToUInt64(ReadSRegister(src) * std::pow(2.0f, fbits),
6425                                 FPZero));
6426       break;
6427     case FCVTZU_ws_fixed:
6428       WriteWRegister(dst,
6429                      FPToUInt32(ReadSRegister(src) * std::pow(2.0f, fbits),
6430                                 FPZero));
6431       break;
6432     case FCVTZS_xh_fixed: {
6433       double output =
6434           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
6435       WriteXRegister(dst, FPToInt64(output, FPZero));
6436       break;
6437     }
6438     case FCVTZS_wh_fixed: {
6439       double output =
6440           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
6441       WriteWRegister(dst, FPToInt32(output, FPZero));
6442       break;
6443     }
6444     case FCVTZU_xh_fixed: {
6445       double output =
6446           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
6447       WriteXRegister(dst, FPToUInt64(output, FPZero));
6448       break;
6449     }
6450     case FCVTZU_wh_fixed: {
6451       double output =
6452           static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits);
6453       WriteWRegister(dst, FPToUInt32(output, FPZero));
6454       break;
6455     }
6456     default:
6457       VIXL_UNREACHABLE();
6458   }
6459 }
6460 
6461 
6462 void Simulator::VisitFPCompare(const Instruction* instr) {
6463   AssertSupportedFPCR();
6464 
6465   FPTrapFlags trap = DisableTrap;
6466   switch (instr->Mask(FPCompareMask)) {
6467     case FCMPE_h:
6468       trap = EnableTrap;
6469       VIXL_FALLTHROUGH();
6470     case FCMP_h:
6471       FPCompare(ReadHRegister(instr->GetRn()),
6472                 ReadHRegister(instr->GetRm()),
6473                 trap);
6474       break;
6475     case FCMPE_s:
6476       trap = EnableTrap;
6477       VIXL_FALLTHROUGH();
6478     case FCMP_s:
6479       FPCompare(ReadSRegister(instr->GetRn()),
6480                 ReadSRegister(instr->GetRm()),
6481                 trap);
6482       break;
6483     case FCMPE_d:
6484       trap = EnableTrap;
6485       VIXL_FALLTHROUGH();
6486     case FCMP_d:
6487       FPCompare(ReadDRegister(instr->GetRn()),
6488                 ReadDRegister(instr->GetRm()),
6489                 trap);
6490       break;
6491     case FCMPE_h_zero:
6492       trap = EnableTrap;
6493       VIXL_FALLTHROUGH();
6494     case FCMP_h_zero:
6495       FPCompare(ReadHRegister(instr->GetRn()), SimFloat16(0.0), trap);
6496       break;
6497     case FCMPE_s_zero:
6498       trap = EnableTrap;
6499       VIXL_FALLTHROUGH();
6500     case FCMP_s_zero:
6501       FPCompare(ReadSRegister(instr->GetRn()), 0.0f, trap);
6502       break;
6503     case FCMPE_d_zero:
6504       trap = EnableTrap;
6505       VIXL_FALLTHROUGH();
6506     case FCMP_d_zero:
6507       FPCompare(ReadDRegister(instr->GetRn()), 0.0, trap);
6508       break;
6509     default:
6510       VIXL_UNIMPLEMENTED();
6511   }
6512 }
6513 
6514 
6515 void Simulator::VisitFPConditionalCompare(const Instruction* instr) {
6516   AssertSupportedFPCR();
6517 
6518   FPTrapFlags trap = DisableTrap;
6519   switch (instr->Mask(FPConditionalCompareMask)) {
6520     case FCCMPE_h:
6521       trap = EnableTrap;
6522       VIXL_FALLTHROUGH();
6523     case FCCMP_h:
6524       if (ConditionPassed(instr->GetCondition())) {
6525         FPCompare(ReadHRegister(instr->GetRn()),
6526                   ReadHRegister(instr->GetRm()),
6527                   trap);
6528       } else {
6529         ReadNzcv().SetFlags(instr->GetNzcv());
6530         LogSystemRegister(NZCV);
6531       }
6532       break;
6533     case FCCMPE_s:
6534       trap = EnableTrap;
6535       VIXL_FALLTHROUGH();
6536     case FCCMP_s:
6537       if (ConditionPassed(instr->GetCondition())) {
6538         FPCompare(ReadSRegister(instr->GetRn()),
6539                   ReadSRegister(instr->GetRm()),
6540                   trap);
6541       } else {
6542         ReadNzcv().SetFlags(instr->GetNzcv());
6543         LogSystemRegister(NZCV);
6544       }
6545       break;
6546     case FCCMPE_d:
6547       trap = EnableTrap;
6548       VIXL_FALLTHROUGH();
6549     case FCCMP_d:
6550       if (ConditionPassed(instr->GetCondition())) {
6551         FPCompare(ReadDRegister(instr->GetRn()),
6552                   ReadDRegister(instr->GetRm()),
6553                   trap);
6554       } else {
6555         ReadNzcv().SetFlags(instr->GetNzcv());
6556         LogSystemRegister(NZCV);
6557       }
6558       break;
6559     default:
6560       VIXL_UNIMPLEMENTED();
6561   }
6562 }
6563 
6564 
6565 void Simulator::VisitFPConditionalSelect(const Instruction* instr) {
6566   AssertSupportedFPCR();
6567 
6568   Instr selected;
6569   if (ConditionPassed(instr->GetCondition())) {
6570     selected = instr->GetRn();
6571   } else {
6572     selected = instr->GetRm();
6573   }
6574 
6575   switch (instr->Mask(FPConditionalSelectMask)) {
6576     case FCSEL_h:
6577       WriteHRegister(instr->GetRd(), ReadHRegister(selected));
6578       break;
6579     case FCSEL_s:
6580       WriteSRegister(instr->GetRd(), ReadSRegister(selected));
6581       break;
6582     case FCSEL_d:
6583       WriteDRegister(instr->GetRd(), ReadDRegister(selected));
6584       break;
6585     default:
6586       VIXL_UNIMPLEMENTED();
6587   }
6588 }
6589 
6590 
6591 void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) {
6592   AssertSupportedFPCR();
6593 
6594   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
6595   VectorFormat vform;
6596   switch (instr->Mask(FPTypeMask)) {
6597     default:
6598       VIXL_UNREACHABLE_OR_FALLTHROUGH();
6599     case FP64:
6600       vform = kFormatD;
6601       break;
6602     case FP32:
6603       vform = kFormatS;
6604       break;
6605     case FP16:
6606       vform = kFormatH;
6607       break;
6608   }
6609 
6610   SimVRegister& rd = ReadVRegister(instr->GetRd());
6611   SimVRegister& rn = ReadVRegister(instr->GetRn());
6612   bool inexact_exception = false;
6613   FrintMode frint_mode = kFrintToInteger;
6614 
6615   unsigned fd = instr->GetRd();
6616   unsigned fn = instr->GetRn();
6617 
6618   switch (instr->Mask(FPDataProcessing1SourceMask)) {
6619     case FMOV_h:
6620       WriteHRegister(fd, ReadHRegister(fn));
6621       return;
6622     case FMOV_s:
6623       WriteSRegister(fd, ReadSRegister(fn));
6624       return;
6625     case FMOV_d:
6626       WriteDRegister(fd, ReadDRegister(fn));
6627       return;
6628     case FABS_h:
6629     case FABS_s:
6630     case FABS_d:
6631       fabs_(vform, ReadVRegister(fd), ReadVRegister(fn));
6632       // Explicitly log the register update whilst we have type information.
6633       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
6634       return;
6635     case FNEG_h:
6636     case FNEG_s:
6637     case FNEG_d:
6638       fneg(vform, ReadVRegister(fd), ReadVRegister(fn));
6639       // Explicitly log the register update whilst we have type information.
6640       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
6641       return;
6642     case FCVT_ds:
6643       WriteDRegister(fd, FPToDouble(ReadSRegister(fn), ReadDN()));
6644       return;
6645     case FCVT_sd:
6646       WriteSRegister(fd, FPToFloat(ReadDRegister(fn), FPTieEven, ReadDN()));
6647       return;
6648     case FCVT_hs:
6649       WriteHRegister(fd,
6650                      Float16ToRawbits(
6651                          FPToFloat16(ReadSRegister(fn), FPTieEven, ReadDN())));
6652       return;
6653     case FCVT_sh:
6654       WriteSRegister(fd, FPToFloat(ReadHRegister(fn), ReadDN()));
6655       return;
6656     case FCVT_dh:
6657       WriteDRegister(fd, FPToDouble(ReadHRegister(fn), ReadDN()));
6658       return;
6659     case FCVT_hd:
6660       WriteHRegister(fd,
6661                      Float16ToRawbits(
6662                          FPToFloat16(ReadDRegister(fn), FPTieEven, ReadDN())));
6663       return;
6664     case FSQRT_h:
6665     case FSQRT_s:
6666     case FSQRT_d:
6667       fsqrt(vform, rd, rn);
6668       // Explicitly log the register update whilst we have type information.
6669       LogVRegister(fd, GetPrintRegisterFormatFP(vform));
6670       return;
6671     case FRINT32X_s:
6672     case FRINT32X_d:
6673       inexact_exception = true;
6674       frint_mode = kFrintToInt32;
6675       break;  // Use FPCR rounding mode.
6676     case FRINT64X_s:
6677     case FRINT64X_d:
6678       inexact_exception = true;
6679       frint_mode = kFrintToInt64;
6680       break;  // Use FPCR rounding mode.
6681     case FRINT32Z_s:
6682     case FRINT32Z_d:
6683       inexact_exception = true;
6684       frint_mode = kFrintToInt32;
6685       fpcr_rounding = FPZero;
6686       break;
6687     case FRINT64Z_s:
6688     case FRINT64Z_d:
6689       inexact_exception = true;
6690       frint_mode = kFrintToInt64;
6691       fpcr_rounding = FPZero;
6692       break;
6693     case FRINTI_h:
6694     case FRINTI_s:
6695     case FRINTI_d:
6696       break;  // Use FPCR rounding mode.
6697     case FRINTX_h:
6698     case FRINTX_s:
6699     case FRINTX_d:
6700       inexact_exception = true;
6701       break;
6702     case FRINTA_h:
6703     case FRINTA_s:
6704     case FRINTA_d:
6705       fpcr_rounding = FPTieAway;
6706       break;
6707     case FRINTM_h:
6708     case FRINTM_s:
6709     case FRINTM_d:
6710       fpcr_rounding = FPNegativeInfinity;
6711       break;
6712     case FRINTN_h:
6713     case FRINTN_s:
6714     case FRINTN_d:
6715       fpcr_rounding = FPTieEven;
6716       break;
6717     case FRINTP_h:
6718     case FRINTP_s:
6719     case FRINTP_d:
6720       fpcr_rounding = FPPositiveInfinity;
6721       break;
6722     case FRINTZ_h:
6723     case FRINTZ_s:
6724     case FRINTZ_d:
6725       fpcr_rounding = FPZero;
6726       break;
6727     default:
6728       VIXL_UNIMPLEMENTED();
6729   }
6730 
6731   // Only FRINT* instructions fall through the switch above.
6732   frint(vform, rd, rn, fpcr_rounding, inexact_exception, frint_mode);
6733   // Explicitly log the register update whilst we have type information.
6734   LogVRegister(fd, GetPrintRegisterFormatFP(vform));
6735 }
6736 
6737 
6738 void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) {
6739   AssertSupportedFPCR();
6740 
6741   VectorFormat vform;
6742   switch (instr->Mask(FPTypeMask)) {
6743     default:
6744       VIXL_UNREACHABLE_OR_FALLTHROUGH();
6745     case FP64:
6746       vform = kFormatD;
6747       break;
6748     case FP32:
6749       vform = kFormatS;
6750       break;
6751     case FP16:
6752       vform = kFormatH;
6753       break;
6754   }
6755   SimVRegister& rd = ReadVRegister(instr->GetRd());
6756   SimVRegister& rn = ReadVRegister(instr->GetRn());
6757   SimVRegister& rm = ReadVRegister(instr->GetRm());
6758 
6759   switch (instr->Mask(FPDataProcessing2SourceMask)) {
6760     case FADD_h:
6761     case FADD_s:
6762     case FADD_d:
6763       fadd(vform, rd, rn, rm);
6764       break;
6765     case FSUB_h:
6766     case FSUB_s:
6767     case FSUB_d:
6768       fsub(vform, rd, rn, rm);
6769       break;
6770     case FMUL_h:
6771     case FMUL_s:
6772     case FMUL_d:
6773       fmul(vform, rd, rn, rm);
6774       break;
6775     case FNMUL_h:
6776     case FNMUL_s:
6777     case FNMUL_d:
6778       fnmul(vform, rd, rn, rm);
6779       break;
6780     case FDIV_h:
6781     case FDIV_s:
6782     case FDIV_d:
6783       fdiv(vform, rd, rn, rm);
6784       break;
6785     case FMAX_h:
6786     case FMAX_s:
6787     case FMAX_d:
6788       fmax(vform, rd, rn, rm);
6789       break;
6790     case FMIN_h:
6791     case FMIN_s:
6792     case FMIN_d:
6793       fmin(vform, rd, rn, rm);
6794       break;
6795     case FMAXNM_h:
6796     case FMAXNM_s:
6797     case FMAXNM_d:
6798       fmaxnm(vform, rd, rn, rm);
6799       break;
6800     case FMINNM_h:
6801     case FMINNM_s:
6802     case FMINNM_d:
6803       fminnm(vform, rd, rn, rm);
6804       break;
6805     default:
6806       VIXL_UNREACHABLE();
6807   }
6808   // Explicitly log the register update whilst we have type information.
6809   LogVRegister(instr->GetRd(), GetPrintRegisterFormatFP(vform));
6810 }
6811 
6812 
6813 void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
6814   AssertSupportedFPCR();
6815 
6816   unsigned fd = instr->GetRd();
6817   unsigned fn = instr->GetRn();
6818   unsigned fm = instr->GetRm();
6819   unsigned fa = instr->GetRa();
6820 
6821   switch (instr->Mask(FPDataProcessing3SourceMask)) {
6822     // fd = fa +/- (fn * fm)
6823     case FMADD_h:
6824       WriteHRegister(fd,
6825                      FPMulAdd(ReadHRegister(fa),
6826                               ReadHRegister(fn),
6827                               ReadHRegister(fm)));
6828       break;
6829     case FMSUB_h:
6830       WriteHRegister(fd,
6831                      FPMulAdd(ReadHRegister(fa),
6832                               -ReadHRegister(fn),
6833                               ReadHRegister(fm)));
6834       break;
6835     case FMADD_s:
6836       WriteSRegister(fd,
6837                      FPMulAdd(ReadSRegister(fa),
6838                               ReadSRegister(fn),
6839                               ReadSRegister(fm)));
6840       break;
6841     case FMSUB_s:
6842       WriteSRegister(fd,
6843                      FPMulAdd(ReadSRegister(fa),
6844                               -ReadSRegister(fn),
6845                               ReadSRegister(fm)));
6846       break;
6847     case FMADD_d:
6848       WriteDRegister(fd,
6849                      FPMulAdd(ReadDRegister(fa),
6850                               ReadDRegister(fn),
6851                               ReadDRegister(fm)));
6852       break;
6853     case FMSUB_d:
6854       WriteDRegister(fd,
6855                      FPMulAdd(ReadDRegister(fa),
6856                               -ReadDRegister(fn),
6857                               ReadDRegister(fm)));
6858       break;
6859     // Negated variants of the above.
6860     case FNMADD_h:
6861       WriteHRegister(fd,
6862                      FPMulAdd(-ReadHRegister(fa),
6863                               -ReadHRegister(fn),
6864                               ReadHRegister(fm)));
6865       break;
6866     case FNMSUB_h:
6867       WriteHRegister(fd,
6868                      FPMulAdd(-ReadHRegister(fa),
6869                               ReadHRegister(fn),
6870                               ReadHRegister(fm)));
6871       break;
6872     case FNMADD_s:
6873       WriteSRegister(fd,
6874                      FPMulAdd(-ReadSRegister(fa),
6875                               -ReadSRegister(fn),
6876                               ReadSRegister(fm)));
6877       break;
6878     case FNMSUB_s:
6879       WriteSRegister(fd,
6880                      FPMulAdd(-ReadSRegister(fa),
6881                               ReadSRegister(fn),
6882                               ReadSRegister(fm)));
6883       break;
6884     case FNMADD_d:
6885       WriteDRegister(fd,
6886                      FPMulAdd(-ReadDRegister(fa),
6887                               -ReadDRegister(fn),
6888                               ReadDRegister(fm)));
6889       break;
6890     case FNMSUB_d:
6891       WriteDRegister(fd,
6892                      FPMulAdd(-ReadDRegister(fa),
6893                               ReadDRegister(fn),
6894                               ReadDRegister(fm)));
6895       break;
6896     default:
6897       VIXL_UNIMPLEMENTED();
6898   }
6899 }
6900 
6901 
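     // Returns true if either operand is a NaN; in that case the NaN-propagated
     // result has already been written to the destination register and the caller
     // is expected to skip the arithmetic operation itself.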
6902 bool Simulator::FPProcessNaNs(const Instruction* instr) {
6903   unsigned fd = instr->GetRd();
6904   unsigned fn = instr->GetRn();
6905   unsigned fm = instr->GetRm();
6906   bool done = false;
6907 
6908   if (instr->Mask(FP64) == FP64) {
6909     double result = FPProcessNaNs(ReadDRegister(fn), ReadDRegister(fm));
6910     if (IsNaN(result)) {
6911       WriteDRegister(fd, result);
6912       done = true;
6913     }
6914   } else if (instr->Mask(FP32) == FP32) {
6915     float result = FPProcessNaNs(ReadSRegister(fn), ReadSRegister(fm));
6916     if (IsNaN(result)) {
6917       WriteSRegister(fd, result);
6918       done = true;
6919     }
6920   } else {
6921     VIXL_ASSERT(instr->Mask(FP16) == FP16);
6922     VIXL_UNIMPLEMENTED();
6923   }
6924 
6925   return done;
6926 }
6927 
6928 
6929 bool Simulator::SysOp_W(int op, int64_t val) {
6930   switch (op) {
6931     case IVAU:
6932     case CVAC:
6933     case CVAU:
6934     case CVAP:
6935     case CVADP:
6936     case CIVAC:
6937     case CGVAC:
6938     case CGDVAC:
6939     case CGVAP:
6940     case CGDVAP:
6941     case CIGVAC:
6942     case CIGDVAC: {
6943       // Perform a placeholder memory access to ensure that we have read access
6944       // to the specified address. The read access does not require a tag match,
6945       // so temporarily disable MTE.
6946       bool mte_enabled = MetaDataDepot::MetaDataMTE::IsActive();
6947       MetaDataDepot::MetaDataMTE::SetActive(false);
6948       volatile uint8_t y = *MemRead<uint8_t>(val);
6949       MetaDataDepot::MetaDataMTE::SetActive(mte_enabled);
6950       USE(y);
6951       break;
6952     }
6953     case ZVA: {
6954       if ((dczid_ & 0x10) != 0) {  // Check dc zva is enabled.
6955         return false;
6956       }
6957       int blocksize = (1 << (dczid_ & 0xf)) * kWRegSizeInBytes;
6958       VIXL_ASSERT(IsMultiple(blocksize, sizeof(uint64_t)));
6959       uintptr_t addr = AlignDown(val, blocksize);
6960       for (int i = 0; i < blocksize; i += sizeof(uint64_t)) {
6961         MemWrite<uint64_t>(addr + i, 0);
6962         LogWriteU64(0, addr + i);
6963       }
6964       break;
6965     }
6966     // TODO: Implement GVA, GZVA.
6967     default:
6968       VIXL_UNIMPLEMENTED();
6969       return false;
6970   }
6971   return true;
6972 }
6973 
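     // Helper for the hint-space PAC/AUT forms: dst is x17 (the *1716 variants) or
     // the link register x30 (the *sp and *z variants); src is x16, SP (31), or -1
     // to request a zero modifier.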
6974 void Simulator::PACHelper(int dst,
6975                           int src,
6976                           PACKey key,
6977                           decltype(&Simulator::AddPAC) pac_fn) {
6978   VIXL_ASSERT((dst == 17) || (dst == 30));
6979   VIXL_ASSERT((src == -1) || (src == 16) || (src == 31));
6980 
6981   uint64_t modifier = (src == -1) ? 0 : ReadXRegister(src, Reg31IsStackPointer);
6982   uint64_t result =
6983       (this->*pac_fn)(ReadXRegister(dst), modifier, key, kInstructionPointer);
6984   WriteXRegister(dst, result);
6985 }
6986 
6987 void Simulator::VisitSystem(const Instruction* instr) {
6988   PACKey pac_key = kPACKeyIA;  // Default key for PAC/AUTH handling.
6989 
6990   switch (form_hash_) {
6991     case "cfinv_m_pstate"_h:
6992       ReadNzcv().SetC(!ReadC());
6993       break;
6994     case "axflag_m_pstate"_h:
6995       ReadNzcv().SetN(0);
6996       ReadNzcv().SetZ(ReadNzcv().GetZ() | ReadNzcv().GetV());
6997       ReadNzcv().SetC(ReadNzcv().GetC() & ~ReadNzcv().GetV());
6998       ReadNzcv().SetV(0);
6999       break;
7000     case "xaflag_m_pstate"_h: {
7001       // Can't set the flags in place due to the logical dependencies.
7002       uint32_t n = (~ReadNzcv().GetC() & ~ReadNzcv().GetZ()) & 1;
7003       uint32_t z = ReadNzcv().GetZ() & ReadNzcv().GetC();
7004       uint32_t c = ReadNzcv().GetC() | ReadNzcv().GetZ();
7005       uint32_t v = ~ReadNzcv().GetC() & ReadNzcv().GetZ();
7006       ReadNzcv().SetN(n);
7007       ReadNzcv().SetZ(z);
7008       ReadNzcv().SetC(c);
7009       ReadNzcv().SetV(v);
7010       break;
7011     }
7012     case "xpaclri_hi_hints"_h:
7013       WriteXRegister(30, StripPAC(ReadXRegister(30), kInstructionPointer));
7014       break;
7015     case "clrex_bn_barriers"_h:
7016       PrintExclusiveAccessWarning();
7017       ClearLocalMonitor();
7018       break;
7019     case "msr_sr_systemmove"_h:
7020       switch (instr->GetImmSystemRegister()) {
7021         case NZCV:
7022           ReadNzcv().SetRawValue(ReadWRegister(instr->GetRt()));
7023           LogSystemRegister(NZCV);
7024           break;
7025         case FPCR:
7026           ReadFpcr().SetRawValue(ReadWRegister(instr->GetRt()));
7027           LogSystemRegister(FPCR);
7028           break;
7029         default:
7030           VIXL_UNIMPLEMENTED();
7031       }
7032       break;
7033     case "mrs_rs_systemmove"_h:
7034       switch (instr->GetImmSystemRegister()) {
7035         case NZCV:
7036           WriteXRegister(instr->GetRt(), ReadNzcv().GetRawValue());
7037           break;
7038         case FPCR:
7039           WriteXRegister(instr->GetRt(), ReadFpcr().GetRawValue());
7040           break;
7041         case RNDR:
7042         case RNDRRS: {
7043           uint64_t high = rand_gen_();
7044           uint64_t low = rand_gen_();
7045           uint64_t rand_num = (high << 32) | (low & 0xffffffff);
7046           WriteXRegister(instr->GetRt(), rand_num);
7047           // Simulate successful random number generation.
7048           // TODO: Occasionally report failure, since real hardware may be
7049           // unable to return a random number within a reasonable time.
7050           ReadNzcv().SetRawValue(NoFlag);
7051           LogSystemRegister(NZCV);
7052           break;
7053         }
7054         case DCZID_EL0:
7055           WriteXRegister(instr->GetRt(), dczid_);
7056           break;
7057         default:
7058           VIXL_UNIMPLEMENTED();
7059       }
7060       break;
7061     case "chkfeat_hf_hints"_h: {
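           // CHKFEAT: each bit set in X16 selects a feature to query, and the bit
           // is cleared if that feature is enabled; only GCS (bit 0) is reported
           // here.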
7062       uint64_t feat_select = ReadXRegister(16);
7063       uint64_t gcs_enabled = IsGCSCheckEnabled() ? 1 : 0;
7064       feat_select &= ~gcs_enabled;
7065       WriteXRegister(16, feat_select);
7066       break;
7067     }
7068     case "hint_hm_hints"_h:
7069     case "nop_hi_hints"_h:
7070     case "esb_hi_hints"_h:
7071     case "csdb_hi_hints"_h:
7072       break;
7073     case "bti_hb_hints"_h:
7074       switch (instr->GetImmHint()) {
7075         case BTI_jc:
7076           break;
7077         case BTI:
7078           if (PcIsInGuardedPage() && (ReadBType() != DefaultBType)) {
7079             VIXL_ABORT_WITH_MSG("Executing BTI with wrong BType.");
7080           }
7081           break;
7082         case BTI_c:
7083           if (PcIsInGuardedPage() &&
7084               (ReadBType() == BranchFromGuardedNotToIP)) {
7085             VIXL_ABORT_WITH_MSG("Executing BTI c with wrong BType.");
7086           }
7087           break;
7088         case BTI_j:
7089           if (PcIsInGuardedPage() && (ReadBType() == BranchAndLink)) {
7090             VIXL_ABORT_WITH_MSG("Executing BTI j with wrong BType.");
7091           }
7092           break;
7093         default:
7094           VIXL_UNREACHABLE();
7095       }
7096       return;
7097     case "pacib1716_hi_hints"_h:
7098       pac_key = kPACKeyIB;
7099       VIXL_FALLTHROUGH();
7100     case "pacia1716_hi_hints"_h:
7101       PACHelper(17, 16, pac_key, &Simulator::AddPAC);
7102       break;
7103     case "pacibsp_hi_hints"_h:
7104       pac_key = kPACKeyIB;
7105       VIXL_FALLTHROUGH();
7106     case "paciasp_hi_hints"_h:
7107       PACHelper(30, 31, pac_key, &Simulator::AddPAC);
7108 
7109       // Check BType allows PACI[AB]SP instructions.
7110       if (PcIsInGuardedPage()) {
7111         switch (ReadBType()) {
7112           case BranchFromGuardedNotToIP:
7113           // TODO: This case depends on the value of SCTLR_EL1.BT0, which we
7114           // assume here to be zero. This allows execution of PACI[AB]SP when
7115           // BTYPE is BranchFromGuardedNotToIP (0b11).
7116           case DefaultBType:
7117           case BranchFromUnguardedOrToIP:
7118           case BranchAndLink:
7119             break;
7120         }
7121       }
7122       break;
7123     case "pacibz_hi_hints"_h:
7124       pac_key = kPACKeyIB;
7125       VIXL_FALLTHROUGH();
7126     case "paciaz_hi_hints"_h:
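      // PACIAZ/PACIBZ: add a PAC to X30 with a zero modifier (selected here
      // by the -1 register code).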
7127       PACHelper(30, -1, pac_key, &Simulator::AddPAC);
7128       break;
7129     case "autib1716_hi_hints"_h:
7130       pac_key = kPACKeyIB;
7131       VIXL_FALLTHROUGH();
7132     case "autia1716_hi_hints"_h:
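      // The auti* forms mirror the paci* forms above, but authenticate an
      // existing PAC instead of adding one.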
7133       PACHelper(17, 16, pac_key, &Simulator::AuthPAC);
7134       break;
7135     case "autibsp_hi_hints"_h:
7136       pac_key = kPACKeyIB;
7137       VIXL_FALLTHROUGH();
7138     case "autiasp_hi_hints"_h:
7139       PACHelper(30, 31, pac_key, &Simulator::AuthPAC);
7140       break;
7141     case "autibz_hi_hints"_h:
7142       pac_key = kPACKeyIB;
7143       VIXL_FALLTHROUGH();
7144     case "autiaz_hi_hints"_h:
7145       PACHelper(30, -1, pac_key, &Simulator::AuthPAC);
7146       break;
7147     case "dsb_bo_barriers"_h:
7148     case "dmb_bo_barriers"_h:
7149     case "isb_bi_barriers"_h:
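      // DSB, DMB and ISB are all modelled conservatively as a full host
      // memory barrier.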
7150       VIXL_SYNC();
7151       break;
7152     case "sys_cr_systeminstrs"_h: {
7153       uint64_t rt = ReadXRegister(instr->GetRt());
7154       uint32_t sysop = instr->GetSysOp();
7155       if (sysop == GCSSS1) {
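        // GCSSS1 <Xt>: Xt holds the incoming stack's seal, with the expected
        // stack size in the top 32 bits and the GCS index below. Switch to
        // that stack and validate the seal popped from it.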
7156         uint64_t incoming_size = rt >> 32;
7157         // Drop upper 32 bits to get GCS index.
7158         uint64_t incoming_gcs = rt & 0xffffffff;
7159         uint64_t outgoing_gcs = ActivateGCS(incoming_gcs);
7160         uint64_t incoming_seal = GCSPop();
7161         if (((incoming_seal ^ rt) != 1) ||
7162             (GetActiveGCSPtr()->size() != incoming_size)) {
7163           char msg[128];
7164           snprintf(msg,
7165                    sizeof(msg),
7166                    "GCS: invalid incoming stack: 0x%016" PRIx64 "\n",
7167                    incoming_seal);
7168           ReportGCSFailure(msg);
7169         }
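        // Leave a token (low bits 0b101) on the outgoing stack for GCSSS2 to
        // find and validate.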
7170         GCSPush(outgoing_gcs + 5);
7171       } else if (sysop == GCSPUSHM) {
7172         GCSPush(ReadXRegister(instr->GetRt()));
7173       } else {
7174         if (!SysOp_W(sysop, rt)) {
7175           VisitUnallocated(instr);
7176         }
7177       }
7178       break;
7179     }
7180     case "sysl_rc_systeminstrs"_h: {
7181       uint32_t sysop = instr->GetSysOp();
7182       if (sysop == GCSPOPM) {
7183         uint64_t addr = GCSPop();
7184         WriteXRegister(instr->GetRt(), addr);
7185       } else if (sysop == GCSSS2) {
7186         uint64_t outgoing_gcs = GCSPop();
7187         // Check for token inserted by gcsss1.
7188         if ((outgoing_gcs & 7) != 5) {
7189           char msg[128];
7190           snprintf(msg,
7191                    sizeof(msg),
7192                    "GCS: outgoing stack has no token: 0x%016" PRIx64 "\n",
7193                    outgoing_gcs);
7194           ReportGCSFailure(msg);
7195         }
7196         uint64_t incoming_gcs = ActivateGCS(outgoing_gcs);
7197         outgoing_gcs &= ~UINT64_C(0x3ff);
7198 
7199         // Encode the size into the outgoing stack seal, to check later.
7200         uint64_t size = GetActiveGCSPtr()->size();
7201         VIXL_ASSERT(IsUint32(size));
7202         VIXL_ASSERT(IsUint32(outgoing_gcs + 1));
7203         uint64_t outgoing_seal = (size << 32) | (outgoing_gcs + 1);
7204         GCSPush(outgoing_seal);
7205         ActivateGCS(incoming_gcs);
7206         WriteXRegister(instr->GetRt(), outgoing_seal - 1);
7207       } else {
7208         VIXL_UNIMPLEMENTED();
7209       }
7210       break;
7211     }
7212     default:
7213       VIXL_UNIMPLEMENTED();
7214   }
7215 }
7216 
7217 
7218 void Simulator::VisitException(const Instruction* instr) {
7219   switch (instr->Mask(ExceptionMask)) {
7220     case HLT:
7221       switch (instr->GetImmException()) {
7222         case kUnreachableOpcode:
7223           DoUnreachable(instr);
7224           return;
7225         case kTraceOpcode:
7226           DoTrace(instr);
7227           return;
7228         case kLogOpcode:
7229           DoLog(instr);
7230           return;
7231         case kPrintfOpcode:
7232           DoPrintf(instr);
7233           return;
7234         case kRuntimeCallOpcode:
7235           DoRuntimeCall(instr);
7236           return;
7237         case kSetCPUFeaturesOpcode:
7238         case kEnableCPUFeaturesOpcode:
7239         case kDisableCPUFeaturesOpcode:
7240           DoConfigureCPUFeatures(instr);
7241           return;
7242         case kSaveCPUFeaturesOpcode:
7243           DoSaveCPUFeatures(instr);
7244           return;
7245         case kRestoreCPUFeaturesOpcode:
7246           DoRestoreCPUFeatures(instr);
7247           return;
7248         case kMTEActive:
7249           MetaDataDepot::MetaDataMTE::SetActive(true);
7250           return;
7251         case kMTEInactive:
7252           MetaDataDepot::MetaDataMTE::SetActive(false);
7253           return;
7254         default:
7255           HostBreakpoint();
7256           return;
7257       }
7258     case BRK:
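      // With the interactive debugger enabled, register a breakpoint on the
      // following instruction so the debugger regains control after the BRK;
      // otherwise forward the breakpoint to the host.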
7259       if (debugger_enabled_) {
7260         uint64_t next_instr =
7261             reinterpret_cast<uint64_t>(pc_->GetNextInstruction());
7262         if (!debugger_->IsBreakpoint(next_instr)) {
7263           debugger_->RegisterBreakpoint(next_instr);
7264         }
7265       } else {
7266         HostBreakpoint();
7267       }
7268       return;
7269     default:
7270       VIXL_UNIMPLEMENTED();
7271   }
7272 }
7273 
7274 
7275 void Simulator::VisitCrypto2RegSHA(const Instruction* instr) {
7276   SimVRegister& rd = ReadVRegister(instr->GetRd());
7277   SimVRegister& rn = ReadVRegister(instr->GetRn());
7278 
7279   switch (form_hash_) {
7280     case "sha1h_ss_cryptosha2"_h:
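      // SHA1H: ROL(Sn, 30), implemented as a rotate right by 2.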
7281       ror(kFormatS, rd, rn, 2);
7282       break;
7283     case "sha1su1_vv_cryptosha2"_h: {
7284       SimVRegister temp;
7285 
7286       // temp = srcdst ^ (src >> 32);
7287       ext(kFormat16B, temp, rn, temp, 4);
7288       eor(kFormat16B, temp, rd, temp);
7289 
7290       // srcdst = ROL(temp, 1) ^ (ROL(temp, 2) << 96)
7291       rol(kFormat4S, rd, temp, 1);
7292       rol(kFormatS, temp, temp, 2);  // kFormatS will zero bits <127:32>
7293       ext(kFormat16B, temp, temp, temp, 4);
7294       eor(kFormat16B, rd, rd, temp);
7295       break;
7296     }
7297     case "sha256su0_vv_cryptosha2"_h:
7298       sha2su0(rd, rn);
7299       break;
7300   }
7301 }
7302 
7303 
7304 void Simulator::VisitCrypto3RegSHA(const Instruction* instr) {
7305   SimVRegister& rd = ReadVRegister(instr->GetRd());
7306   SimVRegister& rn = ReadVRegister(instr->GetRn());
7307   SimVRegister& rm = ReadVRegister(instr->GetRm());
7308 
7309   switch (form_hash_) {
7310     case "sha1c_qsv_cryptosha3"_h:
7311       sha1<"choose"_h>(rd, rn, rm);
7312       break;
7313     case "sha1m_qsv_cryptosha3"_h:
7314       sha1<"majority"_h>(rd, rn, rm);
7315       break;
7316     case "sha1p_qsv_cryptosha3"_h:
7317       sha1<"parity"_h>(rd, rn, rm);
7318       break;
7319     case "sha1su0_vvv_cryptosha3"_h: {
7320       SimVRegister temp;
7321       ext(kFormat16B, temp, rd, rn, 8);
7322       eor(kFormat16B, temp, temp, rd);
7323       eor(kFormat16B, rd, temp, rm);
7324       break;
7325     }
7326     case "sha256h_qqv_cryptosha3"_h:
7327       sha2h(rd, rn, rm, /* part1 = */ true);
7328       break;
7329     case "sha256h2_qqv_cryptosha3"_h:
7330       sha2h(rd, rn, rm, /* part1 = */ false);
7331       break;
7332     case "sha256su1_vvv_cryptosha3"_h:
7333       sha2su1(rd, rn, rm);
7334       break;
7335   }
7336 }
7337 
7338 
7339 void Simulator::VisitCryptoAES(const Instruction* instr) {
7340   SimVRegister& rd = ReadVRegister(instr->GetRd());
7341   SimVRegister& rn = ReadVRegister(instr->GetRn());
7342   SimVRegister temp;
7343 
7344   switch (form_hash_) {
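    // AESE and AESD first XOR the round key (Rn) into the state (Rd); the
    // aes() helper then applies the remaining round transformations.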
7345     case "aesd_b_cryptoaes"_h:
7346       eor(kFormat16B, temp, rd, rn);
7347       aes(rd, temp, /* decrypt = */ true);
7348       break;
7349     case "aese_b_cryptoaes"_h:
7350       eor(kFormat16B, temp, rd, rn);
7351       aes(rd, temp, /* decrypt = */ false);
7352       break;
7353     case "aesimc_b_cryptoaes"_h:
7354       aesmix(rd, rn, /* inverse = */ true);
7355       break;
7356     case "aesmc_b_cryptoaes"_h:
7357       aesmix(rd, rn, /* inverse = */ false);
7358       break;
7359   }
7360 }
7361 
7362 void Simulator::VisitCryptoSM3(const Instruction* instr) {
7363   SimVRegister& rd = ReadVRegister(instr->GetRd());
7364   SimVRegister& rn = ReadVRegister(instr->GetRn());
7365   SimVRegister& rm = ReadVRegister(instr->GetRm());
7366   SimVRegister& ra = ReadVRegister(instr->GetRa());
7367   int index = instr->ExtractBits(13, 12);
7368 
7369   bool is_a = false;
7370   switch (form_hash_) {
7371     case "sm3partw1_vvv4_cryptosha512_3"_h:
7372       sm3partw1(rd, rn, rm);
7373       break;
7374     case "sm3partw2_vvv4_cryptosha512_3"_h:
7375       sm3partw2(rd, rn, rm);
7376       break;
7377     case "sm3ss1_vvv4_crypto4"_h:
7378       sm3ss1(rd, rn, rm, ra);
7379       break;
7380     case "sm3tt1a_vvv4_crypto3_imm2"_h:
7381       is_a = true;
7382       VIXL_FALLTHROUGH();
7383     case "sm3tt1b_vvv4_crypto3_imm2"_h:
7384       sm3tt1(rd, rn, rm, index, is_a);
7385       break;
7386     case "sm3tt2a_vvv4_crypto3_imm2"_h:
7387       is_a = true;
7388       VIXL_FALLTHROUGH();
7389     case "sm3tt2b_vvv_crypto3_imm2"_h:
7390       sm3tt2(rd, rn, rm, index, is_a);
7391       break;
7392   }
7393 }
7394 
7395 void Simulator::VisitCryptoSM4(const Instruction* instr) {
7396   SimVRegister& rd = ReadVRegister(instr->GetRd());
7397   SimVRegister& rn = ReadVRegister(instr->GetRn());
7398   SimVRegister& rm = ReadVRegister(instr->GetRm());
7399 
7400   bool is_key = false;
7401   switch (form_hash_) {
7402     case "sm4ekey_vvv4_cryptosha512_3"_h:
7403       is_key = true;
7404       VIXL_FALLTHROUGH();
7405     case "sm4e_vv4_cryptosha512_2"_h:
7406       sm4(rd, rn, rm, is_key);
7407       break;
7408   }
7409 }
7410 
7411 void Simulator::SimulateSHA512(const Instruction* instr) {
7412   SimVRegister& rd = ReadVRegister(instr->GetRd());
7413   SimVRegister& rn = ReadVRegister(instr->GetRn());
7414   SimVRegister& rm = ReadVRegister(instr->GetRm());
7415 
7416   switch (form_hash_) {
7417     case "sha512h_qqv_cryptosha512_3"_h:
7418       sha512h(rd, rn, rm);
7419       break;
7420     case "sha512h2_qqv_cryptosha512_3"_h:
7421       sha512h2(rd, rn, rm);
7422       break;
7423     case "sha512su0_vv2_cryptosha512_2"_h:
7424       sha512su0(rd, rn);
7425       break;
7426     case "sha512su1_vvv2_cryptosha512_3"_h:
7427       sha512su1(rd, rn, rm);
7428       break;
7429   }
7430 }
7431 
7432 void Simulator::VisitNEON2RegMisc(const Instruction* instr) {
7433   NEONFormatDecoder nfd(instr);
7434   VectorFormat vf = nfd.GetVectorFormat();
7435 
7436   static const NEONFormatMap map_lp =
7437       {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
7438   VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
7439 
7440   static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
7441   VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
7442 
7443   static const NEONFormatMap map_fcvtn = {{22, 30},
7444                                           {NF_4H, NF_8H, NF_2S, NF_4S}};
7445   VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
7446 
7447   SimVRegister& rd = ReadVRegister(instr->GetRd());
7448   SimVRegister& rn = ReadVRegister(instr->GetRn());
7449 
7450   if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
7451     // These instructions all use a two-bit size field, except NOT and RBIT,
7452     // which use the field to encode the operation.
7453     switch (instr->Mask(NEON2RegMiscMask)) {
7454       case NEON_REV64:
7455         rev64(vf, rd, rn);
7456         break;
7457       case NEON_REV32:
7458         rev32(vf, rd, rn);
7459         break;
7460       case NEON_REV16:
7461         rev16(vf, rd, rn);
7462         break;
7463       case NEON_SUQADD:
7464         suqadd(vf, rd, rd, rn);
7465         break;
7466       case NEON_USQADD:
7467         usqadd(vf, rd, rd, rn);
7468         break;
7469       case NEON_CLS:
7470         cls(vf, rd, rn);
7471         break;
7472       case NEON_CLZ:
7473         clz(vf, rd, rn);
7474         break;
7475       case NEON_CNT:
7476         cnt(vf, rd, rn);
7477         break;
7478       case NEON_SQABS:
7479         abs(vf, rd, rn).SignedSaturate(vf);
7480         break;
7481       case NEON_SQNEG:
7482         neg(vf, rd, rn).SignedSaturate(vf);
7483         break;
7484       case NEON_CMGT_zero:
7485         cmp(vf, rd, rn, 0, gt);
7486         break;
7487       case NEON_CMGE_zero:
7488         cmp(vf, rd, rn, 0, ge);
7489         break;
7490       case NEON_CMEQ_zero:
7491         cmp(vf, rd, rn, 0, eq);
7492         break;
7493       case NEON_CMLE_zero:
7494         cmp(vf, rd, rn, 0, le);
7495         break;
7496       case NEON_CMLT_zero:
7497         cmp(vf, rd, rn, 0, lt);
7498         break;
7499       case NEON_ABS:
7500         abs(vf, rd, rn);
7501         break;
7502       case NEON_NEG:
7503         neg(vf, rd, rn);
7504         break;
7505       case NEON_SADDLP:
7506         saddlp(vf_lp, rd, rn);
7507         break;
7508       case NEON_UADDLP:
7509         uaddlp(vf_lp, rd, rn);
7510         break;
7511       case NEON_SADALP:
7512         sadalp(vf_lp, rd, rn);
7513         break;
7514       case NEON_UADALP:
7515         uadalp(vf_lp, rd, rn);
7516         break;
7517       case NEON_RBIT_NOT:
7518         vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
7519         switch (instr->GetFPType()) {
7520           case 0:
7521             not_(vf, rd, rn);
7522             break;
7523           case 1:
7524             rbit(vf, rd, rn);
7525             break;
7526           default:
7527             VIXL_UNIMPLEMENTED();
7528         }
7529         break;
7530     }
7531   } else {
7532     VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
7533     FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
7534     bool inexact_exception = false;
7535     FrintMode frint_mode = kFrintToInteger;
7536 
7537     // These instructions all use a one-bit size field, except XTN, SQXTUN,
7538     // SHLL, SQXTN and UQXTN, which use a two-bit size field.
7539     switch (instr->Mask(NEON2RegMiscFPMask)) {
7540       case NEON_FABS:
7541         fabs_(fpf, rd, rn);
7542         return;
7543       case NEON_FNEG:
7544         fneg(fpf, rd, rn);
7545         return;
7546       case NEON_FSQRT:
7547         fsqrt(fpf, rd, rn);
7548         return;
7549       case NEON_FCVTL:
7550         if (instr->Mask(NEON_Q)) {
7551           fcvtl2(vf_fcvtl, rd, rn);
7552         } else {
7553           fcvtl(vf_fcvtl, rd, rn);
7554         }
7555         return;
7556       case NEON_FCVTN:
7557         if (instr->Mask(NEON_Q)) {
7558           fcvtn2(vf_fcvtn, rd, rn);
7559         } else {
7560           fcvtn(vf_fcvtn, rd, rn);
7561         }
7562         return;
7563       case NEON_FCVTXN:
7564         if (instr->Mask(NEON_Q)) {
7565           fcvtxn2(vf_fcvtn, rd, rn);
7566         } else {
7567           fcvtxn(vf_fcvtn, rd, rn);
7568         }
7569         return;
7570 
7571       // The following instructions break from the switch statement, rather
7572       // than return.
7573       case NEON_FRINT32X:
7574         inexact_exception = true;
7575         frint_mode = kFrintToInt32;
7576         break;  // Use FPCR rounding mode.
7577       case NEON_FRINT32Z:
7578         inexact_exception = true;
7579         frint_mode = kFrintToInt32;
7580         fpcr_rounding = FPZero;
7581         break;
7582       case NEON_FRINT64X:
7583         inexact_exception = true;
7584         frint_mode = kFrintToInt64;
7585         break;  // Use FPCR rounding mode.
7586       case NEON_FRINT64Z:
7587         inexact_exception = true;
7588         frint_mode = kFrintToInt64;
7589         fpcr_rounding = FPZero;
7590         break;
7591       case NEON_FRINTI:
7592         break;  // Use FPCR rounding mode.
7593       case NEON_FRINTX:
7594         inexact_exception = true;
7595         break;
7596       case NEON_FRINTA:
7597         fpcr_rounding = FPTieAway;
7598         break;
7599       case NEON_FRINTM:
7600         fpcr_rounding = FPNegativeInfinity;
7601         break;
7602       case NEON_FRINTN:
7603         fpcr_rounding = FPTieEven;
7604         break;
7605       case NEON_FRINTP:
7606         fpcr_rounding = FPPositiveInfinity;
7607         break;
7608       case NEON_FRINTZ:
7609         fpcr_rounding = FPZero;
7610         break;
7611 
7612       case NEON_FCVTNS:
7613         fcvts(fpf, rd, rn, FPTieEven);
7614         return;
7615       case NEON_FCVTNU:
7616         fcvtu(fpf, rd, rn, FPTieEven);
7617         return;
7618       case NEON_FCVTPS:
7619         fcvts(fpf, rd, rn, FPPositiveInfinity);
7620         return;
7621       case NEON_FCVTPU:
7622         fcvtu(fpf, rd, rn, FPPositiveInfinity);
7623         return;
7624       case NEON_FCVTMS:
7625         fcvts(fpf, rd, rn, FPNegativeInfinity);
7626         return;
7627       case NEON_FCVTMU:
7628         fcvtu(fpf, rd, rn, FPNegativeInfinity);
7629         return;
7630       case NEON_FCVTZS:
7631         fcvts(fpf, rd, rn, FPZero);
7632         return;
7633       case NEON_FCVTZU:
7634         fcvtu(fpf, rd, rn, FPZero);
7635         return;
7636       case NEON_FCVTAS:
7637         fcvts(fpf, rd, rn, FPTieAway);
7638         return;
7639       case NEON_FCVTAU:
7640         fcvtu(fpf, rd, rn, FPTieAway);
7641         return;
7642       case NEON_SCVTF:
7643         scvtf(fpf, rd, rn, 0, fpcr_rounding);
7644         return;
7645       case NEON_UCVTF:
7646         ucvtf(fpf, rd, rn, 0, fpcr_rounding);
7647         return;
7648       case NEON_URSQRTE:
7649         ursqrte(fpf, rd, rn);
7650         return;
7651       case NEON_URECPE:
7652         urecpe(fpf, rd, rn);
7653         return;
7654       case NEON_FRSQRTE:
7655         frsqrte(fpf, rd, rn);
7656         return;
7657       case NEON_FRECPE:
7658         frecpe(fpf, rd, rn, fpcr_rounding);
7659         return;
7660       case NEON_FCMGT_zero:
7661         fcmp_zero(fpf, rd, rn, gt);
7662         return;
7663       case NEON_FCMGE_zero:
7664         fcmp_zero(fpf, rd, rn, ge);
7665         return;
7666       case NEON_FCMEQ_zero:
7667         fcmp_zero(fpf, rd, rn, eq);
7668         return;
7669       case NEON_FCMLE_zero:
7670         fcmp_zero(fpf, rd, rn, le);
7671         return;
7672       case NEON_FCMLT_zero:
7673         fcmp_zero(fpf, rd, rn, lt);
7674         return;
7675       default:
7676         if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
7677             (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
7678           switch (instr->Mask(NEON2RegMiscMask)) {
7679             case NEON_XTN:
7680               xtn(vf, rd, rn);
7681               return;
7682             case NEON_SQXTN:
7683               sqxtn(vf, rd, rn);
7684               return;
7685             case NEON_UQXTN:
7686               uqxtn(vf, rd, rn);
7687               return;
7688             case NEON_SQXTUN:
7689               sqxtun(vf, rd, rn);
7690               return;
7691             case NEON_SHLL:
7692               vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
7693               if (instr->Mask(NEON_Q)) {
7694                 shll2(vf, rd, rn);
7695               } else {
7696                 shll(vf, rd, rn);
7697               }
7698               return;
7699             default:
7700               VIXL_UNIMPLEMENTED();
7701           }
7702         } else {
7703           VIXL_UNIMPLEMENTED();
7704         }
7705     }
7706 
7707     // Only FRINT* instructions fall through the switch above.
7708     frint(fpf, rd, rn, fpcr_rounding, inexact_exception, frint_mode);
7709   }
7710 }
7711 
7712 
7713 void Simulator::VisitNEON2RegMiscFP16(const Instruction* instr) {
7714   static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
7715   NEONFormatDecoder nfd(instr);
7716   VectorFormat fpf = nfd.GetVectorFormat(&map_half);
7717 
7718   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
7719 
7720   SimVRegister& rd = ReadVRegister(instr->GetRd());
7721   SimVRegister& rn = ReadVRegister(instr->GetRn());
7722 
7723   switch (instr->Mask(NEON2RegMiscFP16Mask)) {
7724     case NEON_SCVTF_H:
7725       scvtf(fpf, rd, rn, 0, fpcr_rounding);
7726       return;
7727     case NEON_UCVTF_H:
7728       ucvtf(fpf, rd, rn, 0, fpcr_rounding);
7729       return;
7730     case NEON_FCVTNS_H:
7731       fcvts(fpf, rd, rn, FPTieEven);
7732       return;
7733     case NEON_FCVTNU_H:
7734       fcvtu(fpf, rd, rn, FPTieEven);
7735       return;
7736     case NEON_FCVTPS_H:
7737       fcvts(fpf, rd, rn, FPPositiveInfinity);
7738       return;
7739     case NEON_FCVTPU_H:
7740       fcvtu(fpf, rd, rn, FPPositiveInfinity);
7741       return;
7742     case NEON_FCVTMS_H:
7743       fcvts(fpf, rd, rn, FPNegativeInfinity);
7744       return;
7745     case NEON_FCVTMU_H:
7746       fcvtu(fpf, rd, rn, FPNegativeInfinity);
7747       return;
7748     case NEON_FCVTZS_H:
7749       fcvts(fpf, rd, rn, FPZero);
7750       return;
7751     case NEON_FCVTZU_H:
7752       fcvtu(fpf, rd, rn, FPZero);
7753       return;
7754     case NEON_FCVTAS_H:
7755       fcvts(fpf, rd, rn, FPTieAway);
7756       return;
7757     case NEON_FCVTAU_H:
7758       fcvtu(fpf, rd, rn, FPTieAway);
7759       return;
7760     case NEON_FRINTI_H:
7761       frint(fpf, rd, rn, fpcr_rounding, false);
7762       return;
7763     case NEON_FRINTX_H:
7764       frint(fpf, rd, rn, fpcr_rounding, true);
7765       return;
7766     case NEON_FRINTA_H:
7767       frint(fpf, rd, rn, FPTieAway, false);
7768       return;
7769     case NEON_FRINTM_H:
7770       frint(fpf, rd, rn, FPNegativeInfinity, false);
7771       return;
7772     case NEON_FRINTN_H:
7773       frint(fpf, rd, rn, FPTieEven, false);
7774       return;
7775     case NEON_FRINTP_H:
7776       frint(fpf, rd, rn, FPPositiveInfinity, false);
7777       return;
7778     case NEON_FRINTZ_H:
7779       frint(fpf, rd, rn, FPZero, false);
7780       return;
7781     case NEON_FABS_H:
7782       fabs_(fpf, rd, rn);
7783       return;
7784     case NEON_FNEG_H:
7785       fneg(fpf, rd, rn);
7786       return;
7787     case NEON_FSQRT_H:
7788       fsqrt(fpf, rd, rn);
7789       return;
7790     case NEON_FRSQRTE_H:
7791       frsqrte(fpf, rd, rn);
7792       return;
7793     case NEON_FRECPE_H:
7794       frecpe(fpf, rd, rn, fpcr_rounding);
7795       return;
7796     case NEON_FCMGT_H_zero:
7797       fcmp_zero(fpf, rd, rn, gt);
7798       return;
7799     case NEON_FCMGE_H_zero:
7800       fcmp_zero(fpf, rd, rn, ge);
7801       return;
7802     case NEON_FCMEQ_H_zero:
7803       fcmp_zero(fpf, rd, rn, eq);
7804       return;
7805     case NEON_FCMLE_H_zero:
7806       fcmp_zero(fpf, rd, rn, le);
7807       return;
7808     case NEON_FCMLT_H_zero:
7809       fcmp_zero(fpf, rd, rn, lt);
7810       return;
7811     default:
7812       VIXL_UNIMPLEMENTED();
7813       return;
7814   }
7815 }
7816 
7817 
7818 void Simulator::VisitNEON3Same(const Instruction* instr) {
7819   NEONFormatDecoder nfd(instr);
7820   SimVRegister& rd = ReadVRegister(instr->GetRd());
7821   SimVRegister& rn = ReadVRegister(instr->GetRn());
7822   SimVRegister& rm = ReadVRegister(instr->GetRm());
7823 
7824   if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
7825     VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
7826     switch (instr->Mask(NEON3SameLogicalMask)) {
7827       case NEON_AND:
7828         and_(vf, rd, rn, rm);
7829         break;
7830       case NEON_ORR:
7831         orr(vf, rd, rn, rm);
7832         break;
7833       case NEON_ORN:
7834         orn(vf, rd, rn, rm);
7835         break;
7836       case NEON_EOR:
7837         eor(vf, rd, rn, rm);
7838         break;
7839       case NEON_BIC:
7840         bic(vf, rd, rn, rm);
7841         break;
7842       case NEON_BIF:
7843         bif(vf, rd, rn, rm);
7844         break;
7845       case NEON_BIT:
7846         bit(vf, rd, rn, rm);
7847         break;
7848       case NEON_BSL:
7849         bsl(vf, rd, rd, rn, rm);
7850         break;
7851       default:
7852         VIXL_UNIMPLEMENTED();
7853     }
7854   } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
7855     VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
7856     switch (instr->Mask(NEON3SameFPMask)) {
7857       case NEON_FADD:
7858         fadd(vf, rd, rn, rm);
7859         break;
7860       case NEON_FSUB:
7861         fsub(vf, rd, rn, rm);
7862         break;
7863       case NEON_FMUL:
7864         fmul(vf, rd, rn, rm);
7865         break;
7866       case NEON_FDIV:
7867         fdiv(vf, rd, rn, rm);
7868         break;
7869       case NEON_FMAX:
7870         fmax(vf, rd, rn, rm);
7871         break;
7872       case NEON_FMIN:
7873         fmin(vf, rd, rn, rm);
7874         break;
7875       case NEON_FMAXNM:
7876         fmaxnm(vf, rd, rn, rm);
7877         break;
7878       case NEON_FMINNM:
7879         fminnm(vf, rd, rn, rm);
7880         break;
7881       case NEON_FMLA:
7882         fmla(vf, rd, rd, rn, rm);
7883         break;
7884       case NEON_FMLS:
7885         fmls(vf, rd, rd, rn, rm);
7886         break;
7887       case NEON_FMULX:
7888         fmulx(vf, rd, rn, rm);
7889         break;
7890       case NEON_FACGE:
7891         fabscmp(vf, rd, rn, rm, ge);
7892         break;
7893       case NEON_FACGT:
7894         fabscmp(vf, rd, rn, rm, gt);
7895         break;
7896       case NEON_FCMEQ:
7897         fcmp(vf, rd, rn, rm, eq);
7898         break;
7899       case NEON_FCMGE:
7900         fcmp(vf, rd, rn, rm, ge);
7901         break;
7902       case NEON_FCMGT:
7903         fcmp(vf, rd, rn, rm, gt);
7904         break;
7905       case NEON_FRECPS:
7906         frecps(vf, rd, rn, rm);
7907         break;
7908       case NEON_FRSQRTS:
7909         frsqrts(vf, rd, rn, rm);
7910         break;
7911       case NEON_FABD:
7912         fabd(vf, rd, rn, rm);
7913         break;
7914       case NEON_FADDP:
7915         faddp(vf, rd, rn, rm);
7916         break;
7917       case NEON_FMAXP:
7918         fmaxp(vf, rd, rn, rm);
7919         break;
7920       case NEON_FMAXNMP:
7921         fmaxnmp(vf, rd, rn, rm);
7922         break;
7923       case NEON_FMINP:
7924         fminp(vf, rd, rn, rm);
7925         break;
7926       case NEON_FMINNMP:
7927         fminnmp(vf, rd, rn, rm);
7928         break;
7929       default:
7930         // FMLAL{2} and FMLSL{2} have special-case encodings.
7931         switch (instr->Mask(NEON3SameFHMMask)) {
7932           case NEON_FMLAL:
7933             fmlal(vf, rd, rn, rm);
7934             break;
7935           case NEON_FMLAL2:
7936             fmlal2(vf, rd, rn, rm);
7937             break;
7938           case NEON_FMLSL:
7939             fmlsl(vf, rd, rn, rm);
7940             break;
7941           case NEON_FMLSL2:
7942             fmlsl2(vf, rd, rn, rm);
7943             break;
7944           default:
7945             VIXL_UNIMPLEMENTED();
7946         }
7947     }
7948   } else {
7949     VectorFormat vf = nfd.GetVectorFormat();
7950     switch (instr->Mask(NEON3SameMask)) {
7951       case NEON_ADD:
7952         add(vf, rd, rn, rm);
7953         break;
7954       case NEON_ADDP:
7955         addp(vf, rd, rn, rm);
7956         break;
7957       case NEON_CMEQ:
7958         cmp(vf, rd, rn, rm, eq);
7959         break;
7960       case NEON_CMGE:
7961         cmp(vf, rd, rn, rm, ge);
7962         break;
7963       case NEON_CMGT:
7964         cmp(vf, rd, rn, rm, gt);
7965         break;
7966       case NEON_CMHI:
7967         cmp(vf, rd, rn, rm, hi);
7968         break;
7969       case NEON_CMHS:
7970         cmp(vf, rd, rn, rm, hs);
7971         break;
7972       case NEON_CMTST:
7973         cmptst(vf, rd, rn, rm);
7974         break;
7975       case NEON_MLS:
7976         mls(vf, rd, rd, rn, rm);
7977         break;
7978       case NEON_MLA:
7979         mla(vf, rd, rd, rn, rm);
7980         break;
7981       case NEON_MUL:
7982         mul(vf, rd, rn, rm);
7983         break;
7984       case NEON_PMUL:
7985         pmul(vf, rd, rn, rm);
7986         break;
7987       case NEON_SMAX:
7988         smax(vf, rd, rn, rm);
7989         break;
7990       case NEON_SMAXP:
7991         smaxp(vf, rd, rn, rm);
7992         break;
7993       case NEON_SMIN:
7994         smin(vf, rd, rn, rm);
7995         break;
7996       case NEON_SMINP:
7997         sminp(vf, rd, rn, rm);
7998         break;
7999       case NEON_SUB:
8000         sub(vf, rd, rn, rm);
8001         break;
8002       case NEON_UMAX:
8003         umax(vf, rd, rn, rm);
8004         break;
8005       case NEON_UMAXP:
8006         umaxp(vf, rd, rn, rm);
8007         break;
8008       case NEON_UMIN:
8009         umin(vf, rd, rn, rm);
8010         break;
8011       case NEON_UMINP:
8012         uminp(vf, rd, rn, rm);
8013         break;
8014       case NEON_SSHL:
8015         sshl(vf, rd, rn, rm);
8016         break;
8017       case NEON_USHL:
8018         ushl(vf, rd, rn, rm);
8019         break;
8020       case NEON_SABD:
8021         absdiff(vf, rd, rn, rm, true);
8022         break;
8023       case NEON_UABD:
8024         absdiff(vf, rd, rn, rm, false);
8025         break;
8026       case NEON_SABA:
8027         saba(vf, rd, rn, rm);
8028         break;
8029       case NEON_UABA:
8030         uaba(vf, rd, rn, rm);
8031         break;
8032       case NEON_UQADD:
8033         add(vf, rd, rn, rm).UnsignedSaturate(vf);
8034         break;
8035       case NEON_SQADD:
8036         add(vf, rd, rn, rm).SignedSaturate(vf);
8037         break;
8038       case NEON_UQSUB:
8039         sub(vf, rd, rn, rm).UnsignedSaturate(vf);
8040         break;
8041       case NEON_SQSUB:
8042         sub(vf, rd, rn, rm).SignedSaturate(vf);
8043         break;
8044       case NEON_SQDMULH:
8045         sqdmulh(vf, rd, rn, rm);
8046         break;
8047       case NEON_SQRDMULH:
8048         sqrdmulh(vf, rd, rn, rm);
8049         break;
8050       case NEON_UQSHL:
8051         ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
8052         break;
8053       case NEON_SQSHL:
8054         sshl(vf, rd, rn, rm).SignedSaturate(vf);
8055         break;
8056       case NEON_URSHL:
8057         ushl(vf, rd, rn, rm).Round(vf);
8058         break;
8059       case NEON_SRSHL:
8060         sshl(vf, rd, rn, rm).Round(vf);
8061         break;
8062       case NEON_UQRSHL:
8063         ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
8064         break;
8065       case NEON_SQRSHL:
8066         sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
8067         break;
8068       case NEON_UHADD:
8069         add(vf, rd, rn, rm).Uhalve(vf);
8070         break;
8071       case NEON_URHADD:
8072         add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
8073         break;
8074       case NEON_SHADD:
8075         add(vf, rd, rn, rm).Halve(vf);
8076         break;
8077       case NEON_SRHADD:
8078         add(vf, rd, rn, rm).Halve(vf).Round(vf);
8079         break;
8080       case NEON_UHSUB:
8081         sub(vf, rd, rn, rm).Uhalve(vf);
8082         break;
8083       case NEON_SHSUB:
8084         sub(vf, rd, rn, rm).Halve(vf);
8085         break;
8086       default:
8087         VIXL_UNIMPLEMENTED();
8088     }
8089   }
8090 }
8091 
8092 
8093 void Simulator::VisitNEON3SameFP16(const Instruction* instr) {
8094   NEONFormatDecoder nfd(instr);
8095   SimVRegister& rd = ReadVRegister(instr->GetRd());
8096   SimVRegister& rn = ReadVRegister(instr->GetRn());
8097   SimVRegister& rm = ReadVRegister(instr->GetRm());
8098 
8099   VectorFormat vf = nfd.GetVectorFormat(nfd.FP16FormatMap());
8100   switch (instr->Mask(NEON3SameFP16Mask)) {
8101 #define SIM_FUNC(A, B) \
8102   case NEON_##A##_H:   \
8103     B(vf, rd, rn, rm); \
8104     break;
8105     SIM_FUNC(FMAXNM, fmaxnm);
8106     SIM_FUNC(FADD, fadd);
8107     SIM_FUNC(FMULX, fmulx);
8108     SIM_FUNC(FMAX, fmax);
8109     SIM_FUNC(FRECPS, frecps);
8110     SIM_FUNC(FMINNM, fminnm);
8111     SIM_FUNC(FSUB, fsub);
8112     SIM_FUNC(FMIN, fmin);
8113     SIM_FUNC(FRSQRTS, frsqrts);
8114     SIM_FUNC(FMAXNMP, fmaxnmp);
8115     SIM_FUNC(FADDP, faddp);
8116     SIM_FUNC(FMUL, fmul);
8117     SIM_FUNC(FMAXP, fmaxp);
8118     SIM_FUNC(FDIV, fdiv);
8119     SIM_FUNC(FMINNMP, fminnmp);
8120     SIM_FUNC(FABD, fabd);
8121     SIM_FUNC(FMINP, fminp);
8122 #undef SIM_FUNC
8123     case NEON_FMLA_H:
8124       fmla(vf, rd, rd, rn, rm);
8125       break;
8126     case NEON_FMLS_H:
8127       fmls(vf, rd, rd, rn, rm);
8128       break;
8129     case NEON_FCMEQ_H:
8130       fcmp(vf, rd, rn, rm, eq);
8131       break;
8132     case NEON_FCMGE_H:
8133       fcmp(vf, rd, rn, rm, ge);
8134       break;
8135     case NEON_FACGE_H:
8136       fabscmp(vf, rd, rn, rm, ge);
8137       break;
8138     case NEON_FCMGT_H:
8139       fcmp(vf, rd, rn, rm, gt);
8140       break;
8141     case NEON_FACGT_H:
8142       fabscmp(vf, rd, rn, rm, gt);
8143       break;
8144     default:
8145       VIXL_UNIMPLEMENTED();
8146       break;
8147   }
8148 }
8149 
8150 void Simulator::VisitNEON3SameExtra(const Instruction* instr) {
8151   NEONFormatDecoder nfd(instr);
8152   SimVRegister& rd = ReadVRegister(instr->GetRd());
8153   SimVRegister& rn = ReadVRegister(instr->GetRn());
8154   SimVRegister& rm = ReadVRegister(instr->GetRm());
8155   int rot = 0;
8156   VectorFormat vf = nfd.GetVectorFormat();
8157 
8158   switch (form_hash_) {
8159     case "fcmla_asimdsame2_c"_h:
8160       rot = instr->GetImmRotFcmlaVec();
8161       fcmla(vf, rd, rn, rm, rd, rot);
8162       break;
8163     case "fcadd_asimdsame2_c"_h:
8164       rot = instr->GetImmRotFcadd();
8165       fcadd(vf, rd, rn, rm, rot);
8166       break;
8167     case "sdot_asimdsame2_d"_h:
8168       sdot(vf, rd, rn, rm);
8169       break;
8170     case "udot_asimdsame2_d"_h:
8171       udot(vf, rd, rn, rm);
8172       break;
8173     case "usdot_asimdsame2_d"_h:
8174       usdot(vf, rd, rn, rm);
8175       break;
8176     case "sqrdmlah_asimdsame2_only"_h:
8177       sqrdmlah(vf, rd, rn, rm);
8178       break;
8179     case "sqrdmlsh_asimdsame2_only"_h:
8180       sqrdmlsh(vf, rd, rn, rm);
8181       break;
8182   }
8183 }
8184 
8185 
8186 void Simulator::VisitNEON3Different(const Instruction* instr) {
8187   NEONFormatDecoder nfd(instr);
8188   VectorFormat vf = nfd.GetVectorFormat();
8189   VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
8190 
8191   SimVRegister& rd = ReadVRegister(instr->GetRd());
8192   SimVRegister& rn = ReadVRegister(instr->GetRn());
8193   SimVRegister& rm = ReadVRegister(instr->GetRm());
8194   int size = instr->GetNEONSize();
8195 
8196   switch (instr->Mask(NEON3DifferentMask)) {
8197     case NEON_PMULL:
8198       if ((size == 1) || (size == 2)) {  // S/D reserved.
8199         VisitUnallocated(instr);
8200       } else {
8201         if (size == 3) vf_l = kFormat1Q;
8202         pmull(vf_l, rd, rn, rm);
8203       }
8204       break;
8205     case NEON_PMULL2:
8206       if ((size == 1) || (size == 2)) {  // S/D reserved.
8207         VisitUnallocated(instr);
8208       } else {
8209         if (size == 3) vf_l = kFormat1Q;
8210         pmull2(vf_l, rd, rn, rm);
8211       }
8212       break;
8213     case NEON_UADDL:
8214       uaddl(vf_l, rd, rn, rm);
8215       break;
8216     case NEON_UADDL2:
8217       uaddl2(vf_l, rd, rn, rm);
8218       break;
8219     case NEON_SADDL:
8220       saddl(vf_l, rd, rn, rm);
8221       break;
8222     case NEON_SADDL2:
8223       saddl2(vf_l, rd, rn, rm);
8224       break;
8225     case NEON_USUBL:
8226       usubl(vf_l, rd, rn, rm);
8227       break;
8228     case NEON_USUBL2:
8229       usubl2(vf_l, rd, rn, rm);
8230       break;
8231     case NEON_SSUBL:
8232       ssubl(vf_l, rd, rn, rm);
8233       break;
8234     case NEON_SSUBL2:
8235       ssubl2(vf_l, rd, rn, rm);
8236       break;
8237     case NEON_SABAL:
8238       sabal(vf_l, rd, rn, rm);
8239       break;
8240     case NEON_SABAL2:
8241       sabal2(vf_l, rd, rn, rm);
8242       break;
8243     case NEON_UABAL:
8244       uabal(vf_l, rd, rn, rm);
8245       break;
8246     case NEON_UABAL2:
8247       uabal2(vf_l, rd, rn, rm);
8248       break;
8249     case NEON_SABDL:
8250       sabdl(vf_l, rd, rn, rm);
8251       break;
8252     case NEON_SABDL2:
8253       sabdl2(vf_l, rd, rn, rm);
8254       break;
8255     case NEON_UABDL:
8256       uabdl(vf_l, rd, rn, rm);
8257       break;
8258     case NEON_UABDL2:
8259       uabdl2(vf_l, rd, rn, rm);
8260       break;
8261     case NEON_SMLAL:
8262       smlal(vf_l, rd, rn, rm);
8263       break;
8264     case NEON_SMLAL2:
8265       smlal2(vf_l, rd, rn, rm);
8266       break;
8267     case NEON_UMLAL:
8268       umlal(vf_l, rd, rn, rm);
8269       break;
8270     case NEON_UMLAL2:
8271       umlal2(vf_l, rd, rn, rm);
8272       break;
8273     case NEON_SMLSL:
8274       smlsl(vf_l, rd, rn, rm);
8275       break;
8276     case NEON_SMLSL2:
8277       smlsl2(vf_l, rd, rn, rm);
8278       break;
8279     case NEON_UMLSL:
8280       umlsl(vf_l, rd, rn, rm);
8281       break;
8282     case NEON_UMLSL2:
8283       umlsl2(vf_l, rd, rn, rm);
8284       break;
8285     case NEON_SMULL:
8286       smull(vf_l, rd, rn, rm);
8287       break;
8288     case NEON_SMULL2:
8289       smull2(vf_l, rd, rn, rm);
8290       break;
8291     case NEON_UMULL:
8292       umull(vf_l, rd, rn, rm);
8293       break;
8294     case NEON_UMULL2:
8295       umull2(vf_l, rd, rn, rm);
8296       break;
8297     case NEON_SQDMLAL:
8298       sqdmlal(vf_l, rd, rn, rm);
8299       break;
8300     case NEON_SQDMLAL2:
8301       sqdmlal2(vf_l, rd, rn, rm);
8302       break;
8303     case NEON_SQDMLSL:
8304       sqdmlsl(vf_l, rd, rn, rm);
8305       break;
8306     case NEON_SQDMLSL2:
8307       sqdmlsl2(vf_l, rd, rn, rm);
8308       break;
8309     case NEON_SQDMULL:
8310       sqdmull(vf_l, rd, rn, rm);
8311       break;
8312     case NEON_SQDMULL2:
8313       sqdmull2(vf_l, rd, rn, rm);
8314       break;
8315     case NEON_UADDW:
8316       uaddw(vf_l, rd, rn, rm);
8317       break;
8318     case NEON_UADDW2:
8319       uaddw2(vf_l, rd, rn, rm);
8320       break;
8321     case NEON_SADDW:
8322       saddw(vf_l, rd, rn, rm);
8323       break;
8324     case NEON_SADDW2:
8325       saddw2(vf_l, rd, rn, rm);
8326       break;
8327     case NEON_USUBW:
8328       usubw(vf_l, rd, rn, rm);
8329       break;
8330     case NEON_USUBW2:
8331       usubw2(vf_l, rd, rn, rm);
8332       break;
8333     case NEON_SSUBW:
8334       ssubw(vf_l, rd, rn, rm);
8335       break;
8336     case NEON_SSUBW2:
8337       ssubw2(vf_l, rd, rn, rm);
8338       break;
8339     case NEON_ADDHN:
8340       addhn(vf, rd, rn, rm);
8341       break;
8342     case NEON_ADDHN2:
8343       addhn2(vf, rd, rn, rm);
8344       break;
8345     case NEON_RADDHN:
8346       raddhn(vf, rd, rn, rm);
8347       break;
8348     case NEON_RADDHN2:
8349       raddhn2(vf, rd, rn, rm);
8350       break;
8351     case NEON_SUBHN:
8352       subhn(vf, rd, rn, rm);
8353       break;
8354     case NEON_SUBHN2:
8355       subhn2(vf, rd, rn, rm);
8356       break;
8357     case NEON_RSUBHN:
8358       rsubhn(vf, rd, rn, rm);
8359       break;
8360     case NEON_RSUBHN2:
8361       rsubhn2(vf, rd, rn, rm);
8362       break;
8363     default:
8364       VIXL_UNIMPLEMENTED();
8365   }
8366 }
8367 
8368 
8369 void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
8370   NEONFormatDecoder nfd(instr);
8371 
8372   static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}};
8373 
8374   SimVRegister& rd = ReadVRegister(instr->GetRd());
8375   SimVRegister& rn = ReadVRegister(instr->GetRn());
8376 
8377   if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) {
8378     VectorFormat vf = nfd.GetVectorFormat(&map_half);
8379     switch (instr->Mask(NEONAcrossLanesFP16Mask)) {
8380       case NEON_FMAXV_H:
8381         fmaxv(vf, rd, rn);
8382         break;
8383       case NEON_FMINV_H:
8384         fminv(vf, rd, rn);
8385         break;
8386       case NEON_FMAXNMV_H:
8387         fmaxnmv(vf, rd, rn);
8388         break;
8389       case NEON_FMINNMV_H:
8390         fminnmv(vf, rd, rn);
8391         break;
8392       default:
8393         VIXL_UNIMPLEMENTED();
8394     }
8395   } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
8396     // The input operand's VectorFormat is passed for these instructions.
8397     VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
8398 
8399     switch (instr->Mask(NEONAcrossLanesFPMask)) {
8400       case NEON_FMAXV:
8401         fmaxv(vf, rd, rn);
8402         break;
8403       case NEON_FMINV:
8404         fminv(vf, rd, rn);
8405         break;
8406       case NEON_FMAXNMV:
8407         fmaxnmv(vf, rd, rn);
8408         break;
8409       case NEON_FMINNMV:
8410         fminnmv(vf, rd, rn);
8411         break;
8412       default:
8413         VIXL_UNIMPLEMENTED();
8414     }
8415   } else {
8416     VectorFormat vf = nfd.GetVectorFormat();
8417 
8418     switch (instr->Mask(NEONAcrossLanesMask)) {
8419       case NEON_ADDV:
8420         addv(vf, rd, rn);
8421         break;
8422       case NEON_SMAXV:
8423         smaxv(vf, rd, rn);
8424         break;
8425       case NEON_SMINV:
8426         sminv(vf, rd, rn);
8427         break;
8428       case NEON_UMAXV:
8429         umaxv(vf, rd, rn);
8430         break;
8431       case NEON_UMINV:
8432         uminv(vf, rd, rn);
8433         break;
8434       case NEON_SADDLV:
8435         saddlv(vf, rd, rn);
8436         break;
8437       case NEON_UADDLV:
8438         uaddlv(vf, rd, rn);
8439         break;
8440       default:
8441         VIXL_UNIMPLEMENTED();
8442     }
8443   }
8444 }
8445 
8446 void Simulator::SimulateNEONMulByElementLong(const Instruction* instr) {
8447   NEONFormatDecoder nfd(instr);
8448   VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
8449   SimVRegister& rd = ReadVRegister(instr->GetRd());
8450   SimVRegister& rn = ReadVRegister(instr->GetRn());
8451 
8452   std::pair<int, int> rm_and_index = instr->GetNEONMulRmAndIndex();
8453   SimVRegister temp;
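  // Duplicate the indexed element of Rm across each segment of a temporary
  // vector, so the widening multiply helpers below can use it as a plain
  // vector operand.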
8454   VectorFormat indexform =
8455       VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vf));
8456   dup_elements_to_segments(indexform, temp, rm_and_index);
8457 
8458   bool is_2 = instr->Mask(NEON_Q) ? true : false;
8459 
8460   switch (form_hash_) {
8461     case "smull_asimdelem_l"_h:
8462       smull(vf, rd, rn, temp, is_2);
8463       break;
8464     case "umull_asimdelem_l"_h:
8465       umull(vf, rd, rn, temp, is_2);
8466       break;
8467     case "smlal_asimdelem_l"_h:
8468       smlal(vf, rd, rn, temp, is_2);
8469       break;
8470     case "umlal_asimdelem_l"_h:
8471       umlal(vf, rd, rn, temp, is_2);
8472       break;
8473     case "smlsl_asimdelem_l"_h:
8474       smlsl(vf, rd, rn, temp, is_2);
8475       break;
8476     case "umlsl_asimdelem_l"_h:
8477       umlsl(vf, rd, rn, temp, is_2);
8478       break;
8479     case "sqdmull_asimdelem_l"_h:
8480       sqdmull(vf, rd, rn, temp, is_2);
8481       break;
8482     case "sqdmlal_asimdelem_l"_h:
8483       sqdmlal(vf, rd, rn, temp, is_2);
8484       break;
8485     case "sqdmlsl_asimdelem_l"_h:
8486       sqdmlsl(vf, rd, rn, temp, is_2);
8487       break;
8488     default:
8489       VIXL_UNREACHABLE();
8490   }
8491 }
8492 
8493 void Simulator::SimulateNEONFPMulByElementLong(const Instruction* instr) {
8494   VectorFormat vform = instr->GetNEONQ() ? kFormat4S : kFormat2S;
8495   SimVRegister& rd = ReadVRegister(instr->GetRd());
8496   SimVRegister& rn = ReadVRegister(instr->GetRn());
8497   SimVRegister& rm = ReadVRegister(instr->GetRmLow16());
8498 
8499   int index =
8500       (instr->GetNEONH() << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM();
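  // The half-precision element index (0-7) needs all three of H, L and M.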
8501 
8502   switch (form_hash_) {
8503     case "fmlal_asimdelem_lh"_h:
8504       fmlal(vform, rd, rn, rm, index);
8505       break;
8506     case "fmlal2_asimdelem_lh"_h:
8507       fmlal2(vform, rd, rn, rm, index);
8508       break;
8509     case "fmlsl_asimdelem_lh"_h:
8510       fmlsl(vform, rd, rn, rm, index);
8511       break;
8512     case "fmlsl2_asimdelem_lh"_h:
8513       fmlsl2(vform, rd, rn, rm, index);
8514       break;
8515     default:
8516       VIXL_UNREACHABLE();
8517   }
8518 }
8519 
8520 void Simulator::SimulateNEONFPMulByElement(const Instruction* instr) {
8521   NEONFormatDecoder nfd(instr);
8522   static const NEONFormatMap map =
8523       {{23, 22, 30},
8524        {NF_4H, NF_8H, NF_UNDEF, NF_UNDEF, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
8525   VectorFormat vform = nfd.GetVectorFormat(&map);
8526 
8527   SimVRegister& rd = ReadVRegister(instr->GetRd());
8528   SimVRegister& rn = ReadVRegister(instr->GetRn());
8529 
8530   std::pair<int, int> rm_and_index = instr->GetNEONMulRmAndIndex();
8531   SimVRegister& rm = ReadVRegister(rm_and_index.first);
8532   int index = rm_and_index.second;
8533 
8534   switch (form_hash_) {
8535     case "fmul_asimdelem_rh_h"_h:
8536     case "fmul_asimdelem_r_sd"_h:
8537       fmul(vform, rd, rn, rm, index);
8538       break;
8539     case "fmla_asimdelem_rh_h"_h:
8540     case "fmla_asimdelem_r_sd"_h:
8541       fmla(vform, rd, rn, rm, index);
8542       break;
8543     case "fmls_asimdelem_rh_h"_h:
8544     case "fmls_asimdelem_r_sd"_h:
8545       fmls(vform, rd, rn, rm, index);
8546       break;
8547     case "fmulx_asimdelem_rh_h"_h:
8548     case "fmulx_asimdelem_r_sd"_h:
8549       fmulx(vform, rd, rn, rm, index);
8550       break;
8551     default:
8552       VIXL_UNREACHABLE();
8553   }
8554 }
8555 
8556 void Simulator::SimulateNEONComplexMulByElement(const Instruction* instr) {
8557   VectorFormat vform = instr->GetNEONQ() ? kFormat8H : kFormat4H;
8558   SimVRegister& rd = ReadVRegister(instr->GetRd());
8559   SimVRegister& rn = ReadVRegister(instr->GetRn());
8560   SimVRegister& rm = ReadVRegister(instr->GetRm());
8561   int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
8562 
8563   switch (form_hash_) {
8564     case "fcmla_asimdelem_c_s"_h:
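      // The single-precision form uses only H to select the element, so widen
      // the format and drop the L bit from the index.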
8565       vform = kFormat4S;
8566       index >>= 1;
8567       VIXL_FALLTHROUGH();
8568     case "fcmla_asimdelem_c_h"_h:
8569       fcmla(vform, rd, rn, rm, index, instr->GetImmRotFcmlaSca());
8570       break;
8571     default:
8572       VIXL_UNREACHABLE();
8573   }
8574 }
8575 
8576 void Simulator::SimulateNEONDotProdByElement(const Instruction* instr) {
8577   VectorFormat vform = instr->GetNEONQ() ? kFormat4S : kFormat2S;
8578 
8579   SimVRegister& rd = ReadVRegister(instr->GetRd());
8580   SimVRegister& rn = ReadVRegister(instr->GetRn());
8581   SimVRegister& rm = ReadVRegister(instr->GetRm());
8582   int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
8583 
8584   SimVRegister temp;
8585   // NEON indexed `dot` allows the index value to exceed the register size.
8586   // Promote the format to Q-sized vector format before the duplication.
8587   dup_elements_to_segments(VectorFormatFillQ(vform), temp, rm, index);
8588 
8589   switch (form_hash_) {
8590     case "sdot_asimdelem_d"_h:
8591       sdot(vform, rd, rn, temp);
8592       break;
8593     case "udot_asimdelem_d"_h:
8594       udot(vform, rd, rn, temp);
8595       break;
8596     case "sudot_asimdelem_d"_h:
8597       usdot(vform, rd, temp, rn);
8598       break;
8599     case "usdot_asimdelem_d"_h:
8600       usdot(vform, rd, rn, temp);
8601       break;
8602   }
8603 }
8604 
8605 void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
8606   NEONFormatDecoder nfd(instr);
8607   VectorFormat vform = nfd.GetVectorFormat();
8608 
8609   SimVRegister& rd = ReadVRegister(instr->GetRd());
8610   SimVRegister& rn = ReadVRegister(instr->GetRn());
8611 
8612   std::pair<int, int> rm_and_index = instr->GetNEONMulRmAndIndex();
8613   SimVRegister& rm = ReadVRegister(rm_and_index.first);
8614   int index = rm_and_index.second;
8615 
8616   switch (form_hash_) {
8617     case "mul_asimdelem_r"_h:
8618       mul(vform, rd, rn, rm, index);
8619       break;
8620     case "mla_asimdelem_r"_h:
8621       mla(vform, rd, rn, rm, index);
8622       break;
8623     case "mls_asimdelem_r"_h:
8624       mls(vform, rd, rn, rm, index);
8625       break;
8626     case "sqdmulh_asimdelem_r"_h:
8627       sqdmulh(vform, rd, rn, rm, index);
8628       break;
8629     case "sqrdmulh_asimdelem_r"_h:
8630       sqrdmulh(vform, rd, rn, rm, index);
8631       break;
8632     case "sqrdmlah_asimdelem_r"_h:
8633       sqrdmlah(vform, rd, rn, rm, index);
8634       break;
8635     case "sqrdmlsh_asimdelem_r"_h:
8636       sqrdmlsh(vform, rd, rn, rm, index);
8637       break;
8638   }
8639 }
8640 
8641 
8642 void Simulator::VisitNEONCopy(const Instruction* instr) {
8643   NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
8644   VectorFormat vf = nfd.GetVectorFormat();
8645 
8646   SimVRegister& rd = ReadVRegister(instr->GetRd());
8647   SimVRegister& rn = ReadVRegister(instr->GetRn());
8648   int imm5 = instr->GetImmNEON5();
8649   int tz = CountTrailingZeros(imm5, 32);
8650   int reg_index = ExtractSignedBitfield32(31, tz + 1, imm5);
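  // imm5 encodes the element size via the position of its lowest set bit; the
  // bits above that form the destination element index.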
8651 
8652   if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
8653     int imm4 = instr->GetImmNEON4();
8654     int rn_index = ExtractSignedBitfield32(31, tz, imm4);
8655     mov(kFormat16B, rd, rd);  // Zero bits beyond the MSB of a Q register.
8656     ins_element(vf, rd, reg_index, rn, rn_index);
8657   } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
8658     mov(kFormat16B, rd, rd);  // Zero bits beyond the MSB of a Q register.
8659     ins_immediate(vf, rd, reg_index, ReadXRegister(instr->GetRn()));
8660   } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
8661     uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
8662     value &= MaxUintFromFormat(vf);
8663     WriteXRegister(instr->GetRd(), value);
8664   } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
8665     int64_t value = LogicVRegister(rn).Int(vf, reg_index);
8666     if (instr->GetNEONQ()) {
8667       WriteXRegister(instr->GetRd(), value);
8668     } else {
8669       WriteWRegister(instr->GetRd(), (int32_t)value);
8670     }
8671   } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
8672     dup_element(vf, rd, rn, reg_index);
8673   } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
8674     dup_immediate(vf, rd, ReadXRegister(instr->GetRn()));
8675   } else {
8676     VIXL_UNIMPLEMENTED();
8677   }
8678 }
8679 
8680 
8681 void Simulator::VisitNEONExtract(const Instruction* instr) {
8682   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
8683   VectorFormat vf = nfd.GetVectorFormat();
8684   SimVRegister& rd = ReadVRegister(instr->GetRd());
8685   SimVRegister& rn = ReadVRegister(instr->GetRn());
8686   SimVRegister& rm = ReadVRegister(instr->GetRm());
8687   if (instr->Mask(NEONExtractMask) == NEON_EXT) {
8688     int index = instr->GetImmNEONExt();
8689     ext(vf, rd, rn, rm, index);
8690   } else {
8691     VIXL_UNIMPLEMENTED();
8692   }
8693 }
8694 
8695 
8696 void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
8697                                                AddrMode addr_mode) {
8698   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
8699   VectorFormat vf = nfd.GetVectorFormat();
8700 
8701   uint64_t addr_base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
8702   int reg_size = RegisterSizeInBytesFromFormat(vf);
8703 
8704   int reg[4];
8705   uint64_t addr[4];
8706   for (int i = 0; i < 4; i++) {
8707     reg[i] = (instr->GetRt() + i) % kNumberOfVRegisters;
8708     addr[i] = addr_base + (i * reg_size);
8709   }
8710   int struct_parts = 1;
8711   int reg_count = 1;
8712   bool log_read = true;
8713 
8714   // Bit 23 determines whether this is an offset or post-index addressing mode.
8715   // In offset mode, bits 20 to 16 should be zero; these bits encode the
8716   // register or immediate in post-index mode.
8717   if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
8718     VIXL_UNREACHABLE();
8719   }
8720 
8721   // We use the PostIndex mask here, as it works in this case for both Offset
8722   // and PostIndex addressing.
8723   switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
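    // The LD1/ST1 multi-register forms fall through deliberately: the
    // four-register case handles register 3, then continues into the
    // three-register case, and so on down to a single register.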
8724     case NEON_LD1_4v:
8725     case NEON_LD1_4v_post:
8726       if (!ld1(vf, ReadVRegister(reg[3]), addr[3])) {
8727         return;
8728       }
8729       reg_count++;
8730       VIXL_FALLTHROUGH();
8731     case NEON_LD1_3v:
8732     case NEON_LD1_3v_post:
8733       if (!ld1(vf, ReadVRegister(reg[2]), addr[2])) {
8734         return;
8735       }
8736       reg_count++;
8737       VIXL_FALLTHROUGH();
8738     case NEON_LD1_2v:
8739     case NEON_LD1_2v_post:
8740       if (!ld1(vf, ReadVRegister(reg[1]), addr[1])) {
8741         return;
8742       }
8743       reg_count++;
8744       VIXL_FALLTHROUGH();
8745     case NEON_LD1_1v:
8746     case NEON_LD1_1v_post:
8747       if (!ld1(vf, ReadVRegister(reg[0]), addr[0])) {
8748         return;
8749       }
8750       break;
8751     case NEON_ST1_4v:
8752     case NEON_ST1_4v_post:
8753       if (!st1(vf, ReadVRegister(reg[3]), addr[3])) return;
8754       reg_count++;
8755       VIXL_FALLTHROUGH();
8756     case NEON_ST1_3v:
8757     case NEON_ST1_3v_post:
8758       if (!st1(vf, ReadVRegister(reg[2]), addr[2])) return;
8759       reg_count++;
8760       VIXL_FALLTHROUGH();
8761     case NEON_ST1_2v:
8762     case NEON_ST1_2v_post:
8763       if (!st1(vf, ReadVRegister(reg[1]), addr[1])) return;
8764       reg_count++;
8765       VIXL_FALLTHROUGH();
8766     case NEON_ST1_1v:
8767     case NEON_ST1_1v_post:
8768       if (!st1(vf, ReadVRegister(reg[0]), addr[0])) return;
8769       log_read = false;
8770       break;
8771     case NEON_LD2_post:
8772     case NEON_LD2:
8773       if (!ld2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0])) {
8774         return;
8775       }
8776       struct_parts = 2;
8777       reg_count = 2;
8778       break;
8779     case NEON_ST2:
8780     case NEON_ST2_post:
8781       if (!st2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0])) {
8782         return;
8783       }
8784       struct_parts = 2;
8785       reg_count = 2;
8786       log_read = false;
8787       break;
8788     case NEON_LD3_post:
8789     case NEON_LD3:
8790       if (!ld3(vf,
8791                ReadVRegister(reg[0]),
8792                ReadVRegister(reg[1]),
8793                ReadVRegister(reg[2]),
8794                addr[0])) {
8795         return;
8796       }
8797       struct_parts = 3;
8798       reg_count = 3;
8799       break;
8800     case NEON_ST3:
8801     case NEON_ST3_post:
8802       if (!st3(vf,
8803                ReadVRegister(reg[0]),
8804                ReadVRegister(reg[1]),
8805                ReadVRegister(reg[2]),
8806                addr[0])) {
8807         return;
8808       }
8809       struct_parts = 3;
8810       reg_count = 3;
8811       log_read = false;
8812       break;
8813     case NEON_ST4:
8814     case NEON_ST4_post:
8815       if (!st4(vf,
8816                ReadVRegister(reg[0]),
8817                ReadVRegister(reg[1]),
8818                ReadVRegister(reg[2]),
8819                ReadVRegister(reg[3]),
8820                addr[0])) {
8821         return;
8822       }
8823       struct_parts = 4;
8824       reg_count = 4;
8825       log_read = false;
8826       break;
8827     case NEON_LD4_post:
8828     case NEON_LD4:
8829       if (!ld4(vf,
8830                ReadVRegister(reg[0]),
8831                ReadVRegister(reg[1]),
8832                ReadVRegister(reg[2]),
8833                ReadVRegister(reg[3]),
8834                addr[0])) {
8835         return;
8836       }
8837       struct_parts = 4;
8838       reg_count = 4;
8839       break;
8840     default:
8841       VIXL_UNIMPLEMENTED();
8842   }
8843 
8844   bool do_trace = log_read ? ShouldTraceVRegs() : ShouldTraceWrites();
8845   if (do_trace) {
8846     PrintRegisterFormat print_format =
8847         GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
8848     const char* op;
8849     if (log_read) {
8850       op = "<-";
8851     } else {
8852       op = "->";
8853       // Stores don't represent a change to the source register's value, so only
8854       // print the relevant part of the value.
8855       print_format = GetPrintRegPartial(print_format);
8856     }
8857 
8858     VIXL_ASSERT((struct_parts == reg_count) || (struct_parts == 1));
8859     for (int s = reg_count - struct_parts; s >= 0; s -= struct_parts) {
8860       uintptr_t address = addr_base + (s * RegisterSizeInBytesFromFormat(vf));
8861       PrintVStructAccess(reg[s], struct_parts, print_format, op, address);
8862     }
8863   }
8864 
8865   if (addr_mode == PostIndex) {
8866     int rm = instr->GetRm();
8867     // The immediate post-index addressing mode is indicated by rm = 31.
8868     // The immediate is implied by the number of vector registers used.
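       // For example, "ld1 {v0.16b, v1.16b}, [x0], #32" writes back 32 bytes:
       // two registers of 16 bytes each.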
8869     addr_base += (rm == 31) ? (RegisterSizeInBytesFromFormat(vf) * reg_count)
8870                             : ReadXRegister(rm);
8871     WriteXRegister(instr->GetRn(),
8872                    addr_base,
8873                    LogRegWrites,
8874                    Reg31IsStackPointer);
8875   } else {
8876     VIXL_ASSERT(addr_mode == Offset);
8877   }
8878 }
8879 
8880 
8881 void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
8882   NEONLoadStoreMultiStructHelper(instr, Offset);
8883 }
8884 
8885 
8886 void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
8887     const Instruction* instr) {
8888   NEONLoadStoreMultiStructHelper(instr, PostIndex);
8889 }
8890 
8891 
8892 void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
8893                                                 AddrMode addr_mode) {
8894   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
8895   int rt = instr->GetRt();
8896 
8897   // Bit 23 determines whether this is an offset or post-index addressing mode.
8898   // In offset mode, bits 20 to 16 should be zero; these bits encode the
8899   // register or immediate in post-index mode.
8900   if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
8901     VIXL_UNREACHABLE();
8902   }
8903 
8904   // We use the PostIndex mask here, as it works in this case for both Offset
8905   // and PostIndex addressing.
8906   bool do_load = false;
8907 
8908   bool replicating = false;
8909 
8910   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
8911   VectorFormat vf_t = nfd.GetVectorFormat();
8912 
8913   VectorFormat vf = kFormat16B;
8914   switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
8915     case NEON_LD1_b:
8916     case NEON_LD1_b_post:
8917     case NEON_LD2_b:
8918     case NEON_LD2_b_post:
8919     case NEON_LD3_b:
8920     case NEON_LD3_b_post:
8921     case NEON_LD4_b:
8922     case NEON_LD4_b_post:
8923       do_load = true;
8924       VIXL_FALLTHROUGH();
8925     case NEON_ST1_b:
8926     case NEON_ST1_b_post:
8927     case NEON_ST2_b:
8928     case NEON_ST2_b_post:
8929     case NEON_ST3_b:
8930     case NEON_ST3_b_post:
8931     case NEON_ST4_b:
8932     case NEON_ST4_b_post:
8933       break;
8934 
8935     case NEON_LD1_h:
8936     case NEON_LD1_h_post:
8937     case NEON_LD2_h:
8938     case NEON_LD2_h_post:
8939     case NEON_LD3_h:
8940     case NEON_LD3_h_post:
8941     case NEON_LD4_h:
8942     case NEON_LD4_h_post:
8943       do_load = true;
8944       VIXL_FALLTHROUGH();
8945     case NEON_ST1_h:
8946     case NEON_ST1_h_post:
8947     case NEON_ST2_h:
8948     case NEON_ST2_h_post:
8949     case NEON_ST3_h:
8950     case NEON_ST3_h_post:
8951     case NEON_ST4_h:
8952     case NEON_ST4_h_post:
8953       vf = kFormat8H;
8954       break;
8955     case NEON_LD1_s:
8956     case NEON_LD1_s_post:
8957     case NEON_LD2_s:
8958     case NEON_LD2_s_post:
8959     case NEON_LD3_s:
8960     case NEON_LD3_s_post:
8961     case NEON_LD4_s:
8962     case NEON_LD4_s_post:
8963       do_load = true;
8964       VIXL_FALLTHROUGH();
8965     case NEON_ST1_s:
8966     case NEON_ST1_s_post:
8967     case NEON_ST2_s:
8968     case NEON_ST2_s_post:
8969     case NEON_ST3_s:
8970     case NEON_ST3_s_post:
8971     case NEON_ST4_s:
8972     case NEON_ST4_s_post: {
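           // The 'd' variants differ from the 's' variants only in size<0>, as
           // the assertions below check, so they are handled by these cases and
           // distinguished when selecting the lane format.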
8973       VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
8974       VIXL_STATIC_ASSERT((NEON_LD1_s_post | (1 << NEONLSSize_offset)) ==
8975                          NEON_LD1_d_post);
8976       VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
8977       VIXL_STATIC_ASSERT((NEON_ST1_s_post | (1 << NEONLSSize_offset)) ==
8978                          NEON_ST1_d_post);
8979       vf = ((instr->GetNEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
8980       break;
8981     }
8982 
8983     case NEON_LD1R:
8984     case NEON_LD1R_post:
8985     case NEON_LD2R:
8986     case NEON_LD2R_post:
8987     case NEON_LD3R:
8988     case NEON_LD3R_post:
8989     case NEON_LD4R:
8990     case NEON_LD4R_post:
8991       vf = vf_t;
8992       do_load = true;
8993       replicating = true;
8994       break;
8995 
8996     default:
8997       VIXL_UNIMPLEMENTED();
8998   }
8999 
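       // The lane index is encoded in the Q, S and size fields; wider lanes use
       // fewer of the low bits, so the index is extracted with a shift equal to
       // the log2 of the lane size in bytes.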
9000   int index_shift = LaneSizeInBytesLog2FromFormat(vf);
9001   int lane = instr->GetNEONLSIndex(index_shift);
9002   int reg_count = 0;
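       // Structures use consecutive registers, wrapping from v31 back to v0.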
9003   int rt2 = (rt + 1) % kNumberOfVRegisters;
9004   int rt3 = (rt2 + 1) % kNumberOfVRegisters;
9005   int rt4 = (rt3 + 1) % kNumberOfVRegisters;
9006   switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
9007     case NEONLoadStoreSingle1:
9008       reg_count = 1;
9009       if (replicating) {
9010         VIXL_ASSERT(do_load);
9011         if (!ld1r(vf, ReadVRegister(rt), addr)) {
9012           return;
9013         }
9014       } else if (do_load) {
9015         if (!ld1(vf, ReadVRegister(rt), lane, addr)) {
9016           return;
9017         }
9018       } else {
9019         if (!st1(vf, ReadVRegister(rt), lane, addr)) return;
9020       }
9021       break;
9022     case NEONLoadStoreSingle2:
9023       reg_count = 2;
9024       if (replicating) {
9025         VIXL_ASSERT(do_load);
9026         if (!ld2r(vf, ReadVRegister(rt), ReadVRegister(rt2), addr)) {
9027           return;
9028         }
9029       } else if (do_load) {
9030         if (!ld2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr)) {
9031           return;
9032         }
9033       } else {
9034         if (!st2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr)) return;
9035       }
9036       break;
9037     case NEONLoadStoreSingle3:
9038       reg_count = 3;
9039       if (replicating) {
9040         VIXL_ASSERT(do_load);
9041         if (!ld3r(vf,
9042                   ReadVRegister(rt),
9043                   ReadVRegister(rt2),
9044                   ReadVRegister(rt3),
9045                   addr)) {
9046           return;
9047         }
9048       } else if (do_load) {
9049         if (!ld3(vf,
9050                  ReadVRegister(rt),
9051                  ReadVRegister(rt2),
9052                  ReadVRegister(rt3),
9053                  lane,
9054                  addr)) {
9055           return;
9056         }
9057       } else {
9058         if (!st3(vf,
9059                  ReadVRegister(rt),
9060                  ReadVRegister(rt2),
9061                  ReadVRegister(rt3),
9062                  lane,
9063                  addr)) {
9064           return;
9065         }
9066       }
9067       break;
9068     case NEONLoadStoreSingle4:
9069       reg_count = 4;
9070       if (replicating) {
9071         VIXL_ASSERT(do_load);
9072         if (!ld4r(vf,
9073                   ReadVRegister(rt),
9074                   ReadVRegister(rt2),
9075                   ReadVRegister(rt3),
9076                   ReadVRegister(rt4),
9077                   addr)) {
9078           return;
9079         }
9080       } else if (do_load) {
9081         if (!ld4(vf,
9082                  ReadVRegister(rt),
9083                  ReadVRegister(rt2),
9084                  ReadVRegister(rt3),
9085                  ReadVRegister(rt4),
9086                  lane,
9087                  addr)) {
9088           return;
9089         }
9090       } else {
9091         if (!st4(vf,
9092                  ReadVRegister(rt),
9093                  ReadVRegister(rt2),
9094                  ReadVRegister(rt3),
9095                  ReadVRegister(rt4),
9096                  lane,
9097                  addr)) {
9098           return;
9099         }
9100       }
9101       break;
9102     default:
9103       VIXL_UNIMPLEMENTED();
9104   }
9105 
9106   // Trace registers and/or memory writes.
9107   PrintRegisterFormat print_format =
9108       GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
9109   if (do_load) {
9110     if (ShouldTraceVRegs()) {
9111       if (replicating) {
9112         PrintVReplicatingStructAccess(rt, reg_count, print_format, "<-", addr);
9113       } else {
9114         PrintVSingleStructAccess(rt, reg_count, lane, print_format, "<-", addr);
9115       }
9116     }
9117   } else {
9118     if (ShouldTraceWrites()) {
9119       // Stores don't represent a change to the source register's value, so only
9120       // print the relevant part of the value.
9121       print_format = GetPrintRegPartial(print_format);
9122       PrintVSingleStructAccess(rt, reg_count, lane, print_format, "->", addr);
9123     }
9124   }
9125 
9126   if (addr_mode == PostIndex) {
9127     int rm = instr->GetRm();
9128     int lane_size = LaneSizeInBytesFromFormat(vf);
9129     WriteXRegister(instr->GetRn(),
9130                    addr + ((rm == 31) ? (reg_count * lane_size)
9131                                       : ReadXRegister(rm)),
9132                    LogRegWrites,
9133                    Reg31IsStackPointer);
9134   }
9135 }
9136 
9137 
9138 void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
9139   NEONLoadStoreSingleStructHelper(instr, Offset);
9140 }
9141 
9142 
9143 void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
9144     const Instruction* instr) {
9145   NEONLoadStoreSingleStructHelper(instr, PostIndex);
9146 }
9147 
9148 
9149 void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
9150   SimVRegister& rd = ReadVRegister(instr->GetRd());
9151   int cmode = instr->GetNEONCmode();
9152   int cmode_3_1 = (cmode >> 1) & 7;
9153   int cmode_3 = (cmode >> 3) & 1;
9154   int cmode_2 = (cmode >> 2) & 1;
9155   int cmode_1 = (cmode >> 1) & 1;
9156   int cmode_0 = cmode & 1;
9157   int half_enc = instr->ExtractBit(11);
9158   int q = instr->GetNEONQ();
9159   int op_bit = instr->GetNEONModImmOp();
9160   uint64_t imm8 = instr->GetImmNEONabcdefgh();
9161   // Find the format and immediate value
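       // cmode<3:1> selects the lane size and the position of imm8 within the
       // lane: 0xx -> 32-bit lanes shifted by 8 * cmode<2:1>, 10x -> 16-bit
       // lanes shifted by 8 * cmode<1>, 110 -> the 32-bit "shifting ones"
       // forms, 111 -> the byte, 64-bit bitmask and FP immediate forms.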
9162   uint64_t imm = 0;
9163   VectorFormat vform = kFormatUndefined;
9164   switch (cmode_3_1) {
9165     case 0x0:
9166     case 0x1:
9167     case 0x2:
9168     case 0x3:
9169       vform = (q == 1) ? kFormat4S : kFormat2S;
9170       imm = imm8 << (8 * cmode_3_1);
9171       break;
9172     case 0x4:
9173     case 0x5:
9174       vform = (q == 1) ? kFormat8H : kFormat4H;
9175       imm = imm8 << (8 * cmode_1);
9176       break;
9177     case 0x6:
9178       vform = (q == 1) ? kFormat4S : kFormat2S;
9179       if (cmode_0 == 0) {
9180         imm = imm8 << 8 | 0x000000ff;
9181       } else {
9182         imm = imm8 << 16 | 0x0000ffff;
9183       }
9184       break;
9185     case 0x7:
9186       if (cmode_0 == 0 && op_bit == 0) {
9187         vform = q ? kFormat16B : kFormat8B;
9188         imm = imm8;
9189       } else if (cmode_0 == 0 && op_bit == 1) {
9190         vform = q ? kFormat2D : kFormat1D;
9191         imm = 0;
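             // Expand each bit of imm8 into a full byte of the 64-bit mask, so
             // that, for example, imm8 = 0x81 produces 0xff000000000000ff.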
9192         for (int i = 0; i < 8; ++i) {
9193           if (imm8 & (uint64_t{1} << i)) {
9194             imm |= (UINT64_C(0xff) << (8 * i));
9195           }
9196         }
9197       } else {  // cmode_0 == 1, cmode == 0xf.
9198         if (half_enc == 1) {
9199           vform = q ? kFormat8H : kFormat4H;
9200           imm = Float16ToRawbits(instr->GetImmNEONFP16());
9201         } else if (op_bit == 0) {
9202           vform = q ? kFormat4S : kFormat2S;
9203           imm = FloatToRawbits(instr->GetImmNEONFP32());
9204         } else if (q == 1) {
9205           vform = kFormat2D;
9206           imm = DoubleToRawbits(instr->GetImmNEONFP64());
9207         } else {
9208           VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
9209           VisitUnallocated(instr);
9210         }
9211       }
9212       break;
9213     default:
9214       VIXL_UNREACHABLE();
9215       break;
9216   }
9217 
9218   // Find the operation
9219   NEONModifiedImmediateOp op;
9220   if (cmode_3 == 0) {
9221     if (cmode_0 == 0) {
9222       op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
9223     } else {  // cmode<0> == '1'
9224       op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
9225     }
9226   } else {  // cmode<3> == '1'
9227     if (cmode_2 == 0) {
9228       if (cmode_0 == 0) {
9229         op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
9230       } else {  // cmode<0> == '1'
9231         op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
9232       }
9233     } else {  // cmode<2> == '1'
9234       if (cmode_1 == 0) {
9235         op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
9236       } else {  // cmode<1> == '1'
9237         if (cmode_0 == 0) {
9238           op = NEONModifiedImmediate_MOVI;
9239         } else {  // cmode<0> == '1'
9240           op = NEONModifiedImmediate_MOVI;
9241         }
9242       }
9243     }
9244   }
9245 
9246   // Call the logic function
9247   if (op == NEONModifiedImmediate_ORR) {
9248     orr(vform, rd, rd, imm);
9249   } else if (op == NEONModifiedImmediate_BIC) {
9250     bic(vform, rd, rd, imm);
9251   } else if (op == NEONModifiedImmediate_MOVI) {
9252     movi(vform, rd, imm);
9253   } else if (op == NEONModifiedImmediate_MVNI) {
9254     mvni(vform, rd, imm);
9255   } else {
9256     VisitUnimplemented(instr);
9257   }
9258 }
9259 
9260 
9261 void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
9262   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
9263   VectorFormat vf = nfd.GetVectorFormat();
9264 
9265   SimVRegister& rd = ReadVRegister(instr->GetRd());
9266   SimVRegister& rn = ReadVRegister(instr->GetRn());
9267 
9268   if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
9269     // These instructions all use a two bit size field, except NOT and RBIT,
9270     // which use the field to encode the operation.
9271     switch (instr->Mask(NEONScalar2RegMiscMask)) {
9272       case NEON_CMEQ_zero_scalar:
9273         cmp(vf, rd, rn, 0, eq);
9274         break;
9275       case NEON_CMGE_zero_scalar:
9276         cmp(vf, rd, rn, 0, ge);
9277         break;
9278       case NEON_CMGT_zero_scalar:
9279         cmp(vf, rd, rn, 0, gt);
9280         break;
9281       case NEON_CMLT_zero_scalar:
9282         cmp(vf, rd, rn, 0, lt);
9283         break;
9284       case NEON_CMLE_zero_scalar:
9285         cmp(vf, rd, rn, 0, le);
9286         break;
9287       case NEON_ABS_scalar:
9288         abs(vf, rd, rn);
9289         break;
9290       case NEON_SQABS_scalar:
9291         abs(vf, rd, rn).SignedSaturate(vf);
9292         break;
9293       case NEON_NEG_scalar:
9294         neg(vf, rd, rn);
9295         break;
9296       case NEON_SQNEG_scalar:
9297         neg(vf, rd, rn).SignedSaturate(vf);
9298         break;
9299       case NEON_SUQADD_scalar:
9300         suqadd(vf, rd, rd, rn);
9301         break;
9302       case NEON_USQADD_scalar:
9303         usqadd(vf, rd, rd, rn);
9304         break;
9305       default:
9306         VIXL_UNIMPLEMENTED();
9307         break;
9308     }
9309   } else {
9310     VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
9311     FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
9312 
9313     // These instructions all use a one bit size field, except SQXTUN, SQXTN
9314     // and UQXTN, which use a two bit size field.
9315     switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
9316       case NEON_FRECPE_scalar:
9317         frecpe(fpf, rd, rn, fpcr_rounding);
9318         break;
9319       case NEON_FRECPX_scalar:
9320         frecpx(fpf, rd, rn);
9321         break;
9322       case NEON_FRSQRTE_scalar:
9323         frsqrte(fpf, rd, rn);
9324         break;
9325       case NEON_FCMGT_zero_scalar:
9326         fcmp_zero(fpf, rd, rn, gt);
9327         break;
9328       case NEON_FCMGE_zero_scalar:
9329         fcmp_zero(fpf, rd, rn, ge);
9330         break;
9331       case NEON_FCMEQ_zero_scalar:
9332         fcmp_zero(fpf, rd, rn, eq);
9333         break;
9334       case NEON_FCMLE_zero_scalar:
9335         fcmp_zero(fpf, rd, rn, le);
9336         break;
9337       case NEON_FCMLT_zero_scalar:
9338         fcmp_zero(fpf, rd, rn, lt);
9339         break;
9340       case NEON_SCVTF_scalar:
9341         scvtf(fpf, rd, rn, 0, fpcr_rounding);
9342         break;
9343       case NEON_UCVTF_scalar:
9344         ucvtf(fpf, rd, rn, 0, fpcr_rounding);
9345         break;
9346       case NEON_FCVTNS_scalar:
9347         fcvts(fpf, rd, rn, FPTieEven);
9348         break;
9349       case NEON_FCVTNU_scalar:
9350         fcvtu(fpf, rd, rn, FPTieEven);
9351         break;
9352       case NEON_FCVTPS_scalar:
9353         fcvts(fpf, rd, rn, FPPositiveInfinity);
9354         break;
9355       case NEON_FCVTPU_scalar:
9356         fcvtu(fpf, rd, rn, FPPositiveInfinity);
9357         break;
9358       case NEON_FCVTMS_scalar:
9359         fcvts(fpf, rd, rn, FPNegativeInfinity);
9360         break;
9361       case NEON_FCVTMU_scalar:
9362         fcvtu(fpf, rd, rn, FPNegativeInfinity);
9363         break;
9364       case NEON_FCVTZS_scalar:
9365         fcvts(fpf, rd, rn, FPZero);
9366         break;
9367       case NEON_FCVTZU_scalar:
9368         fcvtu(fpf, rd, rn, FPZero);
9369         break;
9370       case NEON_FCVTAS_scalar:
9371         fcvts(fpf, rd, rn, FPTieAway);
9372         break;
9373       case NEON_FCVTAU_scalar:
9374         fcvtu(fpf, rd, rn, FPTieAway);
9375         break;
9376       case NEON_FCVTXN_scalar:
9377         // Unlike all of the other FP instructions above, fcvtxn encodes dest
9378         // size S as size<0>=1. There's only one case, so we ignore the form.
9379         VIXL_ASSERT(instr->ExtractBit(22) == 1);
9380         fcvtxn(kFormatS, rd, rn);
9381         break;
9382       default:
9383         switch (instr->Mask(NEONScalar2RegMiscMask)) {
9384           case NEON_SQXTN_scalar:
9385             sqxtn(vf, rd, rn);
9386             break;
9387           case NEON_UQXTN_scalar:
9388             uqxtn(vf, rd, rn);
9389             break;
9390           case NEON_SQXTUN_scalar:
9391             sqxtun(vf, rd, rn);
9392             break;
9393           default:
9394             VIXL_UNIMPLEMENTED();
9395         }
9396     }
9397   }
9398 }
9399 
9400 
9401 void Simulator::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
9402   VectorFormat fpf = kFormatH;
9403   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
9404 
9405   SimVRegister& rd = ReadVRegister(instr->GetRd());
9406   SimVRegister& rn = ReadVRegister(instr->GetRn());
9407 
9408   switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) {
9409     case NEON_FRECPE_H_scalar:
9410       frecpe(fpf, rd, rn, fpcr_rounding);
9411       break;
9412     case NEON_FRECPX_H_scalar:
9413       frecpx(fpf, rd, rn);
9414       break;
9415     case NEON_FRSQRTE_H_scalar:
9416       frsqrte(fpf, rd, rn);
9417       break;
9418     case NEON_FCMGT_H_zero_scalar:
9419       fcmp_zero(fpf, rd, rn, gt);
9420       break;
9421     case NEON_FCMGE_H_zero_scalar:
9422       fcmp_zero(fpf, rd, rn, ge);
9423       break;
9424     case NEON_FCMEQ_H_zero_scalar:
9425       fcmp_zero(fpf, rd, rn, eq);
9426       break;
9427     case NEON_FCMLE_H_zero_scalar:
9428       fcmp_zero(fpf, rd, rn, le);
9429       break;
9430     case NEON_FCMLT_H_zero_scalar:
9431       fcmp_zero(fpf, rd, rn, lt);
9432       break;
9433     case NEON_SCVTF_H_scalar:
9434       scvtf(fpf, rd, rn, 0, fpcr_rounding);
9435       break;
9436     case NEON_UCVTF_H_scalar:
9437       ucvtf(fpf, rd, rn, 0, fpcr_rounding);
9438       break;
9439     case NEON_FCVTNS_H_scalar:
9440       fcvts(fpf, rd, rn, FPTieEven);
9441       break;
9442     case NEON_FCVTNU_H_scalar:
9443       fcvtu(fpf, rd, rn, FPTieEven);
9444       break;
9445     case NEON_FCVTPS_H_scalar:
9446       fcvts(fpf, rd, rn, FPPositiveInfinity);
9447       break;
9448     case NEON_FCVTPU_H_scalar:
9449       fcvtu(fpf, rd, rn, FPPositiveInfinity);
9450       break;
9451     case NEON_FCVTMS_H_scalar:
9452       fcvts(fpf, rd, rn, FPNegativeInfinity);
9453       break;
9454     case NEON_FCVTMU_H_scalar:
9455       fcvtu(fpf, rd, rn, FPNegativeInfinity);
9456       break;
9457     case NEON_FCVTZS_H_scalar:
9458       fcvts(fpf, rd, rn, FPZero);
9459       break;
9460     case NEON_FCVTZU_H_scalar:
9461       fcvtu(fpf, rd, rn, FPZero);
9462       break;
9463     case NEON_FCVTAS_H_scalar:
9464       fcvts(fpf, rd, rn, FPTieAway);
9465       break;
9466     case NEON_FCVTAU_H_scalar:
9467       fcvtu(fpf, rd, rn, FPTieAway);
9468       break;
9469   }
9470 }
9471 
9472 
9473 void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
9474   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
9475   VectorFormat vf = nfd.GetVectorFormat();
9476 
9477   SimVRegister& rd = ReadVRegister(instr->GetRd());
9478   SimVRegister& rn = ReadVRegister(instr->GetRn());
9479   SimVRegister& rm = ReadVRegister(instr->GetRm());
9480   switch (instr->Mask(NEONScalar3DiffMask)) {
9481     case NEON_SQDMLAL_scalar:
9482       sqdmlal(vf, rd, rn, rm);
9483       break;
9484     case NEON_SQDMLSL_scalar:
9485       sqdmlsl(vf, rd, rn, rm);
9486       break;
9487     case NEON_SQDMULL_scalar:
9488       sqdmull(vf, rd, rn, rm);
9489       break;
9490     default:
9491       VIXL_UNIMPLEMENTED();
9492   }
9493 }
9494 
9495 
9496 void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
9497   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
9498   VectorFormat vf = nfd.GetVectorFormat();
9499 
9500   SimVRegister& rd = ReadVRegister(instr->GetRd());
9501   SimVRegister& rn = ReadVRegister(instr->GetRn());
9502   SimVRegister& rm = ReadVRegister(instr->GetRm());
9503 
9504   if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
9505     vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
9506     switch (instr->Mask(NEONScalar3SameFPMask)) {
9507       case NEON_FMULX_scalar:
9508         fmulx(vf, rd, rn, rm);
9509         break;
9510       case NEON_FACGE_scalar:
9511         fabscmp(vf, rd, rn, rm, ge);
9512         break;
9513       case NEON_FACGT_scalar:
9514         fabscmp(vf, rd, rn, rm, gt);
9515         break;
9516       case NEON_FCMEQ_scalar:
9517         fcmp(vf, rd, rn, rm, eq);
9518         break;
9519       case NEON_FCMGE_scalar:
9520         fcmp(vf, rd, rn, rm, ge);
9521         break;
9522       case NEON_FCMGT_scalar:
9523         fcmp(vf, rd, rn, rm, gt);
9524         break;
9525       case NEON_FRECPS_scalar:
9526         frecps(vf, rd, rn, rm);
9527         break;
9528       case NEON_FRSQRTS_scalar:
9529         frsqrts(vf, rd, rn, rm);
9530         break;
9531       case NEON_FABD_scalar:
9532         fabd(vf, rd, rn, rm);
9533         break;
9534       default:
9535         VIXL_UNIMPLEMENTED();
9536     }
9537   } else {
9538     switch (instr->Mask(NEONScalar3SameMask)) {
9539       case NEON_ADD_scalar:
9540         add(vf, rd, rn, rm);
9541         break;
9542       case NEON_SUB_scalar:
9543         sub(vf, rd, rn, rm);
9544         break;
9545       case NEON_CMEQ_scalar:
9546         cmp(vf, rd, rn, rm, eq);
9547         break;
9548       case NEON_CMGE_scalar:
9549         cmp(vf, rd, rn, rm, ge);
9550         break;
9551       case NEON_CMGT_scalar:
9552         cmp(vf, rd, rn, rm, gt);
9553         break;
9554       case NEON_CMHI_scalar:
9555         cmp(vf, rd, rn, rm, hi);
9556         break;
9557       case NEON_CMHS_scalar:
9558         cmp(vf, rd, rn, rm, hs);
9559         break;
9560       case NEON_CMTST_scalar:
9561         cmptst(vf, rd, rn, rm);
9562         break;
9563       case NEON_USHL_scalar:
9564         ushl(vf, rd, rn, rm);
9565         break;
9566       case NEON_SSHL_scalar:
9567         sshl(vf, rd, rn, rm);
9568         break;
9569       case NEON_SQDMULH_scalar:
9570         sqdmulh(vf, rd, rn, rm);
9571         break;
9572       case NEON_SQRDMULH_scalar:
9573         sqrdmulh(vf, rd, rn, rm);
9574         break;
9575       case NEON_UQADD_scalar:
9576         add(vf, rd, rn, rm).UnsignedSaturate(vf);
9577         break;
9578       case NEON_SQADD_scalar:
9579         add(vf, rd, rn, rm).SignedSaturate(vf);
9580         break;
9581       case NEON_UQSUB_scalar:
9582         sub(vf, rd, rn, rm).UnsignedSaturate(vf);
9583         break;
9584       case NEON_SQSUB_scalar:
9585         sub(vf, rd, rn, rm).SignedSaturate(vf);
9586         break;
9587       case NEON_UQSHL_scalar:
9588         ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
9589         break;
9590       case NEON_SQSHL_scalar:
9591         sshl(vf, rd, rn, rm).SignedSaturate(vf);
9592         break;
9593       case NEON_URSHL_scalar:
9594         ushl(vf, rd, rn, rm).Round(vf);
9595         break;
9596       case NEON_SRSHL_scalar:
9597         sshl(vf, rd, rn, rm).Round(vf);
9598         break;
9599       case NEON_UQRSHL_scalar:
9600         ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
9601         break;
9602       case NEON_SQRSHL_scalar:
9603         sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
9604         break;
9605       default:
9606         VIXL_UNIMPLEMENTED();
9607     }
9608   }
9609 }
9610 
9611 void Simulator::VisitNEONScalar3SameFP16(const Instruction* instr) {
9612   SimVRegister& rd = ReadVRegister(instr->GetRd());
9613   SimVRegister& rn = ReadVRegister(instr->GetRn());
9614   SimVRegister& rm = ReadVRegister(instr->GetRm());
9615 
9616   switch (instr->Mask(NEONScalar3SameFP16Mask)) {
9617     case NEON_FABD_H_scalar:
9618       fabd(kFormatH, rd, rn, rm);
9619       break;
9620     case NEON_FMULX_H_scalar:
9621       fmulx(kFormatH, rd, rn, rm);
9622       break;
9623     case NEON_FCMEQ_H_scalar:
9624       fcmp(kFormatH, rd, rn, rm, eq);
9625       break;
9626     case NEON_FCMGE_H_scalar:
9627       fcmp(kFormatH, rd, rn, rm, ge);
9628       break;
9629     case NEON_FCMGT_H_scalar:
9630       fcmp(kFormatH, rd, rn, rm, gt);
9631       break;
9632     case NEON_FACGE_H_scalar:
9633       fabscmp(kFormatH, rd, rn, rm, ge);
9634       break;
9635     case NEON_FACGT_H_scalar:
9636       fabscmp(kFormatH, rd, rn, rm, gt);
9637       break;
9638     case NEON_FRECPS_H_scalar:
9639       frecps(kFormatH, rd, rn, rm);
9640       break;
9641     case NEON_FRSQRTS_H_scalar:
9642       frsqrts(kFormatH, rd, rn, rm);
9643       break;
9644     default:
9645       VIXL_UNREACHABLE();
9646   }
9647 }
9648 
9649 
9650 void Simulator::VisitNEONScalar3SameExtra(const Instruction* instr) {
9651   NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
9652   VectorFormat vf = nfd.GetVectorFormat();
9653 
9654   SimVRegister& rd = ReadVRegister(instr->GetRd());
9655   SimVRegister& rn = ReadVRegister(instr->GetRn());
9656   SimVRegister& rm = ReadVRegister(instr->GetRm());
9657 
9658   switch (instr->Mask(NEONScalar3SameExtraMask)) {
9659     case NEON_SQRDMLAH_scalar:
9660       sqrdmlah(vf, rd, rn, rm);
9661       break;
9662     case NEON_SQRDMLSH_scalar:
9663       sqrdmlsh(vf, rd, rn, rm);
9664       break;
9665     default:
9666       VIXL_UNIMPLEMENTED();
9667   }
9668 }
9669 
9670 void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
9671   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
9672   VectorFormat vf = nfd.GetVectorFormat();
9673   SimVRegister& rd = ReadVRegister(instr->GetRd());
9674   SimVRegister& rn = ReadVRegister(instr->GetRn());
9675   ByElementOp Op = NULL;
9676 
9677   std::pair<int, int> rm_and_index = instr->GetNEONMulRmAndIndex();
9678   std::unordered_map<uint32_t, ByElementOp> handler = {
9679       {"sqdmull_asisdelem_l"_h, &Simulator::sqdmull},
9680       {"sqdmlal_asisdelem_l"_h, &Simulator::sqdmlal},
9681       {"sqdmlsl_asisdelem_l"_h, &Simulator::sqdmlsl},
9682       {"sqdmulh_asisdelem_r"_h, &Simulator::sqdmulh},
9683       {"sqrdmulh_asisdelem_r"_h, &Simulator::sqrdmulh},
9684       {"sqrdmlah_asisdelem_r"_h, &Simulator::sqrdmlah},
9685       {"sqrdmlsh_asisdelem_r"_h, &Simulator::sqrdmlsh},
9686       {"fmul_asisdelem_rh_h"_h, &Simulator::fmul},
9687       {"fmul_asisdelem_r_sd"_h, &Simulator::fmul},
9688       {"fmla_asisdelem_rh_h"_h, &Simulator::fmla},
9689       {"fmla_asisdelem_r_sd"_h, &Simulator::fmla},
9690       {"fmls_asisdelem_rh_h"_h, &Simulator::fmls},
9691       {"fmls_asisdelem_r_sd"_h, &Simulator::fmls},
9692       {"fmulx_asisdelem_rh_h"_h, &Simulator::fmulx},
9693       {"fmulx_asisdelem_r_sd"_h, &Simulator::fmulx},
9694   };
9695 
9696   std::unordered_map<uint32_t, ByElementOp>::const_iterator it =
9697       handler.find(form_hash_);
9698 
9699   if (it == handler.end()) {
9700     VIXL_UNIMPLEMENTED();
9701   } else {
9702     Op = it->second;
9703   }
9704 
9705   switch (form_hash_) {
9706     case "sqdmull_asisdelem_l"_h:
9707     case "sqdmlal_asisdelem_l"_h:
9708     case "sqdmlsl_asisdelem_l"_h:
9709       if ((vf == kFormatB) || (vf == kFormatH)) {
9710         VisitUnallocated(instr);
9711         return;
9712       }
9713       break;
9714     case "sqdmulh_asisdelem_r"_h:
9715     case "sqrdmulh_asisdelem_r"_h:
9716     case "sqrdmlah_asisdelem_r"_h:
9717     case "sqrdmlsh_asisdelem_r"_h:
9718       vf = nfd.GetVectorFormat(nfd.ScalarFormatMap());
9719       if ((vf == kFormatB) || (vf == kFormatD)) {
9720         VisitUnallocated(instr);
9721         return;
9722       }
9723       break;
9724     case "fmul_asisdelem_r_sd"_h:
9725     case "fmla_asisdelem_r_sd"_h:
9726     case "fmls_asisdelem_r_sd"_h:
9727     case "fmulx_asisdelem_r_sd"_h:
9728       vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
9729       break;
9730     case "fmul_asisdelem_rh_h"_h:
9731     case "fmla_asisdelem_rh_h"_h:
9732     case "fmls_asisdelem_rh_h"_h:
9733     case "fmulx_asisdelem_rh_h"_h:
9734       vf = kFormatH;
9735       break;
9736   }
9737 
9738   (this->*Op)(vf,
9739               rd,
9740               rn,
9741               ReadVRegister(rm_and_index.first),
9742               rm_and_index.second);
9743 }
9744 
9745 
9746 void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
9747   NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
9748   VectorFormat vf = nfd.GetVectorFormat();
9749 
9750   SimVRegister& rd = ReadVRegister(instr->GetRd());
9751   SimVRegister& rn = ReadVRegister(instr->GetRn());
9752 
9753   if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
9754     int imm5 = instr->GetImmNEON5();
9755     int tz = CountTrailingZeros(imm5, 32);
9756     int rn_index = ExtractSignedBitfield32(31, tz + 1, imm5);
9757     dup_element(vf, rd, rn, rn_index);
9758   } else {
9759     VIXL_UNIMPLEMENTED();
9760   }
9761 }
9762 
9763 
9764 void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
9765   NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarPairwiseFormatMap());
9766   VectorFormat vf = nfd.GetVectorFormat();
9767 
9768   SimVRegister& rd = ReadVRegister(instr->GetRd());
9769   SimVRegister& rn = ReadVRegister(instr->GetRn());
9770   switch (instr->Mask(NEONScalarPairwiseMask)) {
9771     case NEON_ADDP_scalar: {
9772       // All pairwise operations except ADDP use bit U to differentiate FP16
9773       // from FP32/FP64 variations.
9774       NEONFormatDecoder nfd_addp(instr, NEONFormatDecoder::FPScalarFormatMap());
9775       addp(nfd_addp.GetVectorFormat(), rd, rn);
9776       break;
9777     }
9778     case NEON_FADDP_h_scalar:
9779     case NEON_FADDP_scalar:
9780       faddp(vf, rd, rn);
9781       break;
9782     case NEON_FMAXP_h_scalar:
9783     case NEON_FMAXP_scalar:
9784       fmaxp(vf, rd, rn);
9785       break;
9786     case NEON_FMAXNMP_h_scalar:
9787     case NEON_FMAXNMP_scalar:
9788       fmaxnmp(vf, rd, rn);
9789       break;
9790     case NEON_FMINP_h_scalar:
9791     case NEON_FMINP_scalar:
9792       fminp(vf, rd, rn);
9793       break;
9794     case NEON_FMINNMP_h_scalar:
9795     case NEON_FMINNMP_scalar:
9796       fminnmp(vf, rd, rn);
9797       break;
9798     default:
9799       VIXL_UNIMPLEMENTED();
9800   }
9801 }
9802 
9803 
9804 void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
9805   SimVRegister& rd = ReadVRegister(instr->GetRd());
9806   SimVRegister& rn = ReadVRegister(instr->GetRn());
9807   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
9808 
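       // immh (bits 22 to 19) selects the scalar lane size:
       // 0001->B, 001x->H, 01xx->S, 1xxx->D; immh == 0000 is undefined.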
9809   static const NEONFormatMap map = {{22, 21, 20, 19},
9810                                     {NF_UNDEF,
9811                                      NF_B,
9812                                      NF_H,
9813                                      NF_H,
9814                                      NF_S,
9815                                      NF_S,
9816                                      NF_S,
9817                                      NF_S,
9818                                      NF_D,
9819                                      NF_D,
9820                                      NF_D,
9821                                      NF_D,
9822                                      NF_D,
9823                                      NF_D,
9824                                      NF_D,
9825                                      NF_D}};
9826   NEONFormatDecoder nfd(instr, &map);
9827   VectorFormat vf = nfd.GetVectorFormat();
9828 
9829   int highest_set_bit = HighestSetBitPosition(instr->GetImmNEONImmh());
9830   int immh_immb = instr->GetImmNEONImmhImmb();
9831   int right_shift = (16 << highest_set_bit) - immh_immb;
9832   int left_shift = immh_immb - (8 << highest_set_bit);
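       // For example, with immh:immb = 0b0001001 (B-sized lanes), the encoding
       // represents a right shift of 16 - 9 = 7 or a left shift of 9 - 8 = 1,
       // depending on the instruction.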
9833   switch (instr->Mask(NEONScalarShiftImmediateMask)) {
9834     case NEON_SHL_scalar:
9835       shl(vf, rd, rn, left_shift);
9836       break;
9837     case NEON_SLI_scalar:
9838       sli(vf, rd, rn, left_shift);
9839       break;
9840     case NEON_SQSHL_imm_scalar:
9841       sqshl(vf, rd, rn, left_shift);
9842       break;
9843     case NEON_UQSHL_imm_scalar:
9844       uqshl(vf, rd, rn, left_shift);
9845       break;
9846     case NEON_SQSHLU_scalar:
9847       sqshlu(vf, rd, rn, left_shift);
9848       break;
9849     case NEON_SRI_scalar:
9850       sri(vf, rd, rn, right_shift);
9851       break;
9852     case NEON_SSHR_scalar:
9853       sshr(vf, rd, rn, right_shift);
9854       break;
9855     case NEON_USHR_scalar:
9856       ushr(vf, rd, rn, right_shift);
9857       break;
9858     case NEON_SRSHR_scalar:
9859       sshr(vf, rd, rn, right_shift).Round(vf);
9860       break;
9861     case NEON_URSHR_scalar:
9862       ushr(vf, rd, rn, right_shift).Round(vf);
9863       break;
9864     case NEON_SSRA_scalar:
9865       ssra(vf, rd, rn, right_shift);
9866       break;
9867     case NEON_USRA_scalar:
9868       usra(vf, rd, rn, right_shift);
9869       break;
9870     case NEON_SRSRA_scalar:
9871       srsra(vf, rd, rn, right_shift);
9872       break;
9873     case NEON_URSRA_scalar:
9874       ursra(vf, rd, rn, right_shift);
9875       break;
9876     case NEON_UQSHRN_scalar:
9877       uqshrn(vf, rd, rn, right_shift);
9878       break;
9879     case NEON_UQRSHRN_scalar:
9880       uqrshrn(vf, rd, rn, right_shift);
9881       break;
9882     case NEON_SQSHRN_scalar:
9883       sqshrn(vf, rd, rn, right_shift);
9884       break;
9885     case NEON_SQRSHRN_scalar:
9886       sqrshrn(vf, rd, rn, right_shift);
9887       break;
9888     case NEON_SQSHRUN_scalar:
9889       sqshrun(vf, rd, rn, right_shift);
9890       break;
9891     case NEON_SQRSHRUN_scalar:
9892       sqrshrun(vf, rd, rn, right_shift);
9893       break;
9894     case NEON_FCVTZS_imm_scalar:
9895       fcvts(vf, rd, rn, FPZero, right_shift);
9896       break;
9897     case NEON_FCVTZU_imm_scalar:
9898       fcvtu(vf, rd, rn, FPZero, right_shift);
9899       break;
9900     case NEON_SCVTF_imm_scalar:
9901       scvtf(vf, rd, rn, right_shift, fpcr_rounding);
9902       break;
9903     case NEON_UCVTF_imm_scalar:
9904       ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
9905       break;
9906     default:
9907       VIXL_UNIMPLEMENTED();
9908   }
9909 }
9910 
9911 
9912 void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
9913   SimVRegister& rd = ReadVRegister(instr->GetRd());
9914   SimVRegister& rn = ReadVRegister(instr->GetRn());
9915   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
9916 
9917   // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
9918   // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
9919   static const NEONFormatMap map = {{22, 21, 20, 19, 30},
9920                                     {NF_UNDEF, NF_UNDEF, NF_8B,    NF_16B,
9921                                      NF_4H,    NF_8H,    NF_4H,    NF_8H,
9922                                      NF_2S,    NF_4S,    NF_2S,    NF_4S,
9923                                      NF_2S,    NF_4S,    NF_2S,    NF_4S,
9924                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
9925                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
9926                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,
9927                                      NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D}};
9928   NEONFormatDecoder nfd(instr, &map);
9929   VectorFormat vf = nfd.GetVectorFormat();
9930 
9931   // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
9932   static const NEONFormatMap map_l =
9933       {{22, 21, 20, 19},
9934        {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
9935   VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
9936 
9937   int highest_set_bit = HighestSetBitPosition(instr->GetImmNEONImmh());
9938   int immh_immb = instr->GetImmNEONImmhImmb();
9939   int right_shift = (16 << highest_set_bit) - immh_immb;
9940   int left_shift = immh_immb - (8 << highest_set_bit);
9941 
9942   switch (instr->Mask(NEONShiftImmediateMask)) {
9943     case NEON_SHL:
9944       shl(vf, rd, rn, left_shift);
9945       break;
9946     case NEON_SLI:
9947       sli(vf, rd, rn, left_shift);
9948       break;
9949     case NEON_SQSHLU:
9950       sqshlu(vf, rd, rn, left_shift);
9951       break;
9952     case NEON_SRI:
9953       sri(vf, rd, rn, right_shift);
9954       break;
9955     case NEON_SSHR:
9956       sshr(vf, rd, rn, right_shift);
9957       break;
9958     case NEON_USHR:
9959       ushr(vf, rd, rn, right_shift);
9960       break;
9961     case NEON_SRSHR:
9962       sshr(vf, rd, rn, right_shift).Round(vf);
9963       break;
9964     case NEON_URSHR:
9965       ushr(vf, rd, rn, right_shift).Round(vf);
9966       break;
9967     case NEON_SSRA:
9968       ssra(vf, rd, rn, right_shift);
9969       break;
9970     case NEON_USRA:
9971       usra(vf, rd, rn, right_shift);
9972       break;
9973     case NEON_SRSRA:
9974       srsra(vf, rd, rn, right_shift);
9975       break;
9976     case NEON_URSRA:
9977       ursra(vf, rd, rn, right_shift);
9978       break;
9979     case NEON_SQSHL_imm:
9980       sqshl(vf, rd, rn, left_shift);
9981       break;
9982     case NEON_UQSHL_imm:
9983       uqshl(vf, rd, rn, left_shift);
9984       break;
9985     case NEON_SCVTF_imm:
9986       scvtf(vf, rd, rn, right_shift, fpcr_rounding);
9987       break;
9988     case NEON_UCVTF_imm:
9989       ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
9990       break;
9991     case NEON_FCVTZS_imm:
9992       fcvts(vf, rd, rn, FPZero, right_shift);
9993       break;
9994     case NEON_FCVTZU_imm:
9995       fcvtu(vf, rd, rn, FPZero, right_shift);
9996       break;
9997     case NEON_SSHLL:
9998       vf = vf_l;
9999       if (instr->Mask(NEON_Q)) {
10000         sshll2(vf, rd, rn, left_shift);
10001       } else {
10002         sshll(vf, rd, rn, left_shift);
10003       }
10004       break;
10005     case NEON_USHLL:
10006       vf = vf_l;
10007       if (instr->Mask(NEON_Q)) {
10008         ushll2(vf, rd, rn, left_shift);
10009       } else {
10010         ushll(vf, rd, rn, left_shift);
10011       }
10012       break;
10013     case NEON_SHRN:
10014       if (instr->Mask(NEON_Q)) {
10015         shrn2(vf, rd, rn, right_shift);
10016       } else {
10017         shrn(vf, rd, rn, right_shift);
10018       }
10019       break;
10020     case NEON_RSHRN:
10021       if (instr->Mask(NEON_Q)) {
10022         rshrn2(vf, rd, rn, right_shift);
10023       } else {
10024         rshrn(vf, rd, rn, right_shift);
10025       }
10026       break;
10027     case NEON_UQSHRN:
10028       if (instr->Mask(NEON_Q)) {
10029         uqshrn2(vf, rd, rn, right_shift);
10030       } else {
10031         uqshrn(vf, rd, rn, right_shift);
10032       }
10033       break;
10034     case NEON_UQRSHRN:
10035       if (instr->Mask(NEON_Q)) {
10036         uqrshrn2(vf, rd, rn, right_shift);
10037       } else {
10038         uqrshrn(vf, rd, rn, right_shift);
10039       }
10040       break;
10041     case NEON_SQSHRN:
10042       if (instr->Mask(NEON_Q)) {
10043         sqshrn2(vf, rd, rn, right_shift);
10044       } else {
10045         sqshrn(vf, rd, rn, right_shift);
10046       }
10047       break;
10048     case NEON_SQRSHRN:
10049       if (instr->Mask(NEON_Q)) {
10050         sqrshrn2(vf, rd, rn, right_shift);
10051       } else {
10052         sqrshrn(vf, rd, rn, right_shift);
10053       }
10054       break;
10055     case NEON_SQSHRUN:
10056       if (instr->Mask(NEON_Q)) {
10057         sqshrun2(vf, rd, rn, right_shift);
10058       } else {
10059         sqshrun(vf, rd, rn, right_shift);
10060       }
10061       break;
10062     case NEON_SQRSHRUN:
10063       if (instr->Mask(NEON_Q)) {
10064         sqrshrun2(vf, rd, rn, right_shift);
10065       } else {
10066         sqrshrun(vf, rd, rn, right_shift);
10067       }
10068       break;
10069     default:
10070       VIXL_UNIMPLEMENTED();
10071   }
10072 }
10073 
10074 
10075 void Simulator::VisitNEONTable(const Instruction* instr) {
10076   NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
10077   VectorFormat vf = nfd.GetVectorFormat();
10078 
10079   SimVRegister& rd = ReadVRegister(instr->GetRd());
10080   SimVRegister& rn = ReadVRegister(instr->GetRn());
10081   SimVRegister& rn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfVRegisters);
10082   SimVRegister& rn3 = ReadVRegister((instr->GetRn() + 2) % kNumberOfVRegisters);
10083   SimVRegister& rn4 = ReadVRegister((instr->GetRn() + 3) % kNumberOfVRegisters);
10084   SimVRegister& rm = ReadVRegister(instr->GetRm());
10085 
10086   switch (instr->Mask(NEONTableMask)) {
10087     case NEON_TBL_1v:
10088       tbl(vf, rd, rn, rm);
10089       break;
10090     case NEON_TBL_2v:
10091       tbl(vf, rd, rn, rn2, rm);
10092       break;
10093     case NEON_TBL_3v:
10094       tbl(vf, rd, rn, rn2, rn3, rm);
10095       break;
10096     case NEON_TBL_4v:
10097       tbl(vf, rd, rn, rn2, rn3, rn4, rm);
10098       break;
10099     case NEON_TBX_1v:
10100       tbx(vf, rd, rn, rm);
10101       break;
10102     case NEON_TBX_2v:
10103       tbx(vf, rd, rn, rn2, rm);
10104       break;
10105     case NEON_TBX_3v:
10106       tbx(vf, rd, rn, rn2, rn3, rm);
10107       break;
10108     case NEON_TBX_4v:
10109       tbx(vf, rd, rn, rn2, rn3, rn4, rm);
10110       break;
10111     default:
10112       VIXL_UNIMPLEMENTED();
10113   }
10114 }
10115 
10116 
10117 void Simulator::VisitNEONPerm(const Instruction* instr) {
10118   NEONFormatDecoder nfd(instr);
10119   VectorFormat vf = nfd.GetVectorFormat();
10120 
10121   SimVRegister& rd = ReadVRegister(instr->GetRd());
10122   SimVRegister& rn = ReadVRegister(instr->GetRn());
10123   SimVRegister& rm = ReadVRegister(instr->GetRm());
10124 
10125   switch (instr->Mask(NEONPermMask)) {
10126     case NEON_TRN1:
10127       trn1(vf, rd, rn, rm);
10128       break;
10129     case NEON_TRN2:
10130       trn2(vf, rd, rn, rm);
10131       break;
10132     case NEON_UZP1:
10133       uzp1(vf, rd, rn, rm);
10134       break;
10135     case NEON_UZP2:
10136       uzp2(vf, rd, rn, rm);
10137       break;
10138     case NEON_ZIP1:
10139       zip1(vf, rd, rn, rm);
10140       break;
10141     case NEON_ZIP2:
10142       zip2(vf, rd, rn, rm);
10143       break;
10144     default:
10145       VIXL_UNIMPLEMENTED();
10146   }
10147 }
10148 
10149 void Simulator::SimulateNEONSHA3(const Instruction* instr) {
10150   SimVRegister& rd = ReadVRegister(instr->GetRd());
10151   SimVRegister& rn = ReadVRegister(instr->GetRn());
10152   SimVRegister& rm = ReadVRegister(instr->GetRm());
10153   SimVRegister& ra = ReadVRegister(instr->GetRa());
10154   SimVRegister temp;
10155 
10156   switch (form_hash_) {
10157     case "bcax_vvv16_crypto4"_h:
10158       bic(kFormat16B, temp, rm, ra);
10159       eor(kFormat16B, rd, rn, temp);
10160       break;
10161     case "eor3_vvv16_crypto4"_h:
10162       eor(kFormat16B, temp, rm, ra);
10163       eor(kFormat16B, rd, rn, temp);
10164       break;
10165     case "rax1_vvv2_cryptosha512_3"_h:
10166       ror(kFormat2D, temp, rm, 63);  // rol(1) => ror(63)
10167       eor(kFormat2D, rd, rn, temp);
10168       break;
10169     case "xar_vvv2_crypto3_imm6"_h:
10170       int rot = instr->ExtractBits(15, 10);
10171       eor(kFormat2D, temp, rn, rm);
10172       ror(kFormat2D, rd, temp, rot);
10173       break;
10174   }
10175 }
10176 
10177 void Simulator::VisitSVEAddressGeneration(const Instruction* instr) {
10178   SimVRegister& zd = ReadVRegister(instr->GetRd());
10179   SimVRegister& zn = ReadVRegister(instr->GetRn());
10180   SimVRegister& zm = ReadVRegister(instr->GetRm());
10181   SimVRegister temp;
10182 
10183   VectorFormat vform = kFormatVnD;
10184   mov(vform, temp, zm);
10185 
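        // ADR computes zd = zn + (offset << shift), where the offsets in zm may
        // first be sign- or zero-extended from their low 32 bits, as selected
        // below.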
10186   switch (instr->Mask(SVEAddressGenerationMask)) {
10187     case ADR_z_az_d_s32_scaled:
10188       sxt(vform, temp, temp, kSRegSize);
10189       break;
10190     case ADR_z_az_d_u32_scaled:
10191       uxt(vform, temp, temp, kSRegSize);
10192       break;
10193     case ADR_z_az_s_same_scaled:
10194       vform = kFormatVnS;
10195       break;
10196     case ADR_z_az_d_same_scaled:
10197       // Nothing to do.
10198       break;
10199     default:
10200       VIXL_UNIMPLEMENTED();
10201       break;
10202   }
10203 
10204   int shift_amount = instr->ExtractBits(11, 10);
10205   shl(vform, temp, temp, shift_amount);
10206   add(vform, zd, zn, temp);
10207 }
10208 
10209 void Simulator::VisitSVEBitwiseLogicalWithImm_Unpredicated(
10210     const Instruction* instr) {
10211   Instr op = instr->Mask(SVEBitwiseLogicalWithImm_UnpredicatedMask);
10212   switch (op) {
10213     case AND_z_zi:
10214     case EOR_z_zi:
10215     case ORR_z_zi: {
10216       int lane_size = instr->GetSVEBitwiseImmLaneSizeInBytesLog2();
10217       uint64_t imm = instr->GetSVEImmLogical();
10218       // A valid logical immediate must have at least one bit set.
10219       VIXL_ASSERT(imm != 0);
10220       SVEBitwiseImmHelper(static_cast<SVEBitwiseLogicalWithImm_UnpredicatedOp>(
10221                               op),
10222                           SVEFormatFromLaneSizeInBytesLog2(lane_size),
10223                           ReadVRegister(instr->GetRd()),
10224                           imm);
10225       break;
10226     }
10227     default:
10228       VIXL_UNIMPLEMENTED();
10229       break;
10230   }
10231 }
10232 
10233 void Simulator::VisitSVEBroadcastBitmaskImm(const Instruction* instr) {
10234   switch (instr->Mask(SVEBroadcastBitmaskImmMask)) {
10235     case DUPM_z_i: {
10236       /* DUPM uses the same lane size and immediate encoding as bitwise logical
10237        * immediate instructions. */
10238       int lane_size = instr->GetSVEBitwiseImmLaneSizeInBytesLog2();
10239       uint64_t imm = instr->GetSVEImmLogical();
10240       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
10241       dup_immediate(vform, ReadVRegister(instr->GetRd()), imm);
10242       break;
10243     }
10244     default:
10245       VIXL_UNIMPLEMENTED();
10246       break;
10247   }
10248 }
10249 
10250 void Simulator::VisitSVEBitwiseLogicalUnpredicated(const Instruction* instr) {
10251   SimVRegister& zd = ReadVRegister(instr->GetRd());
10252   SimVRegister& zn = ReadVRegister(instr->GetRn());
10253   SimVRegister& zm = ReadVRegister(instr->GetRm());
10254   Instr op = instr->Mask(SVEBitwiseLogicalUnpredicatedMask);
10255 
10256   LogicalOp logical_op = LogicalOpMask;
10257   switch (op) {
10258     case AND_z_zz:
10259       logical_op = AND;
10260       break;
10261     case BIC_z_zz:
10262       logical_op = BIC;
10263       break;
10264     case EOR_z_zz:
10265       logical_op = EOR;
10266       break;
10267     case ORR_z_zz:
10268       logical_op = ORR;
10269       break;
10270     default:
10271       VIXL_UNIMPLEMENTED();
10272       break;
10273   }
10274   // Lane size of registers is irrelevant to the bitwise operations, so perform
10275   // the operation on D-sized lanes.
10276   SVEBitwiseLogicalUnpredicatedHelper(logical_op, kFormatVnD, zd, zn, zm);
10277 }
10278 
10279 void Simulator::VisitSVEBitwiseShiftByImm_Predicated(const Instruction* instr) {
10280   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10281   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10282 
10283   SimVRegister scratch;
10284   SimVRegister result;
10285 
10286   bool for_division = false;
10287   Shift shift_op = NO_SHIFT;
10288   switch (instr->Mask(SVEBitwiseShiftByImm_PredicatedMask)) {
10289     case ASRD_z_p_zi:
10290       shift_op = ASR;
10291       for_division = true;
10292       break;
10293     case ASR_z_p_zi:
10294       shift_op = ASR;
10295       break;
10296     case LSL_z_p_zi:
10297       shift_op = LSL;
10298       break;
10299     case LSR_z_p_zi:
10300       shift_op = LSR;
10301       break;
10302     default:
10303       VIXL_UNIMPLEMENTED();
10304       break;
10305   }
10306 
10307   std::pair<int, int> shift_and_lane_size =
10308       instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ true);
10309   unsigned lane_size = shift_and_lane_size.second;
10310   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
10311   int shift_dist = shift_and_lane_size.first;
10312 
10313   if ((shift_op == ASR) && for_division) {
10314     asrd(vform, result, zdn, shift_dist);
10315   } else {
10316     if (shift_op == LSL) {
10317       // Shift distance is computed differently for LSL. Convert the result.
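            // For example, with B-sized lanes an encoded distance of 7
            // corresponds to LSL #1, since (8 << 0) - 7 == 1.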
10318       shift_dist = (8 << lane_size) - shift_dist;
10319     }
10320     dup_immediate(vform, scratch, shift_dist);
10321     SVEBitwiseShiftHelper(shift_op, vform, result, zdn, scratch, false);
10322   }
10323   mov_merging(vform, zdn, pg, result);
10324 }
10325 
10326 void Simulator::VisitSVEBitwiseShiftByVector_Predicated(
10327     const Instruction* instr) {
10328   VectorFormat vform = instr->GetSVEVectorFormat();
10329   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10330   SimVRegister& zm = ReadVRegister(instr->GetRn());
10331   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10332   SimVRegister result;
10333 
10334   // SVE uses the whole (saturated) lane for the shift amount.
10335   bool shift_in_ls_byte = false;
10336 
10337   switch (form_hash_) {
10338     case "asrr_z_p_zz"_h:
10339       sshr(vform, result, zm, zdn);
10340       break;
10341     case "asr_z_p_zz"_h:
10342       sshr(vform, result, zdn, zm);
10343       break;
10344     case "lslr_z_p_zz"_h:
10345       sshl(vform, result, zm, zdn, shift_in_ls_byte);
10346       break;
10347     case "lsl_z_p_zz"_h:
10348       sshl(vform, result, zdn, zm, shift_in_ls_byte);
10349       break;
10350     case "lsrr_z_p_zz"_h:
10351       ushr(vform, result, zm, zdn);
10352       break;
10353     case "lsr_z_p_zz"_h:
10354       ushr(vform, result, zdn, zm);
10355       break;
10356     case "sqrshl_z_p_zz"_h:
10357       sshl(vform, result, zdn, zm, shift_in_ls_byte)
10358           .Round(vform)
10359           .SignedSaturate(vform);
10360       break;
10361     case "sqrshlr_z_p_zz"_h:
10362       sshl(vform, result, zm, zdn, shift_in_ls_byte)
10363           .Round(vform)
10364           .SignedSaturate(vform);
10365       break;
10366     case "sqshl_z_p_zz"_h:
10367       sshl(vform, result, zdn, zm, shift_in_ls_byte).SignedSaturate(vform);
10368       break;
10369     case "sqshlr_z_p_zz"_h:
10370       sshl(vform, result, zm, zdn, shift_in_ls_byte).SignedSaturate(vform);
10371       break;
10372     case "srshl_z_p_zz"_h:
10373       sshl(vform, result, zdn, zm, shift_in_ls_byte).Round(vform);
10374       break;
10375     case "srshlr_z_p_zz"_h:
10376       sshl(vform, result, zm, zdn, shift_in_ls_byte).Round(vform);
10377       break;
10378     case "uqrshl_z_p_zz"_h:
10379       ushl(vform, result, zdn, zm, shift_in_ls_byte)
10380           .Round(vform)
10381           .UnsignedSaturate(vform);
10382       break;
10383     case "uqrshlr_z_p_zz"_h:
10384       ushl(vform, result, zm, zdn, shift_in_ls_byte)
10385           .Round(vform)
10386           .UnsignedSaturate(vform);
10387       break;
10388     case "uqshl_z_p_zz"_h:
10389       ushl(vform, result, zdn, zm, shift_in_ls_byte).UnsignedSaturate(vform);
10390       break;
10391     case "uqshlr_z_p_zz"_h:
10392       ushl(vform, result, zm, zdn, shift_in_ls_byte).UnsignedSaturate(vform);
10393       break;
10394     case "urshl_z_p_zz"_h:
10395       ushl(vform, result, zdn, zm, shift_in_ls_byte).Round(vform);
10396       break;
10397     case "urshlr_z_p_zz"_h:
10398       ushl(vform, result, zm, zdn, shift_in_ls_byte).Round(vform);
10399       break;
10400     default:
10401       VIXL_UNIMPLEMENTED();
10402       break;
10403   }
10404   mov_merging(vform, zdn, pg, result);
10405 }
10406 
10407 void Simulator::VisitSVEBitwiseShiftByWideElements_Predicated(
10408     const Instruction* instr) {
10409   VectorFormat vform = instr->GetSVEVectorFormat();
10410   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10411   SimVRegister& zm = ReadVRegister(instr->GetRn());
10412   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10413 
10414   SimVRegister result;
10415   Shift shift_op = ASR;
10416 
10417   switch (instr->Mask(SVEBitwiseShiftByWideElements_PredicatedMask)) {
10418     case ASR_z_p_zw:
10419       break;
10420     case LSL_z_p_zw:
10421       shift_op = LSL;
10422       break;
10423     case LSR_z_p_zw:
10424       shift_op = LSR;
10425       break;
10426     default:
10427       VIXL_UNIMPLEMENTED();
10428       break;
10429   }
10430   SVEBitwiseShiftHelper(shift_op,
10431                         vform,
10432                         result,
10433                         zdn,
10434                         zm,
10435                         /* is_wide_elements = */ true);
10436   mov_merging(vform, zdn, pg, result);
10437 }
10438 
10439 void Simulator::VisitSVEBitwiseShiftUnpredicated(const Instruction* instr) {
10440   SimVRegister& zd = ReadVRegister(instr->GetRd());
10441   SimVRegister& zn = ReadVRegister(instr->GetRn());
10442 
10443   Shift shift_op = NO_SHIFT;
10444   switch (instr->Mask(SVEBitwiseShiftUnpredicatedMask)) {
10445     case ASR_z_zi:
10446     case ASR_z_zw:
10447       shift_op = ASR;
10448       break;
10449     case LSL_z_zi:
10450     case LSL_z_zw:
10451       shift_op = LSL;
10452       break;
10453     case LSR_z_zi:
10454     case LSR_z_zw:
10455       shift_op = LSR;
10456       break;
10457     default:
10458       VIXL_UNIMPLEMENTED();
10459       break;
10460   }
10461 
10462   switch (instr->Mask(SVEBitwiseShiftUnpredicatedMask)) {
10463     case ASR_z_zi:
10464     case LSL_z_zi:
10465     case LSR_z_zi: {
10466       SimVRegister scratch;
10467       std::pair<int, int> shift_and_lane_size =
10468           instr->GetSVEImmShiftAndLaneSizeLog2(/* is_predicated = */ false);
10469       unsigned lane_size = shift_and_lane_size.second;
10470       VIXL_ASSERT(lane_size <= kDRegSizeInBytesLog2);
10471       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(lane_size);
10472       int shift_dist = shift_and_lane_size.first;
10473       if (shift_op == LSL) {
10474         // Shift distance is computed differently for LSL. Convert the result.
10475         shift_dist = (8 << lane_size) - shift_dist;
10476       }
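      // For example, for B lanes (lane_size == 0) an encoded right-shift
      // distance of 5 becomes a left-shift distance of 8 - 5 = 3.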
10477       dup_immediate(vform, scratch, shift_dist);
10478       SVEBitwiseShiftHelper(shift_op, vform, zd, zn, scratch, false);
10479       break;
10480     }
10481     case ASR_z_zw:
10482     case LSL_z_zw:
10483     case LSR_z_zw:
10484       SVEBitwiseShiftHelper(shift_op,
10485                             instr->GetSVEVectorFormat(),
10486                             zd,
10487                             zn,
10488                             ReadVRegister(instr->GetRm()),
10489                             true);
10490       break;
10491     default:
10492       VIXL_UNIMPLEMENTED();
10493       break;
10494   }
10495 }
10496 
10497 void Simulator::VisitSVEIncDecRegisterByElementCount(const Instruction* instr) {
10498   // Although the instructions have a separate encoding class, the lane size is
10499   // encoded in the same way as most other SVE instructions.
10500   VectorFormat vform = instr->GetSVEVectorFormat();
10501 
10502   int pattern = instr->GetImmSVEPredicateConstraint();
10503   int count = GetPredicateConstraintLaneCount(vform, pattern);
10504   int multiplier = instr->ExtractBits(19, 16) + 1;
10505 
10506   switch (instr->Mask(SVEIncDecRegisterByElementCountMask)) {
10507     case DECB_r_rs:
10508     case DECD_r_rs:
10509     case DECH_r_rs:
10510     case DECW_r_rs:
10511       count = -count;
10512       break;
10513     case INCB_r_rs:
10514     case INCD_r_rs:
10515     case INCH_r_rs:
10516     case INCW_r_rs:
10517       // Nothing to do.
10518       break;
10519     default:
10520       VIXL_UNIMPLEMENTED();
10521       return;
10522   }
10523 
10524   WriteXRegister(instr->GetRd(),
10525                  IncDecN(ReadXRegister(instr->GetRd()),
10526                          count * multiplier,
10527                          kXRegSize));
10528 }
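
// For example, assuming a 256-bit vector length, incw x0, all, mul #4 sees
// eight 32-bit lanes matching the "all" pattern, so it adds 8 * 4 = 32 to x0;
// the corresponding decw form subtracts the same amount.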
10529 
10530 void Simulator::VisitSVEIncDecVectorByElementCount(const Instruction* instr) {
10531   VectorFormat vform = instr->GetSVEVectorFormat();
10532   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
10533     VIXL_UNIMPLEMENTED();
10534   }
10535 
10536   int pattern = instr->GetImmSVEPredicateConstraint();
10537   int count = GetPredicateConstraintLaneCount(vform, pattern);
10538   int multiplier = instr->ExtractBits(19, 16) + 1;
10539 
10540   switch (instr->Mask(SVEIncDecVectorByElementCountMask)) {
10541     case DECD_z_zs:
10542     case DECH_z_zs:
10543     case DECW_z_zs:
10544       count = -count;
10545       break;
10546     case INCD_z_zs:
10547     case INCH_z_zs:
10548     case INCW_z_zs:
10549       // Nothing to do.
10550       break;
10551     default:
10552       VIXL_UNIMPLEMENTED();
10553       break;
10554   }
10555 
10556   SimVRegister& zd = ReadVRegister(instr->GetRd());
10557   SimVRegister scratch;
10558   dup_immediate(vform,
10559                 scratch,
10560                 IncDecN(0,
10561                         count * multiplier,
10562                         LaneSizeInBitsFromFormat(vform)));
10563   add(vform, zd, zd, scratch);
10564 }
10565 
10566 void Simulator::VisitSVESaturatingIncDecRegisterByElementCount(
10567     const Instruction* instr) {
10568   // Although the instructions have a separate encoding class, the lane size is
10569   // encoded in the same way as most other SVE instructions.
10570   VectorFormat vform = instr->GetSVEVectorFormat();
10571 
10572   int pattern = instr->GetImmSVEPredicateConstraint();
10573   int count = GetPredicateConstraintLaneCount(vform, pattern);
10574   int multiplier = instr->ExtractBits(19, 16) + 1;
10575 
10576   unsigned width = kXRegSize;
10577   bool is_signed = false;
10578 
10579   switch (instr->Mask(SVESaturatingIncDecRegisterByElementCountMask)) {
10580     case SQDECB_r_rs_sx:
10581     case SQDECD_r_rs_sx:
10582     case SQDECH_r_rs_sx:
10583     case SQDECW_r_rs_sx:
10584       width = kWRegSize;
10585       VIXL_FALLTHROUGH();
10586     case SQDECB_r_rs_x:
10587     case SQDECD_r_rs_x:
10588     case SQDECH_r_rs_x:
10589     case SQDECW_r_rs_x:
10590       is_signed = true;
10591       count = -count;
10592       break;
10593     case SQINCB_r_rs_sx:
10594     case SQINCD_r_rs_sx:
10595     case SQINCH_r_rs_sx:
10596     case SQINCW_r_rs_sx:
10597       width = kWRegSize;
10598       VIXL_FALLTHROUGH();
10599     case SQINCB_r_rs_x:
10600     case SQINCD_r_rs_x:
10601     case SQINCH_r_rs_x:
10602     case SQINCW_r_rs_x:
10603       is_signed = true;
10604       break;
10605     case UQDECB_r_rs_uw:
10606     case UQDECD_r_rs_uw:
10607     case UQDECH_r_rs_uw:
10608     case UQDECW_r_rs_uw:
10609       width = kWRegSize;
10610       VIXL_FALLTHROUGH();
10611     case UQDECB_r_rs_x:
10612     case UQDECD_r_rs_x:
10613     case UQDECH_r_rs_x:
10614     case UQDECW_r_rs_x:
10615       count = -count;
10616       break;
10617     case UQINCB_r_rs_uw:
10618     case UQINCD_r_rs_uw:
10619     case UQINCH_r_rs_uw:
10620     case UQINCW_r_rs_uw:
10621       width = kWRegSize;
10622       VIXL_FALLTHROUGH();
10623     case UQINCB_r_rs_x:
10624     case UQINCD_r_rs_x:
10625     case UQINCH_r_rs_x:
10626     case UQINCW_r_rs_x:
10627       // Nothing to do.
10628       break;
10629     default:
10630       VIXL_UNIMPLEMENTED();
10631       break;
10632   }
10633 
10634   WriteXRegister(instr->GetRd(),
10635                  IncDecN(ReadXRegister(instr->GetRd()),
10636                          count * multiplier,
10637                          width,
10638                          true,
10639                          is_signed));
10640 }
10641 
10642 void Simulator::VisitSVESaturatingIncDecVectorByElementCount(
10643     const Instruction* instr) {
10644   VectorFormat vform = instr->GetSVEVectorFormat();
10645   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
10646     VIXL_UNIMPLEMENTED();
10647   }
10648 
10649   int pattern = instr->GetImmSVEPredicateConstraint();
10650   int count = GetPredicateConstraintLaneCount(vform, pattern);
10651   int multiplier = instr->ExtractBits(19, 16) + 1;
10652 
10653   SimVRegister& zd = ReadVRegister(instr->GetRd());
10654   SimVRegister scratch;
10655   dup_immediate(vform,
10656                 scratch,
10657                 IncDecN(0,
10658                         count * multiplier,
10659                         LaneSizeInBitsFromFormat(vform)));
10660 
10661   switch (instr->Mask(SVESaturatingIncDecVectorByElementCountMask)) {
10662     case SQDECD_z_zs:
10663     case SQDECH_z_zs:
10664     case SQDECW_z_zs:
10665       sub(vform, zd, zd, scratch).SignedSaturate(vform);
10666       break;
10667     case SQINCD_z_zs:
10668     case SQINCH_z_zs:
10669     case SQINCW_z_zs:
10670       add(vform, zd, zd, scratch).SignedSaturate(vform);
10671       break;
10672     case UQDECD_z_zs:
10673     case UQDECH_z_zs:
10674     case UQDECW_z_zs:
10675       sub(vform, zd, zd, scratch).UnsignedSaturate(vform);
10676       break;
10677     case UQINCD_z_zs:
10678     case UQINCH_z_zs:
10679     case UQINCW_z_zs:
10680       add(vform, zd, zd, scratch).UnsignedSaturate(vform);
10681       break;
10682     default:
10683       VIXL_UNIMPLEMENTED();
10684       break;
10685   }
10686 }
10687 
10688 void Simulator::VisitSVEElementCount(const Instruction* instr) {
10689   switch (instr->Mask(SVEElementCountMask)) {
10690     case CNTB_r_s:
10691     case CNTD_r_s:
10692     case CNTH_r_s:
10693     case CNTW_r_s:
10694       // All handled below.
10695       break;
10696     default:
10697       VIXL_UNIMPLEMENTED();
10698       break;
10699   }
10700 
10701   // Although the instructions have a separate encoding class, the lane size
10702   // is encoded in the same way as most other SVE instructions.
10703   VectorFormat vform = instr->GetSVEVectorFormat();
10704 
10705   int pattern = instr->GetImmSVEPredicateConstraint();
10706   int count = GetPredicateConstraintLaneCount(vform, pattern);
10707   int multiplier = instr->ExtractBits(19, 16) + 1;
10708   WriteXRegister(instr->GetRd(), count * multiplier);
10709 }
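
// For example, assuming a 512-bit vector length, cntb x0 writes 64 (the
// number of byte lanes) to x0, and cnth x0, all, mul #2 writes 2 * 32 = 64.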
10710 
10711 void Simulator::VisitSVEFPAccumulatingReduction(const Instruction* instr) {
10712   VectorFormat vform = instr->GetSVEVectorFormat();
10713   SimVRegister& vdn = ReadVRegister(instr->GetRd());
10714   SimVRegister& zm = ReadVRegister(instr->GetRn());
10715   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10716 
10717   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10718 
10719   switch (instr->Mask(SVEFPAccumulatingReductionMask)) {
10720     case FADDA_v_p_z:
10721       fadda(vform, vdn, pg, zm);
10722       break;
10723     default:
10724       VIXL_UNIMPLEMENTED();
10725       break;
10726   }
10727 }
10728 
10729 void Simulator::VisitSVEFPArithmetic_Predicated(const Instruction* instr) {
10730   VectorFormat vform = instr->GetSVEVectorFormat();
10731   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10732   SimVRegister& zm = ReadVRegister(instr->GetRn());
10733   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10734 
10735   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10736 
10737   SimVRegister result;
10738   switch (instr->Mask(SVEFPArithmetic_PredicatedMask)) {
10739     case FABD_z_p_zz:
10740       fabd(vform, result, zdn, zm);
10741       break;
10742     case FADD_z_p_zz:
10743       fadd(vform, result, zdn, zm);
10744       break;
10745     case FDIVR_z_p_zz:
10746       fdiv(vform, result, zm, zdn);
10747       break;
10748     case FDIV_z_p_zz:
10749       fdiv(vform, result, zdn, zm);
10750       break;
10751     case FMAXNM_z_p_zz:
10752       fmaxnm(vform, result, zdn, zm);
10753       break;
10754     case FMAX_z_p_zz:
10755       fmax(vform, result, zdn, zm);
10756       break;
10757     case FMINNM_z_p_zz:
10758       fminnm(vform, result, zdn, zm);
10759       break;
10760     case FMIN_z_p_zz:
10761       fmin(vform, result, zdn, zm);
10762       break;
10763     case FMULX_z_p_zz:
10764       fmulx(vform, result, zdn, zm);
10765       break;
10766     case FMUL_z_p_zz:
10767       fmul(vform, result, zdn, zm);
10768       break;
10769     case FSCALE_z_p_zz:
10770       fscale(vform, result, zdn, zm);
10771       break;
10772     case FSUBR_z_p_zz:
10773       fsub(vform, result, zm, zdn);
10774       break;
10775     case FSUB_z_p_zz:
10776       fsub(vform, result, zdn, zm);
10777       break;
10778     default:
10779       VIXL_UNIMPLEMENTED();
10780       break;
10781   }
10782   mov_merging(vform, zdn, pg, result);
10783 }
10784 
10785 void Simulator::VisitSVEFPArithmeticWithImm_Predicated(
10786     const Instruction* instr) {
10787   VectorFormat vform = instr->GetSVEVectorFormat();
10788   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
10789     VIXL_UNIMPLEMENTED();
10790   }
10791 
10792   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10793   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10794   SimVRegister result;
10795 
10796   int i1 = instr->ExtractBit(5);
10797   SimVRegister add_sub_imm, min_max_imm, mul_imm;
10798   uint64_t half = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 0.5);
10799   uint64_t one = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 1.0);
10800   uint64_t two = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform), 2.0);
10801   dup_immediate(vform, add_sub_imm, i1 ? one : half);
10802   dup_immediate(vform, min_max_imm, i1 ? one : 0);
10803   dup_immediate(vform, mul_imm, i1 ? two : half);
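  // In other words, bit i1 selects between two constants per instruction
  // group: 0.5 or 1.0 for FADD/FSUB(R), 0.0 or 1.0 for FMAX(NM)/FMIN(NM),
  // and 0.5 or 2.0 for FMUL.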
10804 
10805   switch (instr->Mask(SVEFPArithmeticWithImm_PredicatedMask)) {
10806     case FADD_z_p_zs:
10807       fadd(vform, result, zdn, add_sub_imm);
10808       break;
10809     case FMAXNM_z_p_zs:
10810       fmaxnm(vform, result, zdn, min_max_imm);
10811       break;
10812     case FMAX_z_p_zs:
10813       fmax(vform, result, zdn, min_max_imm);
10814       break;
10815     case FMINNM_z_p_zs:
10816       fminnm(vform, result, zdn, min_max_imm);
10817       break;
10818     case FMIN_z_p_zs:
10819       fmin(vform, result, zdn, min_max_imm);
10820       break;
10821     case FMUL_z_p_zs:
10822       fmul(vform, result, zdn, mul_imm);
10823       break;
10824     case FSUBR_z_p_zs:
10825       fsub(vform, result, add_sub_imm, zdn);
10826       break;
10827     case FSUB_z_p_zs:
10828       fsub(vform, result, zdn, add_sub_imm);
10829       break;
10830     default:
10831       VIXL_UNIMPLEMENTED();
10832       break;
10833   }
10834   mov_merging(vform, zdn, pg, result);
10835 }
10836 
10837 void Simulator::VisitSVEFPTrigMulAddCoefficient(const Instruction* instr) {
10838   VectorFormat vform = instr->GetSVEVectorFormat();
10839   SimVRegister& zd = ReadVRegister(instr->GetRd());
10840   SimVRegister& zm = ReadVRegister(instr->GetRn());
10841 
10842   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10843 
10844   switch (instr->Mask(SVEFPTrigMulAddCoefficientMask)) {
10845     case FTMAD_z_zzi:
10846       ftmad(vform, zd, zd, zm, instr->ExtractBits(18, 16));
10847       break;
10848     default:
10849       VIXL_UNIMPLEMENTED();
10850       break;
10851   }
10852 }
10853 
10854 void Simulator::VisitSVEFPArithmeticUnpredicated(const Instruction* instr) {
10855   VectorFormat vform = instr->GetSVEVectorFormat();
10856   SimVRegister& zd = ReadVRegister(instr->GetRd());
10857   SimVRegister& zn = ReadVRegister(instr->GetRn());
10858   SimVRegister& zm = ReadVRegister(instr->GetRm());
10859 
10860   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10861 
10862   switch (instr->Mask(SVEFPArithmeticUnpredicatedMask)) {
10863     case FADD_z_zz:
10864       fadd(vform, zd, zn, zm);
10865       break;
10866     case FMUL_z_zz:
10867       fmul(vform, zd, zn, zm);
10868       break;
10869     case FRECPS_z_zz:
10870       frecps(vform, zd, zn, zm);
10871       break;
10872     case FRSQRTS_z_zz:
10873       frsqrts(vform, zd, zn, zm);
10874       break;
10875     case FSUB_z_zz:
10876       fsub(vform, zd, zn, zm);
10877       break;
10878     case FTSMUL_z_zz:
10879       ftsmul(vform, zd, zn, zm);
10880       break;
10881     default:
10882       VIXL_UNIMPLEMENTED();
10883       break;
10884   }
10885 }
10886 
10887 void Simulator::VisitSVEFPCompareVectors(const Instruction* instr) {
10888   SimPRegister& pd = ReadPRegister(instr->GetPd());
10889   SimVRegister& zn = ReadVRegister(instr->GetRn());
10890   SimVRegister& zm = ReadVRegister(instr->GetRm());
10891   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10892   VectorFormat vform = instr->GetSVEVectorFormat();
10893   SimVRegister result;
10894 
10895   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10896 
10897   switch (instr->Mask(SVEFPCompareVectorsMask)) {
10898     case FACGE_p_p_zz:
10899       fabscmp(vform, result, zn, zm, ge);
10900       break;
10901     case FACGT_p_p_zz:
10902       fabscmp(vform, result, zn, zm, gt);
10903       break;
10904     case FCMEQ_p_p_zz:
10905       fcmp(vform, result, zn, zm, eq);
10906       break;
10907     case FCMGE_p_p_zz:
10908       fcmp(vform, result, zn, zm, ge);
10909       break;
10910     case FCMGT_p_p_zz:
10911       fcmp(vform, result, zn, zm, gt);
10912       break;
10913     case FCMNE_p_p_zz:
10914       fcmp(vform, result, zn, zm, ne);
10915       break;
10916     case FCMUO_p_p_zz:
10917       fcmp(vform, result, zn, zm, uo);
10918       break;
10919     default:
10920       VIXL_UNIMPLEMENTED();
10921       break;
10922   }
10923 
10924   ExtractFromSimVRegister(vform, pd, result);
10925   mov_zeroing(pd, pg, pd);
10926 }
10927 
10928 void Simulator::VisitSVEFPCompareWithZero(const Instruction* instr) {
10929   SimPRegister& pd = ReadPRegister(instr->GetPd());
10930   SimVRegister& zn = ReadVRegister(instr->GetRn());
10931   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10932   VectorFormat vform = instr->GetSVEVectorFormat();
10933 
10934   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
10935 
10936   SimVRegister result;
10937   SimVRegister zeros;
10938   dup_immediate(kFormatVnD, zeros, 0);
10939 
10940   switch (instr->Mask(SVEFPCompareWithZeroMask)) {
10941     case FCMEQ_p_p_z0:
10942       fcmp(vform, result, zn, zeros, eq);
10943       break;
10944     case FCMGE_p_p_z0:
10945       fcmp(vform, result, zn, zeros, ge);
10946       break;
10947     case FCMGT_p_p_z0:
10948       fcmp(vform, result, zn, zeros, gt);
10949       break;
10950     case FCMLE_p_p_z0:
10951       fcmp(vform, result, zn, zeros, le);
10952       break;
10953     case FCMLT_p_p_z0:
10954       fcmp(vform, result, zn, zeros, lt);
10955       break;
10956     case FCMNE_p_p_z0:
10957       fcmp(vform, result, zn, zeros, ne);
10958       break;
10959     default:
10960       VIXL_UNIMPLEMENTED();
10961       break;
10962   }
10963 
10964   ExtractFromSimVRegister(vform, pd, result);
10965   mov_zeroing(pd, pg, pd);
10966 }
10967 
10968 void Simulator::VisitSVEFPComplexAddition(const Instruction* instr) {
10969   VectorFormat vform = instr->GetSVEVectorFormat();
10970 
10971   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
10972     VIXL_UNIMPLEMENTED();
10973   }
10974 
10975   SimVRegister& zdn = ReadVRegister(instr->GetRd());
10976   SimVRegister& zm = ReadVRegister(instr->GetRn());
10977   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
10978   int rot = instr->ExtractBit(16);
10979 
10980   SimVRegister result;
10981 
10982   switch (instr->Mask(SVEFPComplexAdditionMask)) {
10983     case FCADD_z_p_zz:
10984       fcadd(vform, result, zdn, zm, rot);
10985       break;
10986     default:
10987       VIXL_UNIMPLEMENTED();
10988       break;
10989   }
10990   mov_merging(vform, zdn, pg, result);
10991 }
10992 
10993 void Simulator::VisitSVEFPComplexMulAdd(const Instruction* instr) {
10994   VectorFormat vform = instr->GetSVEVectorFormat();
10995 
10996   if (LaneSizeInBitsFromFormat(vform) == kBRegSize) {
10997     VIXL_UNIMPLEMENTED();
10998   }
10999 
11000   SimVRegister& zda = ReadVRegister(instr->GetRd());
11001   SimVRegister& zn = ReadVRegister(instr->GetRn());
11002   SimVRegister& zm = ReadVRegister(instr->GetRm());
11003   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11004   int rot = instr->ExtractBits(14, 13);
11005 
11006   SimVRegister result;
11007 
11008   switch (instr->Mask(SVEFPComplexMulAddMask)) {
11009     case FCMLA_z_p_zzz:
11010       fcmla(vform, result, zn, zm, zda, rot);
11011       break;
11012     default:
11013       VIXL_UNIMPLEMENTED();
11014       break;
11015   }
11016   mov_merging(vform, zda, pg, result);
11017 }
11018 
11019 void Simulator::VisitSVEFPComplexMulAddIndex(const Instruction* instr) {
11020   SimVRegister& zda = ReadVRegister(instr->GetRd());
11021   SimVRegister& zn = ReadVRegister(instr->GetRn());
11022   int rot = instr->ExtractBits(11, 10);
11023   unsigned zm_code = instr->GetRm();
11024   int index = -1;
11025   VectorFormat vform, vform_dup;
11026 
11027   switch (instr->Mask(SVEFPComplexMulAddIndexMask)) {
11028     case FCMLA_z_zzzi_h:
11029       vform = kFormatVnH;
11030       vform_dup = kFormatVnS;
11031       index = zm_code >> 3;
11032       zm_code &= 0x7;
11033       break;
11034     case FCMLA_z_zzzi_s:
11035       vform = kFormatVnS;
11036       vform_dup = kFormatVnD;
11037       index = zm_code >> 4;
11038       zm_code &= 0xf;
11039       break;
11040     default:
11041       VIXL_UNIMPLEMENTED();
11042       break;
11043   }
11044 
11045   if (index >= 0) {
11046     SimVRegister temp;
11047     dup_elements_to_segments(vform_dup, temp, ReadVRegister(zm_code), index);
11048     fcmla(vform, zda, zn, temp, zda, rot);
11049   }
11050 }
11051 
11052 typedef LogicVRegister (Simulator::*FastReduceFn)(VectorFormat vform,
11053                                                   LogicVRegister dst,
11054                                                   const LogicVRegister& src);
11055 
11056 void Simulator::VisitSVEFPFastReduction(const Instruction* instr) {
11057   VectorFormat vform = instr->GetSVEVectorFormat();
11058   SimVRegister& vd = ReadVRegister(instr->GetRd());
11059   SimVRegister& zn = ReadVRegister(instr->GetRn());
11060   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11061   int lane_size = LaneSizeInBitsFromFormat(vform);
11062 
11063   uint64_t inactive_value = 0;
11064   FastReduceFn fn = nullptr;
11065 
11066   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
11067 
11068   switch (instr->Mask(SVEFPFastReductionMask)) {
11069     case FADDV_v_p_z:
11070       fn = &Simulator::faddv;
11071       break;
11072     case FMAXNMV_v_p_z:
11073       inactive_value = FPToRawbitsWithSize(lane_size, kFP64DefaultNaN);
11074       fn = &Simulator::fmaxnmv;
11075       break;
11076     case FMAXV_v_p_z:
11077       inactive_value = FPToRawbitsWithSize(lane_size, kFP64NegativeInfinity);
11078       fn = &Simulator::fmaxv;
11079       break;
11080     case FMINNMV_v_p_z:
11081       inactive_value = FPToRawbitsWithSize(lane_size, kFP64DefaultNaN);
11082       fn = &Simulator::fminnmv;
11083       break;
11084     case FMINV_v_p_z:
11085       inactive_value = FPToRawbitsWithSize(lane_size, kFP64PositiveInfinity);
11086       fn = &Simulator::fminv;
11087       break;
11088     default:
11089       VIXL_UNIMPLEMENTED();
11090       break;
11091   }
11092 
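  // Fill inactive lanes with the identity for the reduction: 0.0 for FADDV,
  // -infinity for FMAXV, +infinity for FMINV, and the default NaN for
  // FMAXNMV/FMINNMV (which treat quiet NaNs as "no value"), so that inactive
  // lanes cannot affect the result.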
11093   SimVRegister scratch;
11094   dup_immediate(vform, scratch, inactive_value);
11095   mov_merging(vform, scratch, pg, zn);
11096   if (fn != nullptr) (this->*fn)(vform, vd, scratch);
11097 }
11098 
11099 void Simulator::VisitSVEFPMulIndex(const Instruction* instr) {
11100   VectorFormat vform = kFormatUndefined;
11101 
11102   switch (instr->Mask(SVEFPMulIndexMask)) {
11103     case FMUL_z_zzi_d:
11104       vform = kFormatVnD;
11105       break;
11106     case FMUL_z_zzi_h_i3h:
11107     case FMUL_z_zzi_h:
11108       vform = kFormatVnH;
11109       break;
11110     case FMUL_z_zzi_s:
11111       vform = kFormatVnS;
11112       break;
11113     default:
11114       VIXL_UNIMPLEMENTED();
11115       break;
11116   }
11117 
11118   SimVRegister& zd = ReadVRegister(instr->GetRd());
11119   SimVRegister& zn = ReadVRegister(instr->GetRn());
11120   SimVRegister temp;
11121 
11122   dup_elements_to_segments(vform, temp, instr->GetSVEMulZmAndIndex());
11123   fmul(vform, zd, zn, temp);
11124 }
11125 
11126 void Simulator::VisitSVEFPMulAdd(const Instruction* instr) {
11127   VectorFormat vform = instr->GetSVEVectorFormat();
11128   SimVRegister& zd = ReadVRegister(instr->GetRd());
11129   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11130   SimVRegister result;
11131 
11132   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
11133 
11134   if (instr->ExtractBit(15) == 0) {
11135     // Floating-point multiply-accumulate writing addend.
11136     SimVRegister& zm = ReadVRegister(instr->GetRm());
11137     SimVRegister& zn = ReadVRegister(instr->GetRn());
11138 
11139     switch (instr->Mask(SVEFPMulAddMask)) {
11140       // zda = zda + zn * zm
11141       case FMLA_z_p_zzz:
11142         fmla(vform, result, zd, zn, zm);
11143         break;
11144       // zda = -zda + -zn * zm
11145       case FNMLA_z_p_zzz:
11146         fneg(vform, result, zd);
11147         fmls(vform, result, result, zn, zm);
11148         break;
11149       // zda = zda + -zn * zm
11150       case FMLS_z_p_zzz:
11151         fmls(vform, result, zd, zn, zm);
11152         break;
11153       // zda = -zda + zn * zm
11154       case FNMLS_z_p_zzz:
11155         fneg(vform, result, zd);
11156         fmla(vform, result, result, zn, zm);
11157         break;
11158       default:
11159         VIXL_UNIMPLEMENTED();
11160         break;
11161     }
11162   } else {
11163     // Floating-point multiply-accumulate writing multiplicand.
11164     SimVRegister& za = ReadVRegister(instr->GetRm());
11165     SimVRegister& zm = ReadVRegister(instr->GetRn());
11166 
11167     switch (instr->Mask(SVEFPMulAddMask)) {
11168       // zdn = za + zdn * zm
11169       case FMAD_z_p_zzz:
11170         fmla(vform, result, za, zd, zm);
11171         break;
11172       // zdn = -za + -zdn * zm
11173       case FNMAD_z_p_zzz:
11174         fneg(vform, result, za);
11175         fmls(vform, result, result, zd, zm);
11176         break;
11177       // zdn = za + -zdn * zm
11178       case FMSB_z_p_zzz:
11179         fmls(vform, result, za, zd, zm);
11180         break;
11181       // zdn = -za + zdn * zm
11182       case FNMSB_z_p_zzz:
11183         fneg(vform, result, za);
11184         fmla(vform, result, result, zd, zm);
11185         break;
11186       default:
11187         VIXL_UNIMPLEMENTED();
11188         break;
11189     }
11190   }
11191 
11192   mov_merging(vform, zd, pg, result);
11193 }
11194 
11195 void Simulator::VisitSVEFPMulAddIndex(const Instruction* instr) {
11196   VectorFormat vform = kFormatUndefined;
11197 
11198   switch (instr->Mask(SVEFPMulAddIndexMask)) {
11199     case FMLA_z_zzzi_d:
11200     case FMLS_z_zzzi_d:
11201       vform = kFormatVnD;
11202       break;
11203     case FMLA_z_zzzi_s:
11204     case FMLS_z_zzzi_s:
11205       vform = kFormatVnS;
11206       break;
11207     case FMLA_z_zzzi_h:
11208     case FMLS_z_zzzi_h:
11209     case FMLA_z_zzzi_h_i3h:
11210     case FMLS_z_zzzi_h_i3h:
11211       vform = kFormatVnH;
11212       break;
11213     default:
11214       VIXL_UNIMPLEMENTED();
11215       break;
11216   }
11217 
11218   SimVRegister& zd = ReadVRegister(instr->GetRd());
11219   SimVRegister& zn = ReadVRegister(instr->GetRn());
11220   SimVRegister temp;
11221 
11222   dup_elements_to_segments(vform, temp, instr->GetSVEMulZmAndIndex());
11223   if (instr->ExtractBit(10) == 1) {
11224     fmls(vform, zd, zd, zn, temp);
11225   } else {
11226     fmla(vform, zd, zd, zn, temp);
11227   }
11228 }
11229 
11230 void Simulator::VisitSVEFPConvertToInt(const Instruction* instr) {
11231   SimVRegister& zd = ReadVRegister(instr->GetRd());
11232   SimVRegister& zn = ReadVRegister(instr->GetRn());
11233   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11234   int dst_data_size;
11235   int src_data_size;
11236 
11237   switch (instr->Mask(SVEFPConvertToIntMask)) {
11238     case FCVTZS_z_p_z_d2w:
11239     case FCVTZU_z_p_z_d2w:
11240       dst_data_size = kSRegSize;
11241       src_data_size = kDRegSize;
11242       break;
11243     case FCVTZS_z_p_z_d2x:
11244     case FCVTZU_z_p_z_d2x:
11245       dst_data_size = kDRegSize;
11246       src_data_size = kDRegSize;
11247       break;
11248     case FCVTZS_z_p_z_fp162h:
11249     case FCVTZU_z_p_z_fp162h:
11250       dst_data_size = kHRegSize;
11251       src_data_size = kHRegSize;
11252       break;
11253     case FCVTZS_z_p_z_fp162w:
11254     case FCVTZU_z_p_z_fp162w:
11255       dst_data_size = kSRegSize;
11256       src_data_size = kHRegSize;
11257       break;
11258     case FCVTZS_z_p_z_fp162x:
11259     case FCVTZU_z_p_z_fp162x:
11260       dst_data_size = kDRegSize;
11261       src_data_size = kHRegSize;
11262       break;
11263     case FCVTZS_z_p_z_s2w:
11264     case FCVTZU_z_p_z_s2w:
11265       dst_data_size = kSRegSize;
11266       src_data_size = kSRegSize;
11267       break;
11268     case FCVTZS_z_p_z_s2x:
11269     case FCVTZU_z_p_z_s2x:
11270       dst_data_size = kDRegSize;
11271       src_data_size = kSRegSize;
11272       break;
11273     default:
11274       VIXL_UNIMPLEMENTED();
11275       dst_data_size = 0;
11276       src_data_size = 0;
11277       break;
11278   }
11279 
11280   VectorFormat vform =
11281       SVEFormatFromLaneSizeInBits(std::max(dst_data_size, src_data_size));
11282 
11283   if (instr->ExtractBit(16) == 0) {
11284     fcvts(vform, dst_data_size, src_data_size, zd, pg, zn, FPZero);
11285   } else {
11286     fcvtu(vform, dst_data_size, src_data_size, zd, pg, zn, FPZero);
11287   }
11288 }
11289 
11290 void Simulator::VisitSVEFPConvertPrecision(const Instruction* instr) {
11291   SimVRegister& zd = ReadVRegister(instr->GetRd());
11292   SimVRegister& zn = ReadVRegister(instr->GetRn());
11293   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11294   VectorFormat dst_data_size = kFormatUndefined;
11295   VectorFormat src_data_size = kFormatUndefined;
11296 
11297   switch (instr->Mask(SVEFPConvertPrecisionMask)) {
11298     case FCVT_z_p_z_d2h:
11299       dst_data_size = kFormatVnH;
11300       src_data_size = kFormatVnD;
11301       break;
11302     case FCVT_z_p_z_d2s:
11303       dst_data_size = kFormatVnS;
11304       src_data_size = kFormatVnD;
11305       break;
11306     case FCVT_z_p_z_h2d:
11307       dst_data_size = kFormatVnD;
11308       src_data_size = kFormatVnH;
11309       break;
11310     case FCVT_z_p_z_h2s:
11311       dst_data_size = kFormatVnS;
11312       src_data_size = kFormatVnH;
11313       break;
11314     case FCVT_z_p_z_s2d:
11315       dst_data_size = kFormatVnD;
11316       src_data_size = kFormatVnS;
11317       break;
11318     case FCVT_z_p_z_s2h:
11319       dst_data_size = kFormatVnH;
11320       src_data_size = kFormatVnS;
11321       break;
11322     default:
11323       VIXL_UNIMPLEMENTED();
11324       break;
11325   }
11326 
11327   fcvt(dst_data_size, src_data_size, zd, pg, zn);
11328 }
11329 
11330 void Simulator::VisitSVEFPUnaryOp(const Instruction* instr) {
11331   SimVRegister& zd = ReadVRegister(instr->GetRd());
11332   SimVRegister& zn = ReadVRegister(instr->GetRn());
11333   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11334   VectorFormat vform = instr->GetSVEVectorFormat();
11335   SimVRegister result;
11336 
11337   switch (instr->Mask(SVEFPUnaryOpMask)) {
11338     case FRECPX_z_p_z:
11339       frecpx(vform, result, zn);
11340       break;
11341     case FSQRT_z_p_z:
11342       fsqrt(vform, result, zn);
11343       break;
11344     default:
11345       VIXL_UNIMPLEMENTED();
11346       break;
11347   }
11348   mov_merging(vform, zd, pg, result);
11349 }
11350 
11351 void Simulator::VisitSVEFPRoundToIntegralValue(const Instruction* instr) {
11352   SimVRegister& zd = ReadVRegister(instr->GetRd());
11353   SimVRegister& zn = ReadVRegister(instr->GetRn());
11354   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11355   VectorFormat vform = instr->GetSVEVectorFormat();
11356   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
11357   bool exact_exception = false;
11358 
11359   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
11360 
11361   switch (instr->Mask(SVEFPRoundToIntegralValueMask)) {
11362     case FRINTA_z_p_z:
11363       fpcr_rounding = FPTieAway;
11364       break;
11365     case FRINTI_z_p_z:
11366       break;  // Use FPCR rounding mode.
11367     case FRINTM_z_p_z:
11368       fpcr_rounding = FPNegativeInfinity;
11369       break;
11370     case FRINTN_z_p_z:
11371       fpcr_rounding = FPTieEven;
11372       break;
11373     case FRINTP_z_p_z:
11374       fpcr_rounding = FPPositiveInfinity;
11375       break;
11376     case FRINTX_z_p_z:
11377       exact_exception = true;
11378       break;
11379     case FRINTZ_z_p_z:
11380       fpcr_rounding = FPZero;
11381       break;
11382     default:
11383       VIXL_UNIMPLEMENTED();
11384       break;
11385   }
11386 
11387   SimVRegister result;
11388   frint(vform, result, zn, fpcr_rounding, exact_exception, kFrintToInteger);
11389   mov_merging(vform, zd, pg, result);
11390 }
11391 
11392 void Simulator::VisitSVEIntConvertToFP(const Instruction* instr) {
11393   SimVRegister& zd = ReadVRegister(instr->GetRd());
11394   SimVRegister& zn = ReadVRegister(instr->GetRn());
11395   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11396   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
11397   int dst_data_size;
11398   int src_data_size;
11399 
11400   switch (instr->Mask(SVEIntConvertToFPMask)) {
11401     case SCVTF_z_p_z_h2fp16:
11402     case UCVTF_z_p_z_h2fp16:
11403       dst_data_size = kHRegSize;
11404       src_data_size = kHRegSize;
11405       break;
11406     case SCVTF_z_p_z_w2d:
11407     case UCVTF_z_p_z_w2d:
11408       dst_data_size = kDRegSize;
11409       src_data_size = kSRegSize;
11410       break;
11411     case SCVTF_z_p_z_w2fp16:
11412     case UCVTF_z_p_z_w2fp16:
11413       dst_data_size = kHRegSize;
11414       src_data_size = kSRegSize;
11415       break;
11416     case SCVTF_z_p_z_w2s:
11417     case UCVTF_z_p_z_w2s:
11418       dst_data_size = kSRegSize;
11419       src_data_size = kSRegSize;
11420       break;
11421     case SCVTF_z_p_z_x2d:
11422     case UCVTF_z_p_z_x2d:
11423       dst_data_size = kDRegSize;
11424       src_data_size = kDRegSize;
11425       break;
11426     case SCVTF_z_p_z_x2fp16:
11427     case UCVTF_z_p_z_x2fp16:
11428       dst_data_size = kHRegSize;
11429       src_data_size = kDRegSize;
11430       break;
11431     case SCVTF_z_p_z_x2s:
11432     case UCVTF_z_p_z_x2s:
11433       dst_data_size = kSRegSize;
11434       src_data_size = kDRegSize;
11435       break;
11436     default:
11437       VIXL_UNIMPLEMENTED();
11438       dst_data_size = 0;
11439       src_data_size = 0;
11440       break;
11441   }
11442 
11443   VectorFormat vform =
11444       SVEFormatFromLaneSizeInBits(std::max(dst_data_size, src_data_size));
11445 
11446   if (instr->ExtractBit(16) == 0) {
11447     scvtf(vform, dst_data_size, src_data_size, zd, pg, zn, fpcr_rounding);
11448   } else {
11449     ucvtf(vform, dst_data_size, src_data_size, zd, pg, zn, fpcr_rounding);
11450   }
11451 }
11452 
11453 void Simulator::VisitSVEFPUnaryOpUnpredicated(const Instruction* instr) {
11454   VectorFormat vform = instr->GetSVEVectorFormat();
11455   SimVRegister& zd = ReadVRegister(instr->GetRd());
11456   SimVRegister& zn = ReadVRegister(instr->GetRn());
11457   FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());
11458 
11459   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
11460 
11461   switch (instr->Mask(SVEFPUnaryOpUnpredicatedMask)) {
11462     case FRECPE_z_z:
11463       frecpe(vform, zd, zn, fpcr_rounding);
11464       break;
11465     case FRSQRTE_z_z:
11466       frsqrte(vform, zd, zn);
11467       break;
11468     default:
11469       VIXL_UNIMPLEMENTED();
11470       break;
11471   }
11472 }
11473 
11474 void Simulator::VisitSVEIncDecByPredicateCount(const Instruction* instr) {
11475   VectorFormat vform = instr->GetSVEVectorFormat();
11476   SimPRegister& pg = ReadPRegister(instr->ExtractBits(8, 5));
11477 
11478   int count = CountActiveLanes(vform, pg);
11479 
11480   if (instr->ExtractBit(11) == 0) {
11481     SimVRegister& zdn = ReadVRegister(instr->GetRd());
11482     switch (instr->Mask(SVEIncDecByPredicateCountMask)) {
11483       case DECP_z_p_z:
11484         sub_uint(vform, zdn, zdn, count);
11485         break;
11486       case INCP_z_p_z:
11487         add_uint(vform, zdn, zdn, count);
11488         break;
11489       case SQDECP_z_p_z:
11490         sub_uint(vform, zdn, zdn, count).SignedSaturate(vform);
11491         break;
11492       case SQINCP_z_p_z:
11493         add_uint(vform, zdn, zdn, count).SignedSaturate(vform);
11494         break;
11495       case UQDECP_z_p_z:
11496         sub_uint(vform, zdn, zdn, count).UnsignedSaturate(vform);
11497         break;
11498       case UQINCP_z_p_z:
11499         add_uint(vform, zdn, zdn, count).UnsignedSaturate(vform);
11500         break;
11501       default:
11502         VIXL_UNIMPLEMENTED();
11503         break;
11504     }
11505   } else {
11506     bool is_saturating = (instr->ExtractBit(18) == 0);
11507     bool decrement =
11508         is_saturating ? instr->ExtractBit(17) : instr->ExtractBit(16);
11509     bool is_signed = (instr->ExtractBit(16) == 0);
11510     bool sf = is_saturating ? (instr->ExtractBit(10) != 0) : true;
11511     unsigned width = sf ? kXRegSize : kWRegSize;
11512 
11513     switch (instr->Mask(SVEIncDecByPredicateCountMask)) {
11514       case DECP_r_p_r:
11515       case INCP_r_p_r:
11516       case SQDECP_r_p_r_sx:
11517       case SQDECP_r_p_r_x:
11518       case SQINCP_r_p_r_sx:
11519       case SQINCP_r_p_r_x:
11520       case UQDECP_r_p_r_uw:
11521       case UQDECP_r_p_r_x:
11522       case UQINCP_r_p_r_uw:
11523       case UQINCP_r_p_r_x:
11524         WriteXRegister(instr->GetRd(),
11525                        IncDecN(ReadXRegister(instr->GetRd()),
11526                                decrement ? -count : count,
11527                                width,
11528                                is_saturating,
11529                                is_signed));
11530         break;
11531       default:
11532         VIXL_UNIMPLEMENTED();
11533         break;
11534     }
11535   }
11536 }
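
// For example, incp x0, p0.h adds the number of active halfword lanes in p0
// to x0, and uqdecp x0, p0.h subtracts that count with unsigned saturation
// at zero.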
11537 
11538 uint64_t Simulator::IncDecN(uint64_t acc,
11539                             int64_t delta,
11540                             unsigned n,
11541                             bool is_saturating,
11542                             bool is_signed) {
11543   VIXL_ASSERT(n <= 64);
11544   VIXL_ASSERT(IsIntN(n, delta));
11545 
11546   uint64_t sign_mask = UINT64_C(1) << (n - 1);
11547   uint64_t mask = GetUintMask(n);
11548 
11549   acc &= mask;  // Ignore initial accumulator high bits.
11550   uint64_t result = (acc + delta) & mask;
11551 
11552   bool result_negative = ((result & sign_mask) != 0);
11553 
11554   if (is_saturating) {
11555     if (is_signed) {
11556       bool acc_negative = ((acc & sign_mask) != 0);
11557       bool delta_negative = delta < 0;
11558 
11559       // If the signs of the operands are the same, but different from the
11560       // result, there was an overflow.
11561       if ((acc_negative == delta_negative) &&
11562           (acc_negative != result_negative)) {
11563         if (result_negative) {
11564           // Saturate to [..., INT<n>_MAX].
11565           result_negative = false;
11566           result = mask & ~sign_mask;  // E.g. 0x000000007fffffff
11567         } else {
11568           // Saturate to [INT<n>_MIN, ...].
11569           result_negative = true;
11570           result = ~mask | sign_mask;  // E.g. 0xffffffff80000000
11571         }
11572       }
11573     } else {
11574       if ((delta < 0) && (result > acc)) {
11575         // Saturate to [0, ...].
11576         result = 0;
11577       } else if ((delta > 0) && (result < acc)) {
11578         // Saturate to [..., UINT<n>_MAX].
11579         result = mask;
11580       }
11581     }
11582   }
11583 
11584   // Sign-extend if necessary.
11585   if (result_negative && is_signed) result |= ~mask;
11586 
11587   return result;
11588 }
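
// For example, IncDecN(0x7ffffffe, 4, 32, true, true) overflows the signed
// 32-bit range and saturates to 0x7fffffff, while IncDecN(2, -5, 32, true,
// false) would wrap below zero and so saturates to 0. Without saturation the
// same inputs simply wrap modulo 2^n.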
11589 
11590 void Simulator::VisitSVEIndexGeneration(const Instruction* instr) {
11591   VectorFormat vform = instr->GetSVEVectorFormat();
11592   SimVRegister& zd = ReadVRegister(instr->GetRd());
11593   switch (instr->Mask(SVEIndexGenerationMask)) {
11594     case INDEX_z_ii:
11595     case INDEX_z_ir:
11596     case INDEX_z_ri:
11597     case INDEX_z_rr: {
11598       uint64_t start = instr->ExtractBit(10) ? ReadXRegister(instr->GetRn())
11599                                              : instr->ExtractSignedBits(9, 5);
11600       uint64_t step = instr->ExtractBit(11) ? ReadXRegister(instr->GetRm())
11601                                             : instr->ExtractSignedBits(20, 16);
11602       index(vform, zd, start, step);
11603       break;
11604     }
11605     default:
11606       VIXL_UNIMPLEMENTED();
11607       break;
11608   }
11609 }
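
// For example, index z0.s, #1, #2 sets lane i of z0 to 1 + 2 * i, i.e.
// 1, 3, 5, ... across the vector.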
11610 
11611 void Simulator::VisitSVEIntArithmeticUnpredicated(const Instruction* instr) {
11612   VectorFormat vform = instr->GetSVEVectorFormat();
11613   SimVRegister& zd = ReadVRegister(instr->GetRd());
11614   SimVRegister& zn = ReadVRegister(instr->GetRn());
11615   SimVRegister& zm = ReadVRegister(instr->GetRm());
11616   switch (instr->Mask(SVEIntArithmeticUnpredicatedMask)) {
11617     case ADD_z_zz:
11618       add(vform, zd, zn, zm);
11619       break;
11620     case SQADD_z_zz:
11621       add(vform, zd, zn, zm).SignedSaturate(vform);
11622       break;
11623     case SQSUB_z_zz:
11624       sub(vform, zd, zn, zm).SignedSaturate(vform);
11625       break;
11626     case SUB_z_zz:
11627       sub(vform, zd, zn, zm);
11628       break;
11629     case UQADD_z_zz:
11630       add(vform, zd, zn, zm).UnsignedSaturate(vform);
11631       break;
11632     case UQSUB_z_zz:
11633       sub(vform, zd, zn, zm).UnsignedSaturate(vform);
11634       break;
11635     default:
11636       VIXL_UNIMPLEMENTED();
11637       break;
11638   }
11639 }
11640 
11641 void Simulator::VisitSVEIntAddSubtractVectors_Predicated(
11642     const Instruction* instr) {
11643   VectorFormat vform = instr->GetSVEVectorFormat();
11644   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11645   SimVRegister& zm = ReadVRegister(instr->GetRn());
11646   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11647   SimVRegister result;
11648 
11649   switch (instr->Mask(SVEIntAddSubtractVectors_PredicatedMask)) {
11650     case ADD_z_p_zz:
11651       add(vform, result, zdn, zm);
11652       break;
11653     case SUBR_z_p_zz:
11654       sub(vform, result, zm, zdn);
11655       break;
11656     case SUB_z_p_zz:
11657       sub(vform, result, zdn, zm);
11658       break;
11659     default:
11660       VIXL_UNIMPLEMENTED();
11661       break;
11662   }
11663   mov_merging(vform, zdn, pg, result);
11664 }
11665 
11666 void Simulator::VisitSVEBitwiseLogical_Predicated(const Instruction* instr) {
11667   VectorFormat vform = instr->GetSVEVectorFormat();
11668   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11669   SimVRegister& zm = ReadVRegister(instr->GetRn());
11670   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11671   SimVRegister result;
11672 
11673   switch (instr->Mask(SVEBitwiseLogical_PredicatedMask)) {
11674     case AND_z_p_zz:
11675       SVEBitwiseLogicalUnpredicatedHelper(AND, vform, result, zdn, zm);
11676       break;
11677     case BIC_z_p_zz:
11678       SVEBitwiseLogicalUnpredicatedHelper(BIC, vform, result, zdn, zm);
11679       break;
11680     case EOR_z_p_zz:
11681       SVEBitwiseLogicalUnpredicatedHelper(EOR, vform, result, zdn, zm);
11682       break;
11683     case ORR_z_p_zz:
11684       SVEBitwiseLogicalUnpredicatedHelper(ORR, vform, result, zdn, zm);
11685       break;
11686     default:
11687       VIXL_UNIMPLEMENTED();
11688       break;
11689   }
11690   mov_merging(vform, zdn, pg, result);
11691 }
11692 
11693 void Simulator::VisitSVEIntMulVectors_Predicated(const Instruction* instr) {
11694   VectorFormat vform = instr->GetSVEVectorFormat();
11695   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11696   SimVRegister& zm = ReadVRegister(instr->GetRn());
11697   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11698   SimVRegister result;
11699 
11700   switch (instr->Mask(SVEIntMulVectors_PredicatedMask)) {
11701     case MUL_z_p_zz:
11702       mul(vform, result, zdn, zm);
11703       break;
11704     case SMULH_z_p_zz:
11705       smulh(vform, result, zdn, zm);
11706       break;
11707     case UMULH_z_p_zz:
11708       umulh(vform, result, zdn, zm);
11709       break;
11710     default:
11711       VIXL_UNIMPLEMENTED();
11712       break;
11713   }
11714   mov_merging(vform, zdn, pg, result);
11715 }
11716 
11717 void Simulator::VisitSVEIntMinMaxDifference_Predicated(
11718     const Instruction* instr) {
11719   VectorFormat vform = instr->GetSVEVectorFormat();
11720   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11721   SimVRegister& zm = ReadVRegister(instr->GetRn());
11722   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11723   SimVRegister result;
11724 
11725   switch (instr->Mask(SVEIntMinMaxDifference_PredicatedMask)) {
11726     case SABD_z_p_zz:
11727       absdiff(vform, result, zdn, zm, true);
11728       break;
11729     case SMAX_z_p_zz:
11730       smax(vform, result, zdn, zm);
11731       break;
11732     case SMIN_z_p_zz:
11733       smin(vform, result, zdn, zm);
11734       break;
11735     case UABD_z_p_zz:
11736       absdiff(vform, result, zdn, zm, false);
11737       break;
11738     case UMAX_z_p_zz:
11739       umax(vform, result, zdn, zm);
11740       break;
11741     case UMIN_z_p_zz:
11742       umin(vform, result, zdn, zm);
11743       break;
11744     default:
11745       VIXL_UNIMPLEMENTED();
11746       break;
11747   }
11748   mov_merging(vform, zdn, pg, result);
11749 }
11750 
11751 void Simulator::VisitSVEIntMulImm_Unpredicated(const Instruction* instr) {
11752   VectorFormat vform = instr->GetSVEVectorFormat();
11753   SimVRegister& zd = ReadVRegister(instr->GetRd());
11754   SimVRegister scratch;
11755 
11756   switch (instr->Mask(SVEIntMulImm_UnpredicatedMask)) {
11757     case MUL_z_zi:
11758       dup_immediate(vform, scratch, instr->GetImmSVEIntWideSigned());
11759       mul(vform, zd, zd, scratch);
11760       break;
11761     default:
11762       VIXL_UNIMPLEMENTED();
11763       break;
11764   }
11765 }
11766 
11767 void Simulator::VisitSVEIntDivideVectors_Predicated(const Instruction* instr) {
11768   VectorFormat vform = instr->GetSVEVectorFormat();
11769   SimVRegister& zdn = ReadVRegister(instr->GetRd());
11770   SimVRegister& zm = ReadVRegister(instr->GetRn());
11771   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
11772   SimVRegister result;
11773 
11774   VIXL_ASSERT((vform == kFormatVnS) || (vform == kFormatVnD));
11775 
11776   switch (instr->Mask(SVEIntDivideVectors_PredicatedMask)) {
11777     case SDIVR_z_p_zz:
11778       sdiv(vform, result, zm, zdn);
11779       break;
11780     case SDIV_z_p_zz:
11781       sdiv(vform, result, zdn, zm);
11782       break;
11783     case UDIVR_z_p_zz:
11784       udiv(vform, result, zm, zdn);
11785       break;
11786     case UDIV_z_p_zz:
11787       udiv(vform, result, zdn, zm);
11788       break;
11789     default:
11790       VIXL_UNIMPLEMENTED();
11791       break;
11792   }
11793   mov_merging(vform, zdn, pg, result);
11794 }
11795 
11796 void Simulator::VisitSVEIntMinMaxImm_Unpredicated(const Instruction* instr) {
11797   VectorFormat vform = instr->GetSVEVectorFormat();
11798   SimVRegister& zd = ReadVRegister(instr->GetRd());
11799   SimVRegister scratch;
11800 
11801   uint64_t unsigned_imm = instr->GetImmSVEIntWideUnsigned();
11802   int64_t signed_imm = instr->GetImmSVEIntWideSigned();
11803 
11804   switch (instr->Mask(SVEIntMinMaxImm_UnpredicatedMask)) {
11805     case SMAX_z_zi:
11806       dup_immediate(vform, scratch, signed_imm);
11807       smax(vform, zd, zd, scratch);
11808       break;
11809     case SMIN_z_zi:
11810       dup_immediate(vform, scratch, signed_imm);
11811       smin(vform, zd, zd, scratch);
11812       break;
11813     case UMAX_z_zi:
11814       dup_immediate(vform, scratch, unsigned_imm);
11815       umax(vform, zd, zd, scratch);
11816       break;
11817     case UMIN_z_zi:
11818       dup_immediate(vform, scratch, unsigned_imm);
11819       umin(vform, zd, zd, scratch);
11820       break;
11821     default:
11822       VIXL_UNIMPLEMENTED();
11823       break;
11824   }
11825 }
11826 
11827 void Simulator::VisitSVEIntCompareScalarCountAndLimit(
11828     const Instruction* instr) {
11829   unsigned rn_code = instr->GetRn();
11830   unsigned rm_code = instr->GetRm();
11831   SimPRegister& pd = ReadPRegister(instr->GetPd());
11832   VectorFormat vform = instr->GetSVEVectorFormat();
11833 
11834   bool is_64_bit = instr->ExtractBit(12) == 1;
11835   int rsize = is_64_bit ? kXRegSize : kWRegSize;
11836   uint64_t mask = is_64_bit ? kXRegMask : kWRegMask;
11837 
11838   uint64_t usrc1 = ReadXRegister(rn_code);
11839   int64_t ssrc2 = is_64_bit ? ReadXRegister(rm_code) : ReadWRegister(rm_code);
11840   uint64_t usrc2 = ssrc2 & mask;
11841 
11842   bool reverse = (form_hash_ == "whilege_p_p_rr"_h) ||
11843                  (form_hash_ == "whilegt_p_p_rr"_h) ||
11844                  (form_hash_ == "whilehi_p_p_rr"_h) ||
11845                  (form_hash_ == "whilehs_p_p_rr"_h);
11846 
11847   int lane_count = LaneCountFromFormat(vform);
11848   bool last = true;
11849   for (int i = 0; i < lane_count; i++) {
11850     usrc1 &= mask;
11851     int64_t ssrc1 = ExtractSignedBitfield64(rsize - 1, 0, usrc1);
11852 
11853     bool cond = false;
11854     switch (form_hash_) {
11855       case "whilele_p_p_rr"_h:
11856         cond = ssrc1 <= ssrc2;
11857         break;
11858       case "whilelo_p_p_rr"_h:
11859         cond = usrc1 < usrc2;
11860         break;
11861       case "whilels_p_p_rr"_h:
11862         cond = usrc1 <= usrc2;
11863         break;
11864       case "whilelt_p_p_rr"_h:
11865         cond = ssrc1 < ssrc2;
11866         break;
11867       case "whilege_p_p_rr"_h:
11868         cond = ssrc1 >= ssrc2;
11869         break;
11870       case "whilegt_p_p_rr"_h:
11871         cond = ssrc1 > ssrc2;
11872         break;
11873       case "whilehi_p_p_rr"_h:
11874         cond = usrc1 > usrc2;
11875         break;
11876       case "whilehs_p_p_rr"_h:
11877         cond = usrc1 >= usrc2;
11878         break;
11879       default:
11880         VIXL_UNIMPLEMENTED();
11881         break;
11882     }
11883     last = last && cond;
11884     LogicPRegister dst(pd);
11885     int lane = reverse ? ((lane_count - 1) - i) : i;
11886     dst.SetActive(vform, lane, last);
11887     usrc1 += reverse ? -1 : 1;
11888   }
11889 
11890   PredTest(vform, GetPTrue(), pd);
11891   LogSystemRegister(NZCV);
11892 }
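
// For example, whilelt p0.s, x0, x1 with x0 = 0 and x1 = 3 activates lanes 0,
// 1 and 2 and clears the rest; once the comparison fails for one lane, all
// later lanes stay inactive because of the running "last" flag.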
11893 
11894 void Simulator::VisitSVEConditionallyTerminateScalars(
11895     const Instruction* instr) {
11896   unsigned rn_code = instr->GetRn();
11897   unsigned rm_code = instr->GetRm();
11898   bool is_64_bit = instr->ExtractBit(22) == 1;
11899   uint64_t src1 = is_64_bit ? ReadXRegister(rn_code) : ReadWRegister(rn_code);
11900   uint64_t src2 = is_64_bit ? ReadXRegister(rm_code) : ReadWRegister(rm_code);
11901   bool term = false;
11902   switch (instr->Mask(SVEConditionallyTerminateScalarsMask)) {
11903     case CTERMEQ_rr:
11904       term = src1 == src2;
11905       break;
11906     case CTERMNE_rr:
11907       term = src1 != src2;
11908       break;
11909     default:
11910       VIXL_UNIMPLEMENTED();
11911       break;
11912   }
11913   ReadNzcv().SetN(term ? 1 : 0);
11914   ReadNzcv().SetV(term ? 0 : !ReadC());
11915   LogSystemRegister(NZCV);
11916 }
11917 
11918 void Simulator::VisitSVEIntCompareSignedImm(const Instruction* instr) {
11919   bool commute_inputs = false;
11920   Condition cond = al;
11921   switch (instr->Mask(SVEIntCompareSignedImmMask)) {
11922     case CMPEQ_p_p_zi:
11923       cond = eq;
11924       break;
11925     case CMPGE_p_p_zi:
11926       cond = ge;
11927       break;
11928     case CMPGT_p_p_zi:
11929       cond = gt;
11930       break;
11931     case CMPLE_p_p_zi:
11932       cond = ge;
11933       commute_inputs = true;
11934       break;
11935     case CMPLT_p_p_zi:
11936       cond = gt;
11937       commute_inputs = true;
11938       break;
11939     case CMPNE_p_p_zi:
11940       cond = ne;
11941       break;
11942     default:
11943       VIXL_UNIMPLEMENTED();
11944       break;
11945   }
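  // CMPLE and CMPLT have no dedicated condition here; they are evaluated as
  // GE and GT respectively with the vector and immediate operands swapped.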
11946 
11947   VectorFormat vform = instr->GetSVEVectorFormat();
11948   SimVRegister src2;
11949   dup_immediate(vform,
11950                 src2,
11951                 ExtractSignedBitfield64(4, 0, instr->ExtractBits(20, 16)));
11952   SVEIntCompareVectorsHelper(cond,
11953                              vform,
11954                              ReadPRegister(instr->GetPd()),
11955                              ReadPRegister(instr->GetPgLow8()),
11956                              commute_inputs ? src2
11957                                             : ReadVRegister(instr->GetRn()),
11958                              commute_inputs ? ReadVRegister(instr->GetRn())
11959                                             : src2);
11960 }
11961 
11962 void Simulator::VisitSVEIntCompareUnsignedImm(const Instruction* instr) {
11963   bool commute_inputs = false;
11964   Condition cond = al;
11965   switch (instr->Mask(SVEIntCompareUnsignedImmMask)) {
11966     case CMPHI_p_p_zi:
11967       cond = hi;
11968       break;
11969     case CMPHS_p_p_zi:
11970       cond = hs;
11971       break;
11972     case CMPLO_p_p_zi:
11973       cond = hi;
11974       commute_inputs = true;
11975       break;
11976     case CMPLS_p_p_zi:
11977       cond = hs;
11978       commute_inputs = true;
11979       break;
11980     default:
11981       VIXL_UNIMPLEMENTED();
11982       break;
11983   }
11984 
11985   VectorFormat vform = instr->GetSVEVectorFormat();
11986   SimVRegister src2;
11987   dup_immediate(vform, src2, instr->ExtractBits(20, 14));
11988   SVEIntCompareVectorsHelper(cond,
11989                              vform,
11990                              ReadPRegister(instr->GetPd()),
11991                              ReadPRegister(instr->GetPgLow8()),
11992                              commute_inputs ? src2
11993                                             : ReadVRegister(instr->GetRn()),
11994                              commute_inputs ? ReadVRegister(instr->GetRn())
11995                                             : src2);
11996 }
11997 
11998 void Simulator::VisitSVEIntCompareVectors(const Instruction* instr) {
11999   Instr op = instr->Mask(SVEIntCompareVectorsMask);
12000   bool is_wide_elements = false;
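  // In the '_zw' (wide element) forms, each element of the first operand is
  // compared against the overlapping 64-bit element of the second operand.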
12001   switch (op) {
12002     case CMPEQ_p_p_zw:
12003     case CMPGE_p_p_zw:
12004     case CMPGT_p_p_zw:
12005     case CMPHI_p_p_zw:
12006     case CMPHS_p_p_zw:
12007     case CMPLE_p_p_zw:
12008     case CMPLO_p_p_zw:
12009     case CMPLS_p_p_zw:
12010     case CMPLT_p_p_zw:
12011     case CMPNE_p_p_zw:
12012       is_wide_elements = true;
12013       break;
12014   }
12015 
12016   Condition cond;
12017   switch (op) {
12018     case CMPEQ_p_p_zw:
12019     case CMPEQ_p_p_zz:
12020       cond = eq;
12021       break;
12022     case CMPGE_p_p_zw:
12023     case CMPGE_p_p_zz:
12024       cond = ge;
12025       break;
12026     case CMPGT_p_p_zw:
12027     case CMPGT_p_p_zz:
12028       cond = gt;
12029       break;
12030     case CMPHI_p_p_zw:
12031     case CMPHI_p_p_zz:
12032       cond = hi;
12033       break;
12034     case CMPHS_p_p_zw:
12035     case CMPHS_p_p_zz:
12036       cond = hs;
12037       break;
12038     case CMPNE_p_p_zw:
12039     case CMPNE_p_p_zz:
12040       cond = ne;
12041       break;
12042     case CMPLE_p_p_zw:
12043       cond = le;
12044       break;
12045     case CMPLO_p_p_zw:
12046       cond = lo;
12047       break;
12048     case CMPLS_p_p_zw:
12049       cond = ls;
12050       break;
12051     case CMPLT_p_p_zw:
12052       cond = lt;
12053       break;
12054     default:
12055       VIXL_UNIMPLEMENTED();
12056       cond = al;
12057       break;
12058   }
12059 
12060   SVEIntCompareVectorsHelper(cond,
12061                              instr->GetSVEVectorFormat(),
12062                              ReadPRegister(instr->GetPd()),
12063                              ReadPRegister(instr->GetPgLow8()),
12064                              ReadVRegister(instr->GetRn()),
12065                              ReadVRegister(instr->GetRm()),
12066                              is_wide_elements);
12067 }
12068 
12069 void Simulator::VisitSVEFPExponentialAccelerator(const Instruction* instr) {
12070   VectorFormat vform = instr->GetSVEVectorFormat();
12071   SimVRegister& zd = ReadVRegister(instr->GetRd());
12072   SimVRegister& zn = ReadVRegister(instr->GetRn());
12073 
12074   VIXL_ASSERT((vform == kFormatVnH) || (vform == kFormatVnS) ||
12075               (vform == kFormatVnD));
12076 
12077   switch (instr->Mask(SVEFPExponentialAcceleratorMask)) {
12078     case FEXPA_z_z:
12079       fexpa(vform, zd, zn);
12080       break;
12081     default:
12082       VIXL_UNIMPLEMENTED();
12083       break;
12084   }
12085 }
12086 
12087 void Simulator::VisitSVEFPTrigSelectCoefficient(const Instruction* instr) {
12088   VectorFormat vform = instr->GetSVEVectorFormat();
12089   SimVRegister& zd = ReadVRegister(instr->GetRd());
12090   SimVRegister& zn = ReadVRegister(instr->GetRn());
12091   SimVRegister& zm = ReadVRegister(instr->GetRm());
12092 
12093   VIXL_ASSERT((vform == kFormatVnH) || (vform == kFormatVnS) ||
12094               (vform == kFormatVnD));
12095 
12096   switch (instr->Mask(SVEFPTrigSelectCoefficientMask)) {
12097     case FTSSEL_z_zz:
12098       ftssel(vform, zd, zn, zm);
12099       break;
12100     default:
12101       VIXL_UNIMPLEMENTED();
12102       break;
12103   }
12104 }
12105 
12106 void Simulator::VisitSVEConstructivePrefix_Unpredicated(
12107     const Instruction* instr) {
12108   SimVRegister& zd = ReadVRegister(instr->GetRd());
12109   SimVRegister& zn = ReadVRegister(instr->GetRn());
12110 
12111   switch (instr->Mask(SVEConstructivePrefix_UnpredicatedMask)) {
12112     case MOVPRFX_z_z:
12113       mov(kFormatVnD, zd, zn);  // The lane size is arbitrary.
12114       break;
12115     default:
12116       VIXL_UNIMPLEMENTED();
12117       break;
12118   }
12119 }
12120 
12121 void Simulator::VisitSVEIntMulAddPredicated(const Instruction* instr) {
12122   VectorFormat vform = instr->GetSVEVectorFormat();
12123 
12124   SimVRegister& zd = ReadVRegister(instr->GetRd());
12125   SimVRegister& zm = ReadVRegister(instr->GetRm());
12126 
12127   SimVRegister result;
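  // The multiply-accumulate is computed into a scratch register and then
  // merged into Zd under the governing predicate, so inactive lanes keep
  // their previous Zd value.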
12128   switch (instr->Mask(SVEIntMulAddPredicatedMask)) {
12129     case MLA_z_p_zzz:
12130       mla(vform, result, zd, ReadVRegister(instr->GetRn()), zm);
12131       break;
12132     case MLS_z_p_zzz:
12133       mls(vform, result, zd, ReadVRegister(instr->GetRn()), zm);
12134       break;
12135     case MAD_z_p_zzz:
12136       // 'za' is encoded in 'Rn'.
12137       mla(vform, result, ReadVRegister(instr->GetRn()), zd, zm);
12138       break;
12139     case MSB_z_p_zzz: {
12140       // 'za' is encoded in 'Rn'.
12141       mls(vform, result, ReadVRegister(instr->GetRn()), zd, zm);
12142       break;
12143     }
12144     default:
12145       VIXL_UNIMPLEMENTED();
12146       break;
12147   }
12148   mov_merging(vform, zd, ReadPRegister(instr->GetPgLow8()), result);
12149 }
12150 
12151 void Simulator::VisitSVEIntMulAddUnpredicated(const Instruction* instr) {
12152   VectorFormat vform = instr->GetSVEVectorFormat();
12153   SimVRegister& zda = ReadVRegister(instr->GetRd());
12154   SimVRegister& zn = ReadVRegister(instr->GetRn());
12155   SimVRegister& zm = ReadVRegister(instr->GetRm());
12156 
12157   switch (form_hash_) {
12158     case "sdot_z_zzz"_h:
12159       sdot(vform, zda, zn, zm);
12160       break;
12161     case "udot_z_zzz"_h:
12162       udot(vform, zda, zn, zm);
12163       break;
12164     case "usdot_z_zzz_s"_h:
12165       usdot(vform, zda, zn, zm);
12166       break;
12167     default:
12168       VIXL_UNIMPLEMENTED();
12169       break;
12170   }
12171 }
12172 
12173 void Simulator::VisitSVEMovprfx(const Instruction* instr) {
12174   VectorFormat vform = instr->GetSVEVectorFormat();
12175   SimVRegister& zn = ReadVRegister(instr->GetRn());
12176   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
12177   SimVRegister& zd = ReadVRegister(instr->GetRd());
12178 
12179   switch (instr->Mask(SVEMovprfxMask)) {
12180     case MOVPRFX_z_p_z:
12181       if (instr->ExtractBit(16)) {
12182         mov_merging(vform, zd, pg, zn);
12183       } else {
12184         mov_zeroing(vform, zd, pg, zn);
12185       }
12186       break;
12187     default:
12188       VIXL_UNIMPLEMENTED();
12189       break;
12190   }
12191 }
12192 
12193 void Simulator::VisitSVEIntReduction(const Instruction* instr) {
12194   VectorFormat vform = instr->GetSVEVectorFormat();
12195   SimVRegister& vd = ReadVRegister(instr->GetRd());
12196   SimVRegister& zn = ReadVRegister(instr->GetRn());
12197   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
12198 
12199   if (instr->Mask(SVEIntReductionLogicalFMask) == SVEIntReductionLogicalFixed) {
12200     switch (instr->Mask(SVEIntReductionLogicalMask)) {
12201       case ANDV_r_p_z:
12202         andv(vform, vd, pg, zn);
12203         break;
12204       case EORV_r_p_z:
12205         eorv(vform, vd, pg, zn);
12206         break;
12207       case ORV_r_p_z:
12208         orv(vform, vd, pg, zn);
12209         break;
12210       default:
12211         VIXL_UNIMPLEMENTED();
12212         break;
12213     }
12214   } else {
12215     switch (instr->Mask(SVEIntReductionMask)) {
12216       case SADDV_r_p_z:
12217         saddv(vform, vd, pg, zn);
12218         break;
12219       case SMAXV_r_p_z:
12220         smaxv(vform, vd, pg, zn);
12221         break;
12222       case SMINV_r_p_z:
12223         sminv(vform, vd, pg, zn);
12224         break;
12225       case UADDV_r_p_z:
12226         uaddv(vform, vd, pg, zn);
12227         break;
12228       case UMAXV_r_p_z:
12229         umaxv(vform, vd, pg, zn);
12230         break;
12231       case UMINV_r_p_z:
12232         uminv(vform, vd, pg, zn);
12233         break;
12234       default:
12235         VIXL_UNIMPLEMENTED();
12236         break;
12237     }
12238   }
12239 }
12240 
12241 void Simulator::VisitSVEIntUnaryArithmeticPredicated(const Instruction* instr) {
12242   VectorFormat vform = instr->GetSVEVectorFormat();
12243   SimVRegister& zn = ReadVRegister(instr->GetRn());
12244 
12245   SimVRegister result;
12246   switch (instr->Mask(SVEIntUnaryArithmeticPredicatedMask)) {
12247     case ABS_z_p_z:
12248       abs(vform, result, zn);
12249       break;
12250     case CLS_z_p_z:
12251       cls(vform, result, zn);
12252       break;
12253     case CLZ_z_p_z:
12254       clz(vform, result, zn);
12255       break;
12256     case CNOT_z_p_z:
12257       cnot(vform, result, zn);
12258       break;
12259     case CNT_z_p_z:
12260       cnt(vform, result, zn);
12261       break;
12262     case FABS_z_p_z:
12263       fabs_(vform, result, zn);
12264       break;
12265     case FNEG_z_p_z:
12266       fneg(vform, result, zn);
12267       break;
12268     case NEG_z_p_z:
12269       neg(vform, result, zn);
12270       break;
12271     case NOT_z_p_z:
12272       not_(vform, result, zn);
12273       break;
12274     case SXTB_z_p_z:
12275     case SXTH_z_p_z:
12276     case SXTW_z_p_z:
12277       sxt(vform, result, zn, (kBitsPerByte << instr->ExtractBits(18, 17)));
12278       break;
12279     case UXTB_z_p_z:
12280     case UXTH_z_p_z:
12281     case UXTW_z_p_z:
12282       uxt(vform, result, zn, (kBitsPerByte << instr->ExtractBits(18, 17)));
12283       break;
12284     default:
12285       VIXL_UNIMPLEMENTED();
12286       break;
12287   }
12288 
12289   SimVRegister& zd = ReadVRegister(instr->GetRd());
12290   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
12291   mov_merging(vform, zd, pg, result);
12292 }
12293 
12294 void Simulator::VisitSVECopyFPImm_Predicated(const Instruction* instr) {
12295   // There is only one instruction in this group.
12296   VIXL_ASSERT(instr->Mask(SVECopyFPImm_PredicatedMask) == FCPY_z_p_i);
12297 
12298   VectorFormat vform = instr->GetSVEVectorFormat();
12299   SimPRegister& pg = ReadPRegister(instr->ExtractBits(19, 16));
12300   SimVRegister& zd = ReadVRegister(instr->GetRd());
12301 
12302   if (vform == kFormatVnB) VIXL_UNIMPLEMENTED();
12303 
12304   SimVRegister result;
12305   switch (instr->Mask(SVECopyFPImm_PredicatedMask)) {
12306     case FCPY_z_p_i: {
12307       int imm8 = instr->ExtractBits(12, 5);
12308       uint64_t value = FPToRawbitsWithSize(LaneSizeInBitsFromFormat(vform),
12309                                            Instruction::Imm8ToFP64(imm8));
12310       dup_immediate(vform, result, value);
12311       break;
12312     }
12313     default:
12314       VIXL_UNIMPLEMENTED();
12315       break;
12316   }
12317   mov_merging(vform, zd, pg, result);
12318 }
12319 
12320 void Simulator::VisitSVEIntAddSubtractImm_Unpredicated(
12321     const Instruction* instr) {
12322   VectorFormat vform = instr->GetSVEVectorFormat();
12323   SimVRegister& zd = ReadVRegister(instr->GetRd());
12324   SimVRegister scratch;
12325 
12326   uint64_t imm = instr->GetImmSVEIntWideUnsigned();
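  // Bit 13 ('sh') shifts the 8-bit immediate left by eight when set.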
12327   imm <<= instr->ExtractBit(13) * 8;
12328 
12329   switch (instr->Mask(SVEIntAddSubtractImm_UnpredicatedMask)) {
12330     case ADD_z_zi:
12331       add_uint(vform, zd, zd, imm);
12332       break;
12333     case SQADD_z_zi:
12334       add_uint(vform, zd, zd, imm).SignedSaturate(vform);
12335       break;
12336     case SQSUB_z_zi:
12337       sub_uint(vform, zd, zd, imm).SignedSaturate(vform);
12338       break;
12339     case SUBR_z_zi:
12340       dup_immediate(vform, scratch, imm);
12341       sub(vform, zd, scratch, zd);
12342       break;
12343     case SUB_z_zi:
12344       sub_uint(vform, zd, zd, imm);
12345       break;
12346     case UQADD_z_zi:
12347       add_uint(vform, zd, zd, imm).UnsignedSaturate(vform);
12348       break;
12349     case UQSUB_z_zi:
12350       sub_uint(vform, zd, zd, imm).UnsignedSaturate(vform);
12351       break;
12352     default:
12353       break;
12354   }
12355 }
12356 
12357 void Simulator::VisitSVEBroadcastIntImm_Unpredicated(const Instruction* instr) {
12358   SimVRegister& zd = ReadVRegister(instr->GetRd());
12359 
12360   VectorFormat format = instr->GetSVEVectorFormat();
12361   int64_t imm = instr->GetImmSVEIntWideSigned();
12362   int shift = instr->ExtractBit(13) * 8;
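  // The optional lsl #8 is applied with a multiply because imm is signed;
  // left-shifting a negative value is undefined behaviour in C++ (assumed
  // rationale for this form).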
12363   imm *= uint64_t{1} << shift;
12364 
12365   switch (instr->Mask(SVEBroadcastIntImm_UnpredicatedMask)) {
12366     case DUP_z_i:
12367       // The encoding of byte-sized lanes with lsl #8 is undefined.
12368       if ((format == kFormatVnB) && (shift == 8)) {
12369         VIXL_UNIMPLEMENTED();
12370       } else {
12371         dup_immediate(format, zd, imm);
12372       }
12373       break;
12374     default:
12375       VIXL_UNIMPLEMENTED();
12376       break;
12377   }
12378 }
12379 
12380 void Simulator::VisitSVEBroadcastFPImm_Unpredicated(const Instruction* instr) {
12381   VectorFormat vform = instr->GetSVEVectorFormat();
12382   SimVRegister& zd = ReadVRegister(instr->GetRd());
12383 
12384   switch (instr->Mask(SVEBroadcastFPImm_UnpredicatedMask)) {
12385     case FDUP_z_i:
12386       switch (vform) {
12387         case kFormatVnH:
12388           dup_immediate(vform, zd, Float16ToRawbits(instr->GetSVEImmFP16()));
12389           break;
12390         case kFormatVnS:
12391           dup_immediate(vform, zd, FloatToRawbits(instr->GetSVEImmFP32()));
12392           break;
12393         case kFormatVnD:
12394           dup_immediate(vform, zd, DoubleToRawbits(instr->GetSVEImmFP64()));
12395           break;
12396         default:
12397           VIXL_UNIMPLEMENTED();
12398       }
12399       break;
12400     default:
12401       VIXL_UNIMPLEMENTED();
12402       break;
12403   }
12404 }
12405 
12406 void Simulator::VisitSVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets(
12407     const Instruction* instr) {
12408   switch (instr->Mask(
12409       SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsetsMask)) {
12410     case LD1H_z_p_bz_s_x32_scaled:
12411     case LD1SH_z_p_bz_s_x32_scaled:
12412     case LDFF1H_z_p_bz_s_x32_scaled:
12413     case LDFF1SH_z_p_bz_s_x32_scaled:
12414       break;
12415     default:
12416       VIXL_UNIMPLEMENTED();
12417       break;
12418   }
12419 
12420   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
12421   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
12422 }
12423 
12424 void Simulator::VisitSVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets(
12425     const Instruction* instr) {
12426   switch (instr->Mask(SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsetsMask)) {
12427     case LD1B_z_p_bz_s_x32_unscaled:
12428     case LD1H_z_p_bz_s_x32_unscaled:
12429     case LD1SB_z_p_bz_s_x32_unscaled:
12430     case LD1SH_z_p_bz_s_x32_unscaled:
12431     case LD1W_z_p_bz_s_x32_unscaled:
12432     case LDFF1B_z_p_bz_s_x32_unscaled:
12433     case LDFF1H_z_p_bz_s_x32_unscaled:
12434     case LDFF1SB_z_p_bz_s_x32_unscaled:
12435     case LDFF1SH_z_p_bz_s_x32_unscaled:
12436     case LDFF1W_z_p_bz_s_x32_unscaled:
12437       break;
12438     default:
12439       VIXL_UNIMPLEMENTED();
12440       break;
12441   }
12442 
12443   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
12444   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
12445 }
12446 
12447 void Simulator::VisitSVE32BitGatherLoad_VectorPlusImm(
12448     const Instruction* instr) {
12449   switch (instr->Mask(SVE32BitGatherLoad_VectorPlusImmMask)) {
12450     case LD1B_z_p_ai_s:
12451       VIXL_UNIMPLEMENTED();
12452       break;
12453     case LD1H_z_p_ai_s:
12454       VIXL_UNIMPLEMENTED();
12455       break;
12456     case LD1SB_z_p_ai_s:
12457       VIXL_UNIMPLEMENTED();
12458       break;
12459     case LD1SH_z_p_ai_s:
12460       VIXL_UNIMPLEMENTED();
12461       break;
12462     case LD1W_z_p_ai_s:
12463       VIXL_UNIMPLEMENTED();
12464       break;
12465     case LDFF1B_z_p_ai_s:
12466       VIXL_UNIMPLEMENTED();
12467       break;
12468     case LDFF1H_z_p_ai_s:
12469       VIXL_UNIMPLEMENTED();
12470       break;
12471     case LDFF1SB_z_p_ai_s:
12472       VIXL_UNIMPLEMENTED();
12473       break;
12474     case LDFF1SH_z_p_ai_s:
12475       VIXL_UNIMPLEMENTED();
12476       break;
12477     case LDFF1W_z_p_ai_s:
12478       VIXL_UNIMPLEMENTED();
12479       break;
12480     default:
12481       VIXL_UNIMPLEMENTED();
12482       break;
12483   }
12484 }
12485 
12486 void Simulator::VisitSVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets(
12487     const Instruction* instr) {
12488   switch (
12489       instr->Mask(SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsetsMask)) {
12490     case LD1W_z_p_bz_s_x32_scaled:
12491     case LDFF1W_z_p_bz_s_x32_scaled:
12492       break;
12493     default:
12494       VIXL_UNIMPLEMENTED();
12495       break;
12496   }
12497 
12498   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
12499   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnS, mod);
12500 }
12501 
12502 void Simulator::VisitSVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets(
12503     const Instruction* instr) {
12504   switch (
12505       instr->Mask(SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsetsMask)) {
12506     // Ignore prefetch hint instructions.
12507     case PRFB_i_p_bz_s_x32_scaled:
12508     case PRFD_i_p_bz_s_x32_scaled:
12509     case PRFH_i_p_bz_s_x32_scaled:
12510     case PRFW_i_p_bz_s_x32_scaled:
12511       break;
12512     default:
12513       VIXL_UNIMPLEMENTED();
12514       break;
12515   }
12516 }
12517 
12518 void Simulator::VisitSVE32BitGatherPrefetch_VectorPlusImm(
12519     const Instruction* instr) {
12520   switch (instr->Mask(SVE32BitGatherPrefetch_VectorPlusImmMask)) {
12521     // Ignore prefetch hint instructions.
12522     case PRFB_i_p_ai_s:
12523     case PRFD_i_p_ai_s:
12524     case PRFH_i_p_ai_s:
12525     case PRFW_i_p_ai_s:
12526       break;
12527     default:
12528       VIXL_UNIMPLEMENTED();
12529       break;
12530   }
12531 }
12532 
12533 void Simulator::VisitSVEContiguousPrefetch_ScalarPlusImm(
12534     const Instruction* instr) {
12535   switch (instr->Mask(SVEContiguousPrefetch_ScalarPlusImmMask)) {
12536     // Ignore prefetch hint instructions.
12537     case PRFB_i_p_bi_s:
12538     case PRFD_i_p_bi_s:
12539     case PRFH_i_p_bi_s:
12540     case PRFW_i_p_bi_s:
12541       break;
12542     default:
12543       VIXL_UNIMPLEMENTED();
12544       break;
12545   }
12546 }
12547 
12548 void Simulator::VisitSVEContiguousPrefetch_ScalarPlusScalar(
12549     const Instruction* instr) {
12550   switch (instr->Mask(SVEContiguousPrefetch_ScalarPlusScalarMask)) {
12551     // Ignore prefetch hint instructions.
12552     case PRFB_i_p_br_s:
12553     case PRFD_i_p_br_s:
12554     case PRFH_i_p_br_s:
12555     case PRFW_i_p_br_s:
12556       if (instr->GetRm() == kZeroRegCode) {
12557         VIXL_UNIMPLEMENTED();
12558       }
12559       break;
12560     default:
12561       VIXL_UNIMPLEMENTED();
12562       break;
12563   }
12564 }
12565 
12566 void Simulator::VisitSVELoadAndBroadcastElement(const Instruction* instr) {
12567   bool is_signed;
12568   switch (instr->Mask(SVELoadAndBroadcastElementMask)) {
12569     case LD1RB_z_p_bi_u8:
12570     case LD1RB_z_p_bi_u16:
12571     case LD1RB_z_p_bi_u32:
12572     case LD1RB_z_p_bi_u64:
12573     case LD1RH_z_p_bi_u16:
12574     case LD1RH_z_p_bi_u32:
12575     case LD1RH_z_p_bi_u64:
12576     case LD1RW_z_p_bi_u32:
12577     case LD1RW_z_p_bi_u64:
12578     case LD1RD_z_p_bi_u64:
12579       is_signed = false;
12580       break;
12581     case LD1RSB_z_p_bi_s16:
12582     case LD1RSB_z_p_bi_s32:
12583     case LD1RSB_z_p_bi_s64:
12584     case LD1RSH_z_p_bi_s32:
12585     case LD1RSH_z_p_bi_s64:
12586     case LD1RSW_z_p_bi_s64:
12587       is_signed = true;
12588       break;
12589     default:
12590       // This encoding group is complete, so no other values should be possible.
12591       VIXL_UNREACHABLE();
12592       is_signed = false;
12593       break;
12594   }
12595 
12596   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
12597   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed, 13);
12598   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
12599   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
12600   uint64_t offset = instr->ExtractBits(21, 16) << msize_in_bytes_log2;
12601   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset;
12602   VectorFormat unpack_vform =
12603       SVEFormatFromLaneSizeInBytesLog2(msize_in_bytes_log2);
12604   SimVRegister temp;
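  // Load a single element of the memory size, replicate it across the vector
  // at the element size, then zero the lanes where the governing predicate is
  // false.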
12605   if (!ld1r(vform, unpack_vform, temp, base, is_signed)) return;
12606   mov_zeroing(vform,
12607               ReadVRegister(instr->GetRt()),
12608               ReadPRegister(instr->GetPgLow8()),
12609               temp);
12610 }
12611 
12612 void Simulator::VisitSVELoadPredicateRegister(const Instruction* instr) {
12613   switch (instr->Mask(SVELoadPredicateRegisterMask)) {
12614     case LDR_p_bi: {
12615       SimPRegister& pt = ReadPRegister(instr->GetPt());
12616       int pl = GetPredicateLengthInBytes();
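      // The 9-bit signed immediate is split across the encoding: bits [21:16]
      // hold imm9<8:3> and bits [12:10] hold imm9<2:0>. The offset is scaled
      // by PL, the predicate length in bytes.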
12617       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
12618       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
12619       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
12620       uint64_t address = base + multiplier * pl;
12621       for (int i = 0; i < pl; i++) {
12622         VIXL_DEFINE_OR_RETURN(value, MemRead<uint8_t>(address + i));
12623         pt.Insert(i, value);
12624       }
12625       LogPRead(instr->GetPt(), address);
12626       break;
12627     }
12628     default:
12629       VIXL_UNIMPLEMENTED();
12630       break;
12631   }
12632 }
12633 
12634 void Simulator::VisitSVELoadVectorRegister(const Instruction* instr) {
12635   switch (instr->Mask(SVELoadVectorRegisterMask)) {
12636     case LDR_z_bi: {
12637       SimVRegister& zt = ReadVRegister(instr->GetRt());
12638       int vl = GetVectorLengthInBytes();
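      // Same split-immediate encoding as LDR (predicate), but scaled by VL.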
12639       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
12640       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
12641       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
12642       uint64_t address = base + multiplier * vl;
12643       for (int i = 0; i < vl; i++) {
12644         VIXL_DEFINE_OR_RETURN(value, MemRead<uint8_t>(address + i));
12645         zt.Insert(i, value);
12646       }
12647       LogZRead(instr->GetRt(), address);
12648       break;
12649     }
12650     default:
12651       VIXL_UNIMPLEMENTED();
12652       break;
12653   }
12654 }
12655 
12656 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets(
12657     const Instruction* instr) {
12658   switch (instr->Mask(
12659       SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsetsMask)) {
12660     case LD1D_z_p_bz_d_x32_scaled:
12661     case LD1H_z_p_bz_d_x32_scaled:
12662     case LD1SH_z_p_bz_d_x32_scaled:
12663     case LD1SW_z_p_bz_d_x32_scaled:
12664     case LD1W_z_p_bz_d_x32_scaled:
12665     case LDFF1H_z_p_bz_d_x32_scaled:
12666     case LDFF1W_z_p_bz_d_x32_scaled:
12667     case LDFF1D_z_p_bz_d_x32_scaled:
12668     case LDFF1SH_z_p_bz_d_x32_scaled:
12669     case LDFF1SW_z_p_bz_d_x32_scaled:
12670       break;
12671     default:
12672       VIXL_UNIMPLEMENTED();
12673       break;
12674   }
12675 
12676   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
12677   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, mod);
12678 }
12679 
12680 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus64BitScaledOffsets(
12681     const Instruction* instr) {
12682   switch (instr->Mask(SVE64BitGatherLoad_ScalarPlus64BitScaledOffsetsMask)) {
12683     case LD1D_z_p_bz_d_64_scaled:
12684     case LD1H_z_p_bz_d_64_scaled:
12685     case LD1SH_z_p_bz_d_64_scaled:
12686     case LD1SW_z_p_bz_d_64_scaled:
12687     case LD1W_z_p_bz_d_64_scaled:
12688     case LDFF1H_z_p_bz_d_64_scaled:
12689     case LDFF1W_z_p_bz_d_64_scaled:
12690     case LDFF1D_z_p_bz_d_64_scaled:
12691     case LDFF1SH_z_p_bz_d_64_scaled:
12692     case LDFF1SW_z_p_bz_d_64_scaled:
12693       break;
12694     default:
12695       VIXL_UNIMPLEMENTED();
12696       break;
12697   }
12698 
12699   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, SVE_LSL);
12700 }
12701 
12702 void Simulator::VisitSVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets(
12703     const Instruction* instr) {
12704   switch (instr->Mask(SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsetsMask)) {
12705     case LD1B_z_p_bz_d_64_unscaled:
12706     case LD1D_z_p_bz_d_64_unscaled:
12707     case LD1H_z_p_bz_d_64_unscaled:
12708     case LD1SB_z_p_bz_d_64_unscaled:
12709     case LD1SH_z_p_bz_d_64_unscaled:
12710     case LD1SW_z_p_bz_d_64_unscaled:
12711     case LD1W_z_p_bz_d_64_unscaled:
12712     case LDFF1B_z_p_bz_d_64_unscaled:
12713     case LDFF1D_z_p_bz_d_64_unscaled:
12714     case LDFF1H_z_p_bz_d_64_unscaled:
12715     case LDFF1SB_z_p_bz_d_64_unscaled:
12716     case LDFF1SH_z_p_bz_d_64_unscaled:
12717     case LDFF1SW_z_p_bz_d_64_unscaled:
12718     case LDFF1W_z_p_bz_d_64_unscaled:
12719       break;
12720     default:
12721       VIXL_UNIMPLEMENTED();
12722       break;
12723   }
12724 
12725   SVEGatherLoadScalarPlusVectorHelper(instr,
12726                                       kFormatVnD,
12727                                       NO_SVE_OFFSET_MODIFIER);
12728 }
12729 
12730 void Simulator::VisitSVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets(
12731     const Instruction* instr) {
12732   switch (instr->Mask(
12733       SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsetsMask)) {
12734     case LD1B_z_p_bz_d_x32_unscaled:
12735     case LD1D_z_p_bz_d_x32_unscaled:
12736     case LD1H_z_p_bz_d_x32_unscaled:
12737     case LD1SB_z_p_bz_d_x32_unscaled:
12738     case LD1SH_z_p_bz_d_x32_unscaled:
12739     case LD1SW_z_p_bz_d_x32_unscaled:
12740     case LD1W_z_p_bz_d_x32_unscaled:
12741     case LDFF1B_z_p_bz_d_x32_unscaled:
12742     case LDFF1H_z_p_bz_d_x32_unscaled:
12743     case LDFF1W_z_p_bz_d_x32_unscaled:
12744     case LDFF1D_z_p_bz_d_x32_unscaled:
12745     case LDFF1SB_z_p_bz_d_x32_unscaled:
12746     case LDFF1SH_z_p_bz_d_x32_unscaled:
12747     case LDFF1SW_z_p_bz_d_x32_unscaled:
12748       break;
12749     default:
12750       VIXL_UNIMPLEMENTED();
12751       break;
12752   }
12753 
12754   SVEOffsetModifier mod = (instr->ExtractBit(22) == 1) ? SVE_SXTW : SVE_UXTW;
12755   SVEGatherLoadScalarPlusVectorHelper(instr, kFormatVnD, mod);
12756 }
12757 
12758 void Simulator::VisitSVE64BitGatherLoad_VectorPlusImm(
12759     const Instruction* instr) {
12760   switch (instr->Mask(SVE64BitGatherLoad_VectorPlusImmMask)) {
12761     case LD1B_z_p_ai_d:
12762     case LD1D_z_p_ai_d:
12763     case LD1H_z_p_ai_d:
12764     case LD1SB_z_p_ai_d:
12765     case LD1SH_z_p_ai_d:
12766     case LD1SW_z_p_ai_d:
12767     case LD1W_z_p_ai_d:
12768     case LDFF1B_z_p_ai_d:
12769     case LDFF1D_z_p_ai_d:
12770     case LDFF1H_z_p_ai_d:
12771     case LDFF1SB_z_p_ai_d:
12772     case LDFF1SH_z_p_ai_d:
12773     case LDFF1SW_z_p_ai_d:
12774     case LDFF1W_z_p_ai_d:
12775       break;
12776     default:
12777       VIXL_UNIMPLEMENTED();
12778       break;
12779   }
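  // Bit 14 distinguishes unsigned (1) from signed (0) loads, bit 13 selects
  // the first-fault variant, and the 5-bit immediate is scaled by the memory
  // element size taken from bits [24:23].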
12780   bool is_signed = instr->ExtractBit(14) == 0;
12781   bool is_ff = instr->ExtractBit(13) == 1;
12782   // Note that these instructions don't use the Dtype encoding.
12783   int msize_in_bytes_log2 = instr->ExtractBits(24, 23);
12784   uint64_t imm = instr->ExtractBits(20, 16) << msize_in_bytes_log2;
12785   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnD);
12786   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
12787   if (is_ff) {
12788     VIXL_UNIMPLEMENTED();
12789   } else {
12790     SVEStructuredLoadHelper(kFormatVnD,
12791                             ReadPRegister(instr->GetPgLow8()),
12792                             instr->GetRt(),
12793                             addr,
12794                             is_signed);
12795   }
12796 }
12797 
12798 void Simulator::VisitSVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets(
12799     const Instruction* instr) {
12800   switch (
12801       instr->Mask(SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsetsMask)) {
12802     // Ignore prefetch hint instructions.
12803     case PRFB_i_p_bz_d_64_scaled:
12804     case PRFD_i_p_bz_d_64_scaled:
12805     case PRFH_i_p_bz_d_64_scaled:
12806     case PRFW_i_p_bz_d_64_scaled:
12807       break;
12808     default:
12809       VIXL_UNIMPLEMENTED();
12810       break;
12811   }
12812 }
12813 
12814 void Simulator::
12815     VisitSVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets(
12816         const Instruction* instr) {
12817   switch (instr->Mask(
12818       SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsetsMask)) {
12819     // Ignore prefetch hint instructions.
12820     case PRFB_i_p_bz_d_x32_scaled:
12821     case PRFD_i_p_bz_d_x32_scaled:
12822     case PRFH_i_p_bz_d_x32_scaled:
12823     case PRFW_i_p_bz_d_x32_scaled:
12824       break;
12825     default:
12826       VIXL_UNIMPLEMENTED();
12827       break;
12828   }
12829 }
12830 
12831 void Simulator::VisitSVE64BitGatherPrefetch_VectorPlusImm(
12832     const Instruction* instr) {
12833   switch (instr->Mask(SVE64BitGatherPrefetch_VectorPlusImmMask)) {
12834     // Ignore prefetch hint instructions.
12835     case PRFB_i_p_ai_d:
12836     case PRFD_i_p_ai_d:
12837     case PRFH_i_p_ai_d:
12838     case PRFW_i_p_ai_d:
12839       break;
12840     default:
12841       VIXL_UNIMPLEMENTED();
12842       break;
12843   }
12844 }
12845 
12846 void Simulator::VisitSVEContiguousFirstFaultLoad_ScalarPlusScalar(
12847     const Instruction* instr) {
12848   bool is_signed;
12849   switch (instr->Mask(SVEContiguousLoad_ScalarPlusScalarMask)) {
12850     case LDFF1B_z_p_br_u8:
12851     case LDFF1B_z_p_br_u16:
12852     case LDFF1B_z_p_br_u32:
12853     case LDFF1B_z_p_br_u64:
12854     case LDFF1H_z_p_br_u16:
12855     case LDFF1H_z_p_br_u32:
12856     case LDFF1H_z_p_br_u64:
12857     case LDFF1W_z_p_br_u32:
12858     case LDFF1W_z_p_br_u64:
12859     case LDFF1D_z_p_br_u64:
12860       is_signed = false;
12861       break;
12862     case LDFF1SB_z_p_br_s16:
12863     case LDFF1SB_z_p_br_s32:
12864     case LDFF1SB_z_p_br_s64:
12865     case LDFF1SH_z_p_br_s32:
12866     case LDFF1SH_z_p_br_s64:
12867     case LDFF1SW_z_p_br_s64:
12868       is_signed = true;
12869       break;
12870     default:
12871       // This encoding group is complete, so no other values should be possible.
12872       VIXL_UNREACHABLE();
12873       is_signed = false;
12874       break;
12875   }
12876 
12877   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
12878   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
12879   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
12880   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
12881   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
12882   uint64_t offset = ReadXRegister(instr->GetRm());
12883   offset <<= msize_in_bytes_log2;
12884   LogicSVEAddressVector addr(base + offset);
12885   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
12886   SVEFaultTolerantLoadHelper(vform,
12887                              ReadPRegister(instr->GetPgLow8()),
12888                              instr->GetRt(),
12889                              addr,
12890                              kSVEFirstFaultLoad,
12891                              is_signed);
12892 }
12893 
12894 void Simulator::VisitSVEContiguousNonFaultLoad_ScalarPlusImm(
12895     const Instruction* instr) {
12896   bool is_signed = false;
12897   switch (instr->Mask(SVEContiguousNonFaultLoad_ScalarPlusImmMask)) {
12898     case LDNF1B_z_p_bi_u16:
12899     case LDNF1B_z_p_bi_u32:
12900     case LDNF1B_z_p_bi_u64:
12901     case LDNF1B_z_p_bi_u8:
12902     case LDNF1D_z_p_bi_u64:
12903     case LDNF1H_z_p_bi_u16:
12904     case LDNF1H_z_p_bi_u32:
12905     case LDNF1H_z_p_bi_u64:
12906     case LDNF1W_z_p_bi_u32:
12907     case LDNF1W_z_p_bi_u64:
12908       break;
12909     case LDNF1SB_z_p_bi_s16:
12910     case LDNF1SB_z_p_bi_s32:
12911     case LDNF1SB_z_p_bi_s64:
12912     case LDNF1SH_z_p_bi_s32:
12913     case LDNF1SH_z_p_bi_s64:
12914     case LDNF1SW_z_p_bi_s64:
12915       is_signed = true;
12916       break;
12917     default:
12918       VIXL_UNIMPLEMENTED();
12919       break;
12920   }
12921   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
12922   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
12923   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
12924   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
12925   int vl = GetVectorLengthInBytes();
12926   int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
12927   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
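  // The signed immediate counts register-sized blocks of memory: VL bytes,
  // divided by the element-to-memory size ratio when memory elements are
  // narrower than vector elements.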
12928   uint64_t offset =
12929       (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
12930   LogicSVEAddressVector addr(base + offset);
12931   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
12932   SVEFaultTolerantLoadHelper(vform,
12933                              ReadPRegister(instr->GetPgLow8()),
12934                              instr->GetRt(),
12935                              addr,
12936                              kSVENonFaultLoad,
12937                              is_signed);
12938 }
12939 
12940 void Simulator::VisitSVEContiguousNonTemporalLoad_ScalarPlusImm(
12941     const Instruction* instr) {
12942   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
12943   VectorFormat vform = kFormatUndefined;
12944 
12945   switch (instr->Mask(SVEContiguousNonTemporalLoad_ScalarPlusImmMask)) {
12946     case LDNT1B_z_p_bi_contiguous:
12947       vform = kFormatVnB;
12948       break;
12949     case LDNT1D_z_p_bi_contiguous:
12950       vform = kFormatVnD;
12951       break;
12952     case LDNT1H_z_p_bi_contiguous:
12953       vform = kFormatVnH;
12954       break;
12955     case LDNT1W_z_p_bi_contiguous:
12956       vform = kFormatVnS;
12957       break;
12958     default:
12959       VIXL_UNIMPLEMENTED();
12960       break;
12961   }
12962   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
12963   int vl = GetVectorLengthInBytes();
12964   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
12965   uint64_t offset = instr->ExtractSignedBits(19, 16) * vl;
12966   LogicSVEAddressVector addr(base + offset);
12967   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
12968   SVEStructuredLoadHelper(vform,
12969                           pg,
12970                           instr->GetRt(),
12971                           addr,
12972                           /* is_signed = */ false);
12973 }
12974 
12975 void Simulator::VisitSVEContiguousNonTemporalLoad_ScalarPlusScalar(
12976     const Instruction* instr) {
12977   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
12978   VectorFormat vform = kFormatUndefined;
12979 
12980   switch (instr->Mask(SVEContiguousNonTemporalLoad_ScalarPlusScalarMask)) {
12981     case LDNT1B_z_p_br_contiguous:
12982       vform = kFormatVnB;
12983       break;
12984     case LDNT1D_z_p_br_contiguous:
12985       vform = kFormatVnD;
12986       break;
12987     case LDNT1H_z_p_br_contiguous:
12988       vform = kFormatVnH;
12989       break;
12990     case LDNT1W_z_p_br_contiguous:
12991       vform = kFormatVnS;
12992       break;
12993     default:
12994       VIXL_UNIMPLEMENTED();
12995       break;
12996   }
12997   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
12998   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
12999   uint64_t offset = ReadXRegister(instr->GetRm()) << msize_in_bytes_log2;
13000   LogicSVEAddressVector addr(base + offset);
13001   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13002   SVEStructuredLoadHelper(vform,
13003                           pg,
13004                           instr->GetRt(),
13005                           addr,
13006                           /* is_signed = */ false);
13007 }
13008 
13009 void Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusImm(
13010     const Instruction* instr) {
13011   SimVRegister& zt = ReadVRegister(instr->GetRt());
13012   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13013 
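  // LD1RQ* replicates a 16-byte (quadword) block; the LD1RO* forms replicate
  // a 32-byte (octaword) block instead.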
13014   uint64_t dwords = 2;
13015   VectorFormat vform_dst = kFormatVnQ;
13016   if ((form_hash_ == "ld1rob_z_p_bi_u8"_h) ||
13017       (form_hash_ == "ld1roh_z_p_bi_u16"_h) ||
13018       (form_hash_ == "ld1row_z_p_bi_u32"_h) ||
13019       (form_hash_ == "ld1rod_z_p_bi_u64"_h)) {
13020     dwords = 4;
13021     vform_dst = kFormatVnO;
13022   }
13023 
13024   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13025   uint64_t offset =
13026       instr->ExtractSignedBits(19, 16) * dwords * kDRegSizeInBytes;
13027   int msz = instr->ExtractBits(24, 23);
13028   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
13029 
13030   for (unsigned i = 0; i < dwords; i++) {
13031     if (!ld1(kFormatVnD, zt, i, addr + offset + (i * kDRegSizeInBytes))) return;
13032   }
13033   mov_zeroing(vform, zt, pg, zt);
13034   dup_element(vform_dst, zt, zt, 0);
13035 }
13036 
13037 void Simulator::VisitSVELoadAndBroadcastQOWord_ScalarPlusScalar(
13038     const Instruction* instr) {
13039   SimVRegister& zt = ReadVRegister(instr->GetRt());
13040   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13041 
13042   uint64_t bytes = 16;
13043   VectorFormat vform_dst = kFormatVnQ;
13044   if ((form_hash_ == "ld1rob_z_p_br_contiguous"_h) ||
13045       (form_hash_ == "ld1roh_z_p_br_contiguous"_h) ||
13046       (form_hash_ == "ld1row_z_p_br_contiguous"_h) ||
13047       (form_hash_ == "ld1rod_z_p_br_contiguous"_h)) {
13048     bytes = 32;
13049     vform_dst = kFormatVnO;
13050   }
13051 
13052   uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13053   uint64_t offset = ReadXRegister(instr->GetRm());
13054   int msz = instr->ExtractBits(24, 23);
13055   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
13056   offset <<= msz;
13057   for (unsigned i = 0; i < bytes; i++) {
13058     if (!ld1(kFormatVnB, zt, i, addr + offset + i)) return;
13059   }
13060   mov_zeroing(vform, zt, pg, zt);
13061   dup_element(vform_dst, zt, zt, 0);
13062 }
13063 
13064 void Simulator::VisitSVELoadMultipleStructures_ScalarPlusImm(
13065     const Instruction* instr) {
13066   switch (instr->Mask(SVELoadMultipleStructures_ScalarPlusImmMask)) {
13067     case LD2B_z_p_bi_contiguous:
13068     case LD2D_z_p_bi_contiguous:
13069     case LD2H_z_p_bi_contiguous:
13070     case LD2W_z_p_bi_contiguous:
13071     case LD3B_z_p_bi_contiguous:
13072     case LD3D_z_p_bi_contiguous:
13073     case LD3H_z_p_bi_contiguous:
13074     case LD3W_z_p_bi_contiguous:
13075     case LD4B_z_p_bi_contiguous:
13076     case LD4D_z_p_bi_contiguous:
13077     case LD4H_z_p_bi_contiguous:
13078     case LD4W_z_p_bi_contiguous: {
13079       int vl = GetVectorLengthInBytes();
13080       int msz = instr->ExtractBits(24, 23);
13081       int reg_count = instr->ExtractBits(22, 21) + 1;
13082       uint64_t offset = instr->ExtractSignedBits(19, 16) * vl * reg_count;
13083       LogicSVEAddressVector addr(
13084           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
13085       addr.SetMsizeInBytesLog2(msz);
13086       addr.SetRegCount(reg_count);
13087       SVEStructuredLoadHelper(SVEFormatFromLaneSizeInBytesLog2(msz),
13088                               ReadPRegister(instr->GetPgLow8()),
13089                               instr->GetRt(),
13090                               addr);
13091       break;
13092     }
13093     default:
13094       VIXL_UNIMPLEMENTED();
13095       break;
13096   }
13097 }
13098 
13099 void Simulator::VisitSVELoadMultipleStructures_ScalarPlusScalar(
13100     const Instruction* instr) {
13101   switch (instr->Mask(SVELoadMultipleStructures_ScalarPlusScalarMask)) {
13102     case LD2B_z_p_br_contiguous:
13103     case LD2D_z_p_br_contiguous:
13104     case LD2H_z_p_br_contiguous:
13105     case LD2W_z_p_br_contiguous:
13106     case LD3B_z_p_br_contiguous:
13107     case LD3D_z_p_br_contiguous:
13108     case LD3H_z_p_br_contiguous:
13109     case LD3W_z_p_br_contiguous:
13110     case LD4B_z_p_br_contiguous:
13111     case LD4D_z_p_br_contiguous:
13112     case LD4H_z_p_br_contiguous:
13113     case LD4W_z_p_br_contiguous: {
13114       int msz = instr->ExtractBits(24, 23);
13115       uint64_t offset = ReadXRegister(instr->GetRm()) * (uint64_t{1} << msz);
13116       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
13117       LogicSVEAddressVector addr(
13118           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
13119       addr.SetMsizeInBytesLog2(msz);
13120       addr.SetRegCount(instr->ExtractBits(22, 21) + 1);
13121       SVEStructuredLoadHelper(vform,
13122                               ReadPRegister(instr->GetPgLow8()),
13123                               instr->GetRt(),
13124                               addr,
13125                               false);
13126       break;
13127     }
13128     default:
13129       VIXL_UNIMPLEMENTED();
13130       break;
13131   }
13132 }
13133 
13134 void Simulator::VisitSVE32BitScatterStore_ScalarPlus32BitScaledOffsets(
13135     const Instruction* instr) {
13136   switch (instr->Mask(SVE32BitScatterStore_ScalarPlus32BitScaledOffsetsMask)) {
13137     case ST1H_z_p_bz_s_x32_scaled:
13138     case ST1W_z_p_bz_s_x32_scaled: {
13139       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13140       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
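      // Bit 21 enables scaling of the 32-bit offsets by the memory element
      // size; bit 14 selects sign- (SXTW) or zero-extension (UXTW) of those
      // offsets.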
13141       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
13142       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13143       SVEOffsetModifier mod =
13144           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
13145       LogicSVEAddressVector addr(base,
13146                                  &ReadVRegister(instr->GetRm()),
13147                                  kFormatVnS,
13148                                  mod,
13149                                  scale);
13150       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13151       SVEStructuredStoreHelper(kFormatVnS,
13152                                ReadPRegister(instr->GetPgLow8()),
13153                                instr->GetRt(),
13154                                addr);
13155       break;
13156     }
13157     default:
13158       VIXL_UNIMPLEMENTED();
13159       break;
13160   }
13161 }
13162 
13163 void Simulator::VisitSVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets(
13164     const Instruction* instr) {
13165   switch (
13166       instr->Mask(SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsetsMask)) {
13167     case ST1B_z_p_bz_s_x32_unscaled:
13168     case ST1H_z_p_bz_s_x32_unscaled:
13169     case ST1W_z_p_bz_s_x32_unscaled: {
13170       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13171       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
13172       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13173       SVEOffsetModifier mod =
13174           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
13175       LogicSVEAddressVector addr(base,
13176                                  &ReadVRegister(instr->GetRm()),
13177                                  kFormatVnS,
13178                                  mod);
13179       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13180       SVEStructuredStoreHelper(kFormatVnS,
13181                                ReadPRegister(instr->GetPgLow8()),
13182                                instr->GetRt(),
13183                                addr);
13184       break;
13185     }
13186     default:
13187       VIXL_UNIMPLEMENTED();
13188       break;
13189   }
13190 }
13191 
13192 void Simulator::VisitSVE32BitScatterStore_VectorPlusImm(
13193     const Instruction* instr) {
13194   int msz = 0;
13195   switch (instr->Mask(SVE32BitScatterStore_VectorPlusImmMask)) {
13196     case ST1B_z_p_ai_s:
13197       msz = 0;
13198       break;
13199     case ST1H_z_p_ai_s:
13200       msz = 1;
13201       break;
13202     case ST1W_z_p_ai_s:
13203       msz = 2;
13204       break;
13205     default:
13206       VIXL_UNIMPLEMENTED();
13207       break;
13208   }
13209   uint64_t imm = instr->ExtractBits(20, 16) << msz;
13210   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnS);
13211   addr.SetMsizeInBytesLog2(msz);
13212   SVEStructuredStoreHelper(kFormatVnS,
13213                            ReadPRegister(instr->GetPgLow8()),
13214                            instr->GetRt(),
13215                            addr);
13216 }
13217 
13218 void Simulator::VisitSVE64BitScatterStore_ScalarPlus64BitScaledOffsets(
13219     const Instruction* instr) {
13220   switch (instr->Mask(SVE64BitScatterStore_ScalarPlus64BitScaledOffsetsMask)) {
13221     case ST1D_z_p_bz_d_64_scaled:
13222     case ST1H_z_p_bz_d_64_scaled:
13223     case ST1W_z_p_bz_d_64_scaled: {
13224       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13225       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
13226       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
13227       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13228       LogicSVEAddressVector addr(base,
13229                                  &ReadVRegister(instr->GetRm()),
13230                                  kFormatVnD,
13231                                  SVE_LSL,
13232                                  scale);
13233       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13234       SVEStructuredStoreHelper(kFormatVnD,
13235                                ReadPRegister(instr->GetPgLow8()),
13236                                instr->GetRt(),
13237                                addr);
13238       break;
13239     }
13240     default:
13241       VIXL_UNIMPLEMENTED();
13242       break;
13243   }
13244 }
13245 
13246 void Simulator::VisitSVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets(
13247     const Instruction* instr) {
13248   switch (
13249       instr->Mask(SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsetsMask)) {
13250     case ST1B_z_p_bz_d_64_unscaled:
13251     case ST1D_z_p_bz_d_64_unscaled:
13252     case ST1H_z_p_bz_d_64_unscaled:
13253     case ST1W_z_p_bz_d_64_unscaled: {
13254       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13255       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
13256       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13257       LogicSVEAddressVector addr(base,
13258                                  &ReadVRegister(instr->GetRm()),
13259                                  kFormatVnD,
13260                                  NO_SVE_OFFSET_MODIFIER);
13261       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13262       SVEStructuredStoreHelper(kFormatVnD,
13263                                ReadPRegister(instr->GetPgLow8()),
13264                                instr->GetRt(),
13265                                addr);
13266       break;
13267     }
13268     default:
13269       VIXL_UNIMPLEMENTED();
13270       break;
13271   }
13272 }
13273 
13274 void Simulator::VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets(
13275     const Instruction* instr) {
13276   switch (instr->Mask(
13277       SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsetsMask)) {
13278     case ST1D_z_p_bz_d_x32_scaled:
13279     case ST1H_z_p_bz_d_x32_scaled:
13280     case ST1W_z_p_bz_d_x32_scaled: {
13281       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13282       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
13283       int scale = instr->ExtractBit(21) * msize_in_bytes_log2;
13284       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13285       SVEOffsetModifier mod =
13286           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
13287       LogicSVEAddressVector addr(base,
13288                                  &ReadVRegister(instr->GetRm()),
13289                                  kFormatVnD,
13290                                  mod,
13291                                  scale);
13292       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13293       SVEStructuredStoreHelper(kFormatVnD,
13294                                ReadPRegister(instr->GetPgLow8()),
13295                                instr->GetRt(),
13296                                addr);
13297       break;
13298     }
13299     default:
13300       VIXL_UNIMPLEMENTED();
13301       break;
13302   }
13303 }
13304 
13305 void Simulator::
13306     VisitSVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets(
13307         const Instruction* instr) {
13308   switch (instr->Mask(
13309       SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsetsMask)) {
13310     case ST1B_z_p_bz_d_x32_unscaled:
13311     case ST1D_z_p_bz_d_x32_unscaled:
13312     case ST1H_z_p_bz_d_x32_unscaled:
13313     case ST1W_z_p_bz_d_x32_unscaled: {
13314       unsigned msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13315       VIXL_ASSERT(kDRegSizeInBytesLog2 >= msize_in_bytes_log2);
13316       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13317       SVEOffsetModifier mod =
13318           (instr->ExtractBit(14) == 1) ? SVE_SXTW : SVE_UXTW;
13319       LogicSVEAddressVector addr(base,
13320                                  &ReadVRegister(instr->GetRm()),
13321                                  kFormatVnD,
13322                                  mod);
13323       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13324       SVEStructuredStoreHelper(kFormatVnD,
13325                                ReadPRegister(instr->GetPgLow8()),
13326                                instr->GetRt(),
13327                                addr);
13328       break;
13329     }
13330     default:
13331       VIXL_UNIMPLEMENTED();
13332       break;
13333   }
13334 }
13335 
13336 void Simulator::VisitSVE64BitScatterStore_VectorPlusImm(
13337     const Instruction* instr) {
13338   int msz = 0;
13339   switch (instr->Mask(SVE64BitScatterStore_VectorPlusImmMask)) {
13340     case ST1B_z_p_ai_d:
13341       msz = 0;
13342       break;
13343     case ST1D_z_p_ai_d:
13344       msz = 3;
13345       break;
13346     case ST1H_z_p_ai_d:
13347       msz = 1;
13348       break;
13349     case ST1W_z_p_ai_d:
13350       msz = 2;
13351       break;
13352     default:
13353       VIXL_UNIMPLEMENTED();
13354       break;
13355   }
13356   uint64_t imm = instr->ExtractBits(20, 16) << msz;
13357   LogicSVEAddressVector addr(imm, &ReadVRegister(instr->GetRn()), kFormatVnD);
13358   addr.SetMsizeInBytesLog2(msz);
13359   SVEStructuredStoreHelper(kFormatVnD,
13360                            ReadPRegister(instr->GetPgLow8()),
13361                            instr->GetRt(),
13362                            addr);
13363 }
13364 
13365 void Simulator::VisitSVEContiguousNonTemporalStore_ScalarPlusImm(
13366     const Instruction* instr) {
13367   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13368   VectorFormat vform = kFormatUndefined;
13369 
13370   switch (instr->Mask(SVEContiguousNonTemporalStore_ScalarPlusImmMask)) {
13371     case STNT1B_z_p_bi_contiguous:
13372       vform = kFormatVnB;
13373       break;
13374     case STNT1D_z_p_bi_contiguous:
13375       vform = kFormatVnD;
13376       break;
13377     case STNT1H_z_p_bi_contiguous:
13378       vform = kFormatVnH;
13379       break;
13380     case STNT1W_z_p_bi_contiguous:
13381       vform = kFormatVnS;
13382       break;
13383     default:
13384       VIXL_UNIMPLEMENTED();
13385       break;
13386   }
13387   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
13388   int vl = GetVectorLengthInBytes();
13389   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13390   uint64_t offset = instr->ExtractSignedBits(19, 16) * vl;
13391   LogicSVEAddressVector addr(base + offset);
13392   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13393   SVEStructuredStoreHelper(vform, pg, instr->GetRt(), addr);
13394 }
13395 
13396 void Simulator::VisitSVEContiguousNonTemporalStore_ScalarPlusScalar(
13397     const Instruction* instr) {
13398   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13399   VectorFormat vform = kFormatUndefined;
13400 
13401   switch (instr->Mask(SVEContiguousNonTemporalStore_ScalarPlusScalarMask)) {
13402     case STNT1B_z_p_br_contiguous:
13403       vform = kFormatVnB;
13404       break;
13405     case STNT1D_z_p_br_contiguous:
13406       vform = kFormatVnD;
13407       break;
13408     case STNT1H_z_p_br_contiguous:
13409       vform = kFormatVnH;
13410       break;
13411     case STNT1W_z_p_br_contiguous:
13412       vform = kFormatVnS;
13413       break;
13414     default:
13415       VIXL_UNIMPLEMENTED();
13416       break;
13417   }
13418   int msize_in_bytes_log2 = LaneSizeInBytesLog2FromFormat(vform);
13419   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13420   uint64_t offset = ReadXRegister(instr->GetRm()) << msize_in_bytes_log2;
13421   LogicSVEAddressVector addr(base + offset);
13422   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13423   SVEStructuredStoreHelper(vform, pg, instr->GetRt(), addr);
13424 }
13425 
13426 void Simulator::VisitSVEContiguousStore_ScalarPlusImm(
13427     const Instruction* instr) {
13428   switch (instr->Mask(SVEContiguousStore_ScalarPlusImmMask)) {
13429     case ST1B_z_p_bi:
13430     case ST1D_z_p_bi:
13431     case ST1H_z_p_bi:
13432     case ST1W_z_p_bi: {
13433       int vl = GetVectorLengthInBytes();
13434       int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(false);
13435       int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(false);
13436       VIXL_ASSERT(esize_in_bytes_log2 >= msize_in_bytes_log2);
13437       int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
13438       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
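      // For unpacked forms (msize < esize), each vector register covers only
      // VL / (esize / msize) bytes of memory, so the VL-scaled immediate
      // offset is reduced accordingly.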
13439       uint64_t offset =
13440           (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
13441       VectorFormat vform =
13442           SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
13443       LogicSVEAddressVector addr(base + offset);
13444       addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
13445       SVEStructuredStoreHelper(vform,
13446                                ReadPRegister(instr->GetPgLow8()),
13447                                instr->GetRt(),
13448                                addr);
13449       break;
13450     }
13451     default:
13452       VIXL_UNIMPLEMENTED();
13453       break;
13454   }
13455 }
13456 
13457 void Simulator::VisitSVEContiguousStore_ScalarPlusScalar(
13458     const Instruction* instr) {
13459   switch (instr->Mask(SVEContiguousStore_ScalarPlusScalarMask)) {
13460     case ST1B_z_p_br:
13461     case ST1D_z_p_br:
13462     case ST1H_z_p_br:
13463     case ST1W_z_p_br: {
13464       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13465       uint64_t offset = ReadXRegister(instr->GetRm());
13466       offset <<= instr->ExtractBits(24, 23);
13467       VectorFormat vform =
13468           SVEFormatFromLaneSizeInBytesLog2(instr->ExtractBits(22, 21));
13469       LogicSVEAddressVector addr(base + offset);
13470       addr.SetMsizeInBytesLog2(instr->ExtractBits(24, 23));
13471       SVEStructuredStoreHelper(vform,
13472                                ReadPRegister(instr->GetPgLow8()),
13473                                instr->GetRt(),
13474                                addr);
13475       break;
13476     }
13477     default:
13478       VIXL_UNIMPLEMENTED();
13479       break;
13480   }
13481 }
13482 
13483 void Simulator::VisitSVECopySIMDFPScalarRegisterToVector_Predicated(
13484     const Instruction* instr) {
13485   VectorFormat vform = instr->GetSVEVectorFormat();
13486   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13487   SimVRegister z_result;
13488 
13489   switch (instr->Mask(SVECopySIMDFPScalarRegisterToVector_PredicatedMask)) {
13490     case CPY_z_p_v:
13491       dup_element(vform, z_result, ReadVRegister(instr->GetRn()), 0);
13492       mov_merging(vform, ReadVRegister(instr->GetRd()), pg, z_result);
13493       break;
13494     default:
13495       VIXL_UNIMPLEMENTED();
13496       break;
13497   }
13498 }
13499 
13500 void Simulator::VisitSVEStoreMultipleStructures_ScalarPlusImm(
13501     const Instruction* instr) {
13502   switch (instr->Mask(SVEStoreMultipleStructures_ScalarPlusImmMask)) {
13503     case ST2B_z_p_bi_contiguous:
13504     case ST2D_z_p_bi_contiguous:
13505     case ST2H_z_p_bi_contiguous:
13506     case ST2W_z_p_bi_contiguous:
13507     case ST3B_z_p_bi_contiguous:
13508     case ST3D_z_p_bi_contiguous:
13509     case ST3H_z_p_bi_contiguous:
13510     case ST3W_z_p_bi_contiguous:
13511     case ST4B_z_p_bi_contiguous:
13512     case ST4D_z_p_bi_contiguous:
13513     case ST4H_z_p_bi_contiguous:
13514     case ST4W_z_p_bi_contiguous: {
13515       int vl = GetVectorLengthInBytes();
13516       int msz = instr->ExtractBits(24, 23);
13517       int reg_count = instr->ExtractBits(22, 21) + 1;
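      // The immediate is in units of the structure's total memory footprint:
      // VL bytes per register, times the number of registers being stored.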
13518       uint64_t offset = instr->ExtractSignedBits(19, 16) * vl * reg_count;
13519       LogicSVEAddressVector addr(
13520           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
13521       addr.SetMsizeInBytesLog2(msz);
13522       addr.SetRegCount(reg_count);
13523       SVEStructuredStoreHelper(SVEFormatFromLaneSizeInBytesLog2(msz),
13524                                ReadPRegister(instr->GetPgLow8()),
13525                                instr->GetRt(),
13526                                addr);
13527       break;
13528     }
13529     default:
13530       VIXL_UNIMPLEMENTED();
13531       break;
13532   }
13533 }
13534 
13535 void Simulator::VisitSVEStoreMultipleStructures_ScalarPlusScalar(
13536     const Instruction* instr) {
13537   switch (instr->Mask(SVEStoreMultipleStructures_ScalarPlusScalarMask)) {
13538     case ST2B_z_p_br_contiguous:
13539     case ST2D_z_p_br_contiguous:
13540     case ST2H_z_p_br_contiguous:
13541     case ST2W_z_p_br_contiguous:
13542     case ST3B_z_p_br_contiguous:
13543     case ST3D_z_p_br_contiguous:
13544     case ST3H_z_p_br_contiguous:
13545     case ST3W_z_p_br_contiguous:
13546     case ST4B_z_p_br_contiguous:
13547     case ST4D_z_p_br_contiguous:
13548     case ST4H_z_p_br_contiguous:
13549     case ST4W_z_p_br_contiguous: {
13550       int msz = instr->ExtractBits(24, 23);
13551       uint64_t offset = ReadXRegister(instr->GetRm()) * (uint64_t{1} << msz);
13552       VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(msz);
13553       LogicSVEAddressVector addr(
13554           ReadXRegister(instr->GetRn(), Reg31IsStackPointer) + offset);
13555       addr.SetMsizeInBytesLog2(msz);
13556       addr.SetRegCount(instr->ExtractBits(22, 21) + 1);
13557       SVEStructuredStoreHelper(vform,
13558                                ReadPRegister(instr->GetPgLow8()),
13559                                instr->GetRt(),
13560                                addr);
13561       break;
13562     }
13563     default:
13564       VIXL_UNIMPLEMENTED();
13565       break;
13566   }
13567 }
13568 
13569 void Simulator::VisitSVEStorePredicateRegister(const Instruction* instr) {
13570   switch (instr->Mask(SVEStorePredicateRegisterMask)) {
13571     case STR_p_bi: {
13572       SimPRegister& pt = ReadPRegister(instr->GetPt());
13573       int pl = GetPredicateLengthInBytes();
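      // Reassemble the 9-bit immediate, which is split across bits 21:16 and
      // 12:10, sign-extend it, and scale it by the predicate length to form
      // the offset from the base register.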
13574       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
13575       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
13576       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13577       uint64_t address = base + multiplier * pl;
13578       for (int i = 0; i < pl; i++) {
13579         if (!MemWrite(address + i, pt.GetLane<uint8_t>(i))) return;
13580       }
13581       LogPWrite(instr->GetPt(), address);
13582       break;
13583     }
13584     default:
13585       VIXL_UNIMPLEMENTED();
13586       break;
13587   }
13588 }
13589 
13590 void Simulator::VisitSVEStoreVectorRegister(const Instruction* instr) {
13591   switch (instr->Mask(SVEStoreVectorRegisterMask)) {
13592     case STR_z_bi: {
13593       SimVRegister& zt = ReadVRegister(instr->GetRt());
13594       int vl = GetVectorLengthInBytes();
13595       int imm9 = (instr->ExtractBits(21, 16) << 3) | instr->ExtractBits(12, 10);
13596       uint64_t multiplier = ExtractSignedBitfield64(8, 0, imm9);
13597       uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
13598       uint64_t address = base + multiplier * vl;
13599       for (int i = 0; i < vl; i++) {
13600         if (!MemWrite(address + i, zt.GetLane<uint8_t>(i))) return;
13601       }
13602       LogZWrite(instr->GetRt(), address);
13603       break;
13604     }
13605     default:
13606       VIXL_UNIMPLEMENTED();
13607       break;
13608   }
13609 }
13610 
13611 void Simulator::VisitSVEMulIndex(const Instruction* instr) {
13612   VectorFormat vform = instr->GetSVEVectorFormat();
13613   SimVRegister& zda = ReadVRegister(instr->GetRd());
13614   SimVRegister& zn = ReadVRegister(instr->GetRn());
13615   std::pair<int, int> zm_and_index = instr->GetSVEMulZmAndIndex();
13616   SimVRegister zm = ReadVRegister(zm_and_index.first);
13617   int index = zm_and_index.second;
13618 
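  // Broadcast the indexed element of Zm to every element within its 128-bit
  // segment; the dot product below then operates on this broadcast copy.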
13619   SimVRegister temp;
13620   dup_elements_to_segments(vform, temp, zm, index);
13621 
13622   switch (form_hash_) {
13623     case "sdot_z_zzzi_d"_h:
13624     case "sdot_z_zzzi_s"_h:
13625       sdot(vform, zda, zn, temp);
13626       break;
13627     case "udot_z_zzzi_d"_h:
13628     case "udot_z_zzzi_s"_h:
13629       udot(vform, zda, zn, temp);
13630       break;
13631     case "sudot_z_zzzi_s"_h:
13632       usdot(vform, zda, temp, zn);
13633       break;
13634     case "usdot_z_zzzi_s"_h:
13635       usdot(vform, zda, zn, temp);
13636       break;
13637     default:
13638       VIXL_UNIMPLEMENTED();
13639       break;
13640   }
13641 }
13642 
13643 void Simulator::SimulateMatrixMul(const Instruction* instr) {
13644   VectorFormat vform = kFormatVnS;
13645   SimVRegister& dn = ReadVRegister(instr->GetRd());
13646   SimVRegister& n = ReadVRegister(instr->GetRn());
13647   SimVRegister& m = ReadVRegister(instr->GetRm());
13648 
13649   bool n_signed = false;
13650   bool m_signed = false;
13651   switch (form_hash_) {
13652     case "smmla_asimdsame2_g"_h:
13653       vform = kFormat4S;
13654       VIXL_FALLTHROUGH();
13655     case "smmla_z_zzz"_h:
13656       n_signed = m_signed = true;
13657       break;
13658     case "ummla_asimdsame2_g"_h:
13659       vform = kFormat4S;
13660       VIXL_FALLTHROUGH();
13661     case "ummla_z_zzz"_h:
13662       // Nothing to do.
13663       break;
13664     case "usmmla_asimdsame2_g"_h:
13665       vform = kFormat4S;
13666       VIXL_FALLTHROUGH();
13667     case "usmmla_z_zzz"_h:
13668       m_signed = true;
13669       break;
13670     default:
13671       VIXL_UNIMPLEMENTED();
13672       break;
13673   }
13674   matmul(vform, dn, n, m, n_signed, m_signed);
13675 }
13676 
13677 void Simulator::SimulateSVEFPMatrixMul(const Instruction* instr) {
13678   VectorFormat vform = instr->GetSVEVectorFormat();
13679   SimVRegister& zdn = ReadVRegister(instr->GetRd());
13680   SimVRegister& zn = ReadVRegister(instr->GetRn());
13681   SimVRegister& zm = ReadVRegister(instr->GetRm());
13682 
13683   switch (form_hash_) {
13684     case "fmmla_z_zzz_s"_h:
13685     case "fmmla_z_zzz_d"_h:
13686       fmatmul(vform, zdn, zn, zm);
13687       break;
13688     default:
13689       VIXL_UNIMPLEMENTED();
13690       break;
13691   }
13692 }
13693 
13694 void Simulator::VisitSVEPartitionBreakCondition(const Instruction* instr) {
13695   SimPRegister& pd = ReadPRegister(instr->GetPd());
13696   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
13697   SimPRegister& pn = ReadPRegister(instr->GetPn());
13698   SimPRegister result;
13699 
13700   switch (instr->Mask(SVEPartitionBreakConditionMask)) {
13701     case BRKAS_p_p_p_z:
13702     case BRKA_p_p_p:
13703       brka(result, pg, pn);
13704       break;
13705     case BRKBS_p_p_p_z:
13706     case BRKB_p_p_p:
13707       brkb(result, pg, pn);
13708       break;
13709     default:
13710       VIXL_UNIMPLEMENTED();
13711       break;
13712   }
13713 
13714   if (instr->ExtractBit(4) == 1) {
13715     mov_merging(pd, pg, result);
13716   } else {
13717     mov_zeroing(pd, pg, result);
13718   }
13719 
13720   // Set flag if needed.
13721   if (instr->ExtractBit(22) == 1) {
13722     PredTest(kFormatVnB, pg, pd);
13723   }
13724 }
13725 
13726 void Simulator::VisitSVEPropagateBreakToNextPartition(
13727     const Instruction* instr) {
13728   SimPRegister& pdm = ReadPRegister(instr->GetPd());
13729   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
13730   SimPRegister& pn = ReadPRegister(instr->GetPn());
13731 
13732   switch (instr->Mask(SVEPropagateBreakToNextPartitionMask)) {
13733     case BRKNS_p_p_pp:
13734     case BRKN_p_p_pp:
13735       brkn(pdm, pg, pn);
13736       break;
13737     default:
13738       VIXL_UNIMPLEMENTED();
13739       break;
13740   }
13741 
13742   // Set flag if needed.
13743   if (instr->ExtractBit(22) == 1) {
13744     // Note that this ignores `pg`.
13745     PredTest(kFormatVnB, GetPTrue(), pdm);
13746   }
13747 }
13748 
13749 void Simulator::VisitSVEUnpackPredicateElements(const Instruction* instr) {
13750   SimPRegister& pd = ReadPRegister(instr->GetPd());
13751   SimPRegister& pn = ReadPRegister(instr->GetPn());
13752 
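  // PUNPK{HI,LO} is implemented by expanding the predicate to a byte-per-lane
  // vector, interleaving it with a zero vector (zip1/zip2), and narrowing the
  // result back into a predicate.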
13753   SimVRegister temp = Simulator::ExpandToSimVRegister(pn);
13754   SimVRegister zero;
13755   dup_immediate(kFormatVnB, zero, 0);
13756 
13757   switch (instr->Mask(SVEUnpackPredicateElementsMask)) {
13758     case PUNPKHI_p_p:
13759       zip2(kFormatVnB, temp, temp, zero);
13760       break;
13761     case PUNPKLO_p_p:
13762       zip1(kFormatVnB, temp, temp, zero);
13763       break;
13764     default:
13765       VIXL_UNIMPLEMENTED();
13766       break;
13767   }
13768   Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp);
13769 }
13770 
13771 void Simulator::VisitSVEPermutePredicateElements(const Instruction* instr) {
13772   VectorFormat vform = instr->GetSVEVectorFormat();
13773   SimPRegister& pd = ReadPRegister(instr->GetPd());
13774   SimPRegister& pn = ReadPRegister(instr->GetPn());
13775   SimPRegister& pm = ReadPRegister(instr->GetPm());
13776 
13777   SimVRegister temp0 = Simulator::ExpandToSimVRegister(pn);
13778   SimVRegister temp1 = Simulator::ExpandToSimVRegister(pm);
13779 
13780   switch (instr->Mask(SVEPermutePredicateElementsMask)) {
13781     case TRN1_p_pp:
13782       trn1(vform, temp0, temp0, temp1);
13783       break;
13784     case TRN2_p_pp:
13785       trn2(vform, temp0, temp0, temp1);
13786       break;
13787     case UZP1_p_pp:
13788       uzp1(vform, temp0, temp0, temp1);
13789       break;
13790     case UZP2_p_pp:
13791       uzp2(vform, temp0, temp0, temp1);
13792       break;
13793     case ZIP1_p_pp:
13794       zip1(vform, temp0, temp0, temp1);
13795       break;
13796     case ZIP2_p_pp:
13797       zip2(vform, temp0, temp0, temp1);
13798       break;
13799     default:
13800       VIXL_UNIMPLEMENTED();
13801       break;
13802   }
13803   Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp0);
13804 }
13805 
13806 void Simulator::VisitSVEReversePredicateElements(const Instruction* instr) {
13807   switch (instr->Mask(SVEReversePredicateElementsMask)) {
13808     case REV_p_p: {
13809       VectorFormat vform = instr->GetSVEVectorFormat();
13810       SimPRegister& pn = ReadPRegister(instr->GetPn());
13811       SimPRegister& pd = ReadPRegister(instr->GetPd());
13812       SimVRegister temp = Simulator::ExpandToSimVRegister(pn);
13813       rev(vform, temp, temp);
13814       Simulator::ExtractFromSimVRegister(kFormatVnB, pd, temp);
13815       break;
13816     }
13817     default:
13818       VIXL_UNIMPLEMENTED();
13819       break;
13820   }
13821 }
13822 
13823 void Simulator::VisitSVEPermuteVectorExtract(const Instruction* instr) {
13824   SimVRegister& zdn = ReadVRegister(instr->GetRd());
13825   // Second source register "Zm" is encoded where "Zn" would usually be.
13826   SimVRegister& zm = ReadVRegister(instr->GetRn());
13827 
13828   int index = instr->GetSVEExtractImmediate();
13829   int vl = GetVectorLengthInBytes();
13830   index = (index >= vl) ? 0 : index;
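  // An out-of-range index is treated as zero, in which case EXT leaves the
  // first operand unchanged.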
13831 
13832   switch (instr->Mask(SVEPermuteVectorExtractMask)) {
13833     case EXT_z_zi_des:
13834       ext(kFormatVnB, zdn, zdn, zm, index);
13835       break;
13836     default:
13837       VIXL_UNIMPLEMENTED();
13838       break;
13839   }
13840 }
13841 
13842 void Simulator::VisitSVEPermuteVectorInterleaving(const Instruction* instr) {
13843   VectorFormat vform = instr->GetSVEVectorFormat();
13844   SimVRegister& zd = ReadVRegister(instr->GetRd());
13845   SimVRegister& zn = ReadVRegister(instr->GetRn());
13846   SimVRegister& zm = ReadVRegister(instr->GetRm());
13847 
13848   switch (instr->Mask(SVEPermuteVectorInterleavingMask)) {
13849     case TRN1_z_zz:
13850       trn1(vform, zd, zn, zm);
13851       break;
13852     case TRN2_z_zz:
13853       trn2(vform, zd, zn, zm);
13854       break;
13855     case UZP1_z_zz:
13856       uzp1(vform, zd, zn, zm);
13857       break;
13858     case UZP2_z_zz:
13859       uzp2(vform, zd, zn, zm);
13860       break;
13861     case ZIP1_z_zz:
13862       zip1(vform, zd, zn, zm);
13863       break;
13864     case ZIP2_z_zz:
13865       zip2(vform, zd, zn, zm);
13866       break;
13867     default:
13868       VIXL_UNIMPLEMENTED();
13869       break;
13870   }
13871 }
13872 
13873 void Simulator::VisitSVEConditionallyBroadcastElementToVector(
13874     const Instruction* instr) {
13875   VectorFormat vform = instr->GetSVEVectorFormat();
13876   SimVRegister& zdn = ReadVRegister(instr->GetRd());
13877   SimVRegister& zm = ReadVRegister(instr->GetRn());
13878   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13879 
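  // CLASTA uses the element after the last active one (offset 1); CLASTB uses
  // the last active element itself (offset 0).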
13880   int active_offset = -1;
13881   switch (instr->Mask(SVEConditionallyBroadcastElementToVectorMask)) {
13882     case CLASTA_z_p_zz:
13883       active_offset = 1;
13884       break;
13885     case CLASTB_z_p_zz:
13886       active_offset = 0;
13887       break;
13888     default:
13889       VIXL_UNIMPLEMENTED();
13890       break;
13891   }
13892 
13893   if (active_offset >= 0) {
13894     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
13895     if (value.first) {
13896       dup_immediate(vform, zdn, value.second);
13897     } else {
13898       // Trigger a line of trace for the operation, even though it doesn't
13899       // change the register value.
13900       mov(vform, zdn, zdn);
13901     }
13902   }
13903 }
13904 
13905 void Simulator::VisitSVEConditionallyExtractElementToSIMDFPScalar(
13906     const Instruction* instr) {
13907   VectorFormat vform = instr->GetSVEVectorFormat();
13908   SimVRegister& vdn = ReadVRegister(instr->GetRd());
13909   SimVRegister& zm = ReadVRegister(instr->GetRn());
13910   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13911 
13912   int active_offset = -1;
13913   switch (instr->Mask(SVEConditionallyExtractElementToSIMDFPScalarMask)) {
13914     case CLASTA_v_p_z:
13915       active_offset = 1;
13916       break;
13917     case CLASTB_v_p_z:
13918       active_offset = 0;
13919       break;
13920     default:
13921       VIXL_UNIMPLEMENTED();
13922       break;
13923   }
13924 
13925   if (active_offset >= 0) {
13926     LogicVRegister dst(vdn);
13927     uint64_t src1_value = dst.Uint(vform, 0);
13928     std::pair<bool, uint64_t> src2_value = clast(vform, pg, zm, active_offset);
13929     dup_immediate(vform, vdn, 0);
13930     dst.SetUint(vform, 0, src2_value.first ? src2_value.second : src1_value);
13931   }
13932 }
13933 
13934 void Simulator::VisitSVEConditionallyExtractElementToGeneralRegister(
13935     const Instruction* instr) {
13936   VectorFormat vform = instr->GetSVEVectorFormat();
13937   SimVRegister& zm = ReadVRegister(instr->GetRn());
13938   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13939 
13940   int active_offset = -1;
13941   switch (instr->Mask(SVEConditionallyExtractElementToGeneralRegisterMask)) {
13942     case CLASTA_r_p_z:
13943       active_offset = 1;
13944       break;
13945     case CLASTB_r_p_z:
13946       active_offset = 0;
13947       break;
13948     default:
13949       VIXL_UNIMPLEMENTED();
13950       break;
13951   }
13952 
13953   if (active_offset >= 0) {
13954     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
13955     uint64_t masked_src = ReadXRegister(instr->GetRd()) &
13956                           GetUintMask(LaneSizeInBitsFromFormat(vform));
13957     WriteXRegister(instr->GetRd(), value.first ? value.second : masked_src);
13958   }
13959 }
13960 
13961 void Simulator::VisitSVEExtractElementToSIMDFPScalarRegister(
13962     const Instruction* instr) {
13963   VectorFormat vform = instr->GetSVEVectorFormat();
13964   SimVRegister& vdn = ReadVRegister(instr->GetRd());
13965   SimVRegister& zm = ReadVRegister(instr->GetRn());
13966   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13967 
13968   int active_offset = -1;
13969   switch (instr->Mask(SVEExtractElementToSIMDFPScalarRegisterMask)) {
13970     case LASTA_v_p_z:
13971       active_offset = 1;
13972       break;
13973     case LASTB_v_p_z:
13974       active_offset = 0;
13975       break;
13976     default:
13977       VIXL_UNIMPLEMENTED();
13978       break;
13979   }
13980 
13981   if (active_offset >= 0) {
13982     LogicVRegister dst(vdn);
13983     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
13984     dup_immediate(vform, vdn, 0);
13985     dst.SetUint(vform, 0, value.second);
13986   }
13987 }
13988 
13989 void Simulator::VisitSVEExtractElementToGeneralRegister(
13990     const Instruction* instr) {
13991   VectorFormat vform = instr->GetSVEVectorFormat();
13992   SimVRegister& zm = ReadVRegister(instr->GetRn());
13993   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
13994 
13995   int active_offset = -1;
13996   switch (instr->Mask(SVEExtractElementToGeneralRegisterMask)) {
13997     case LASTA_r_p_z:
13998       active_offset = 1;
13999       break;
14000     case LASTB_r_p_z:
14001       active_offset = 0;
14002       break;
14003     default:
14004       VIXL_UNIMPLEMENTED();
14005       break;
14006   }
14007 
14008   if (active_offset >= 0) {
14009     std::pair<bool, uint64_t> value = clast(vform, pg, zm, active_offset);
14010     WriteXRegister(instr->GetRd(), value.second);
14011   }
14012 }
14013 
14014 void Simulator::VisitSVECompressActiveElements(const Instruction* instr) {
14015   VectorFormat vform = instr->GetSVEVectorFormat();
14016   SimVRegister& zd = ReadVRegister(instr->GetRd());
14017   SimVRegister& zn = ReadVRegister(instr->GetRn());
14018   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
14019 
14020   switch (instr->Mask(SVECompressActiveElementsMask)) {
14021     case COMPACT_z_p_z:
14022       compact(vform, zd, pg, zn);
14023       break;
14024     default:
14025       VIXL_UNIMPLEMENTED();
14026       break;
14027   }
14028 }
14029 
14030 void Simulator::VisitSVECopyGeneralRegisterToVector_Predicated(
14031     const Instruction* instr) {
14032   VectorFormat vform = instr->GetSVEVectorFormat();
14033   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
14034   SimVRegister z_result;
14035 
14036   switch (instr->Mask(SVECopyGeneralRegisterToVector_PredicatedMask)) {
14037     case CPY_z_p_r:
14038       dup_immediate(vform,
14039                     z_result,
14040                     ReadXRegister(instr->GetRn(), Reg31IsStackPointer));
14041       mov_merging(vform, ReadVRegister(instr->GetRd()), pg, z_result);
14042       break;
14043     default:
14044       VIXL_UNIMPLEMENTED();
14045       break;
14046   }
14047 }
14048 
14049 void Simulator::VisitSVECopyIntImm_Predicated(const Instruction* instr) {
14050   VectorFormat vform = instr->GetSVEVectorFormat();
14051   SimPRegister& pg = ReadPRegister(instr->ExtractBits(19, 16));
14052   SimVRegister& zd = ReadVRegister(instr->GetRd());
14053 
14054   SimVRegister result;
14055   switch (instr->Mask(SVECopyIntImm_PredicatedMask)) {
14056     case CPY_z_p_i: {
14057       // Use unsigned arithmetic to avoid undefined behaviour during the shift.
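      // Bit 13 is the shift flag: when set, the immediate is shifted left by
      // eight bits (LSL #8).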
14058       uint64_t imm8 = instr->GetImmSVEIntWideSigned();
14059       dup_immediate(vform, result, imm8 << (instr->ExtractBit(13) * 8));
14060       break;
14061     }
14062     default:
14063       VIXL_UNIMPLEMENTED();
14064       break;
14065   }
14066 
14067   if (instr->ExtractBit(14) != 0) {
14068     mov_merging(vform, zd, pg, result);
14069   } else {
14070     mov_zeroing(vform, zd, pg, result);
14071   }
14072 }
14073 
14074 void Simulator::VisitSVEReverseWithinElements(const Instruction* instr) {
14075   SimVRegister& zd = ReadVRegister(instr->GetRd());
14076   SimVRegister& zn = ReadVRegister(instr->GetRn());
14077   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
14078   SimVRegister result;
14079 
14080   // In NEON, the chunk size in which elements are REVersed is in the
14081   // instruction mnemonic, and the element size attached to the register.
14082   // SVE reverses the semantics; the mapping to logic functions below is to
14083   // account for this.
14084   VectorFormat chunk_form = instr->GetSVEVectorFormat();
14085   VectorFormat element_form = kFormatUndefined;
14086 
14087   switch (instr->Mask(SVEReverseWithinElementsMask)) {
14088     case RBIT_z_p_z:
14089       rbit(chunk_form, result, zn);
14090       break;
14091     case REVB_z_z:
14092       VIXL_ASSERT((chunk_form == kFormatVnH) || (chunk_form == kFormatVnS) ||
14093                   (chunk_form == kFormatVnD));
14094       element_form = kFormatVnB;
14095       break;
14096     case REVH_z_z:
14097       VIXL_ASSERT((chunk_form == kFormatVnS) || (chunk_form == kFormatVnD));
14098       element_form = kFormatVnH;
14099       break;
14100     case REVW_z_z:
14101       VIXL_ASSERT(chunk_form == kFormatVnD);
14102       element_form = kFormatVnS;
14103       break;
14104     default:
14105       VIXL_UNIMPLEMENTED();
14106       break;
14107   }
14108 
14109   if (instr->Mask(SVEReverseWithinElementsMask) != RBIT_z_p_z) {
14110     VIXL_ASSERT(element_form != kFormatUndefined);
14111     switch (chunk_form) {
14112       case kFormatVnH:
14113         rev16(element_form, result, zn);
14114         break;
14115       case kFormatVnS:
14116         rev32(element_form, result, zn);
14117         break;
14118       case kFormatVnD:
14119         rev64(element_form, result, zn);
14120         break;
14121       default:
14122         VIXL_UNIMPLEMENTED();
14123     }
14124   }
14125 
14126   mov_merging(chunk_form, zd, pg, result);
14127 }
14128 
14129 void Simulator::VisitSVEVectorSplice(const Instruction* instr) {
14130   VectorFormat vform = instr->GetSVEVectorFormat();
14131   SimVRegister& zd = ReadVRegister(instr->GetRd());
14132   SimVRegister& zn = ReadVRegister(instr->GetRn());
14133   SimVRegister& zn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfZRegisters);
14134   SimPRegister& pg = ReadPRegister(instr->GetPgLow8());
14135 
14136   switch (form_hash_) {
14137     case "splice_z_p_zz_des"_h:
14138       splice(vform, zd, pg, zd, zn);
14139       break;
14140     case "splice_z_p_zz_con"_h:
14141       splice(vform, zd, pg, zn, zn2);
14142       break;
14143     default:
14144       VIXL_UNIMPLEMENTED();
14145       break;
14146   }
14147 }
14148 
14149 void Simulator::VisitSVEBroadcastGeneralRegister(const Instruction* instr) {
14150   SimVRegister& zd = ReadVRegister(instr->GetRd());
14151   switch (instr->Mask(SVEBroadcastGeneralRegisterMask)) {
14152     case DUP_z_r:
14153       dup_immediate(instr->GetSVEVectorFormat(),
14154                     zd,
14155                     ReadXRegister(instr->GetRn(), Reg31IsStackPointer));
14156       break;
14157     default:
14158       VIXL_UNIMPLEMENTED();
14159       break;
14160   }
14161 }
14162 
14163 void Simulator::VisitSVEInsertSIMDFPScalarRegister(const Instruction* instr) {
14164   SimVRegister& zd = ReadVRegister(instr->GetRd());
14165   VectorFormat vform = instr->GetSVEVectorFormat();
14166   switch (instr->Mask(SVEInsertSIMDFPScalarRegisterMask)) {
14167     case INSR_z_v:
14168       insr(vform, zd, ReadDRegisterBits(instr->GetRn()));
14169       break;
14170     default:
14171       VIXL_UNIMPLEMENTED();
14172       break;
14173   }
14174 }
14175 
14176 void Simulator::VisitSVEInsertGeneralRegister(const Instruction* instr) {
14177   SimVRegister& zd = ReadVRegister(instr->GetRd());
14178   VectorFormat vform = instr->GetSVEVectorFormat();
14179   switch (instr->Mask(SVEInsertGeneralRegisterMask)) {
14180     case INSR_z_r:
14181       insr(vform, zd, ReadXRegister(instr->GetRn()));
14182       break;
14183     default:
14184       VIXL_UNIMPLEMENTED();
14185       break;
14186   }
14187 }
14188 
14189 void Simulator::VisitSVEBroadcastIndexElement(const Instruction* instr) {
14190   SimVRegister& zd = ReadVRegister(instr->GetRd());
14191   switch (instr->Mask(SVEBroadcastIndexElementMask)) {
14192     case DUP_z_zi: {
14193       std::pair<int, int> index_and_lane_size =
14194           instr->GetSVEPermuteIndexAndLaneSizeLog2();
14195       int index = index_and_lane_size.first;
14196       int lane_size_in_bytes_log_2 = index_and_lane_size.second;
14197       VectorFormat vform =
14198           SVEFormatFromLaneSizeInBytesLog2(lane_size_in_bytes_log_2);
14199       if ((index < 0) || (index >= LaneCountFromFormat(vform))) {
14200         // Out of bounds, set the destination register to zero.
14201         dup_immediate(kFormatVnD, zd, 0);
14202       } else {
14203         dup_element(vform, zd, ReadVRegister(instr->GetRn()), index);
14204       }
14205       return;
14206     }
14207     default:
14208       VIXL_UNIMPLEMENTED();
14209       break;
14210   }
14211 }
14212 
14213 void Simulator::VisitSVEReverseVectorElements(const Instruction* instr) {
14214   SimVRegister& zd = ReadVRegister(instr->GetRd());
14215   VectorFormat vform = instr->GetSVEVectorFormat();
14216   switch (instr->Mask(SVEReverseVectorElementsMask)) {
14217     case REV_z_z:
14218       rev(vform, zd, ReadVRegister(instr->GetRn()));
14219       break;
14220     default:
14221       VIXL_UNIMPLEMENTED();
14222       break;
14223   }
14224 }
14225 
14226 void Simulator::VisitSVEUnpackVectorElements(const Instruction* instr) {
14227   SimVRegister& zd = ReadVRegister(instr->GetRd());
14228   VectorFormat vform = instr->GetSVEVectorFormat();
14229   switch (instr->Mask(SVEUnpackVectorElementsMask)) {
14230     case SUNPKHI_z_z:
14231       unpk(vform, zd, ReadVRegister(instr->GetRn()), kHiHalf, kSignedExtend);
14232       break;
14233     case SUNPKLO_z_z:
14234       unpk(vform, zd, ReadVRegister(instr->GetRn()), kLoHalf, kSignedExtend);
14235       break;
14236     case UUNPKHI_z_z:
14237       unpk(vform, zd, ReadVRegister(instr->GetRn()), kHiHalf, kUnsignedExtend);
14238       break;
14239     case UUNPKLO_z_z:
14240       unpk(vform, zd, ReadVRegister(instr->GetRn()), kLoHalf, kUnsignedExtend);
14241       break;
14242     default:
14243       VIXL_UNIMPLEMENTED();
14244       break;
14245   }
14246 }
14247 
14248 void Simulator::VisitSVETableLookup(const Instruction* instr) {
14249   VectorFormat vform = instr->GetSVEVectorFormat();
14250   SimVRegister& zd = ReadVRegister(instr->GetRd());
14251   SimVRegister& zn = ReadVRegister(instr->GetRn());
14252   SimVRegister& zn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfZRegisters);
14253   SimVRegister& zm = ReadVRegister(instr->GetRm());
14254 
14255   switch (form_hash_) {
14256     case "tbl_z_zz_1"_h:
14257       tbl(vform, zd, zn, zm);
14258       break;
14259     case "tbl_z_zz_2"_h:
14260       tbl(vform, zd, zn, zn2, zm);
14261       break;
14262     case "tbx_z_zz"_h:
14263       tbx(vform, zd, zn, zm);
14264       break;
14265     default:
14266       VIXL_UNIMPLEMENTED();
14267       break;
14268   }
14269 }
14270 
14271 void Simulator::VisitSVEPredicateCount(const Instruction* instr) {
14272   VectorFormat vform = instr->GetSVEVectorFormat();
14273   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
14274   SimPRegister& pn = ReadPRegister(instr->GetPn());
14275 
14276   switch (instr->Mask(SVEPredicateCountMask)) {
14277     case CNTP_r_p_p: {
14278       WriteXRegister(instr->GetRd(), CountActiveAndTrueLanes(vform, pg, pn));
14279       break;
14280     }
14281     default:
14282       VIXL_UNIMPLEMENTED();
14283       break;
14284   }
14285 }
14286 
14287 void Simulator::VisitSVEPredicateLogical(const Instruction* instr) {
14288   Instr op = instr->Mask(SVEPredicateLogicalMask);
14289   SimPRegister& pd = ReadPRegister(instr->GetPd());
14290   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
14291   SimPRegister& pn = ReadPRegister(instr->GetPn());
14292   SimPRegister& pm = ReadPRegister(instr->GetPm());
14293   SimPRegister result;
14294   switch (op) {
14295     case ANDS_p_p_pp_z:
14296     case AND_p_p_pp_z:
14297     case BICS_p_p_pp_z:
14298     case BIC_p_p_pp_z:
14299     case EORS_p_p_pp_z:
14300     case EOR_p_p_pp_z:
14301     case NANDS_p_p_pp_z:
14302     case NAND_p_p_pp_z:
14303     case NORS_p_p_pp_z:
14304     case NOR_p_p_pp_z:
14305     case ORNS_p_p_pp_z:
14306     case ORN_p_p_pp_z:
14307     case ORRS_p_p_pp_z:
14308     case ORR_p_p_pp_z:
14309       SVEPredicateLogicalHelper(static_cast<SVEPredicateLogicalOp>(op),
14310                                 result,
14311                                 pn,
14312                                 pm);
14313       break;
14314     case SEL_p_p_pp:
14315       sel(pd, pg, pn, pm);
14316       return;
14317     default:
14318       VIXL_UNIMPLEMENTED();
14319       break;
14320   }
14321 
14322   mov_zeroing(pd, pg, result);
14323   if (instr->Mask(SVEPredicateLogicalSetFlagsBit) != 0) {
14324     PredTest(kFormatVnB, pg, pd);
14325   }
14326 }
14327 
14328 void Simulator::VisitSVEPredicateFirstActive(const Instruction* instr) {
14329   LogicPRegister pg = ReadPRegister(instr->ExtractBits(8, 5));
14330   LogicPRegister pdn = ReadPRegister(instr->GetPd());
14331   switch (instr->Mask(SVEPredicateFirstActiveMask)) {
14332     case PFIRST_p_p_p:
14333       pfirst(pdn, pg, pdn);
14334       // TODO: Is this broken when pg == pdn?
14335       PredTest(kFormatVnB, pg, pdn);
14336       break;
14337     default:
14338       VIXL_UNIMPLEMENTED();
14339       break;
14340   }
14341 }
14342 
14343 void Simulator::VisitSVEPredicateInitialize(const Instruction* instr) {
14344   // This group only contains PTRUE{S}, and there are no unallocated encodings.
14345   VIXL_STATIC_ASSERT(
14346       SVEPredicateInitializeMask ==
14347       (SVEPredicateInitializeFMask | SVEPredicateInitializeSetFlagsBit));
14348   VIXL_ASSERT((instr->Mask(SVEPredicateInitializeMask) == PTRUE_p_s) ||
14349               (instr->Mask(SVEPredicateInitializeMask) == PTRUES_p_s));
14350 
14351   LogicPRegister pdn = ReadPRegister(instr->GetPd());
14352   VectorFormat vform = instr->GetSVEVectorFormat();
14353 
14354   ptrue(vform, pdn, instr->GetImmSVEPredicateConstraint());
14355   if (instr->ExtractBit(16)) PredTest(vform, pdn, pdn);
14356 }
14357 
14358 void Simulator::VisitSVEPredicateNextActive(const Instruction* instr) {
14359   // This group only contains PNEXT, and there are no unallocated encodings.
14360   VIXL_STATIC_ASSERT(SVEPredicateNextActiveFMask == SVEPredicateNextActiveMask);
14361   VIXL_ASSERT(instr->Mask(SVEPredicateNextActiveMask) == PNEXT_p_p_p);
14362 
14363   LogicPRegister pg = ReadPRegister(instr->ExtractBits(8, 5));
14364   LogicPRegister pdn = ReadPRegister(instr->GetPd());
14365   VectorFormat vform = instr->GetSVEVectorFormat();
14366 
14367   pnext(vform, pdn, pg, pdn);
14368   // TODO: Is this broken when pg == pdn?
14369   PredTest(vform, pg, pdn);
14370 }
14371 
14372 void Simulator::VisitSVEPredicateReadFromFFR_Predicated(
14373     const Instruction* instr) {
14374   LogicPRegister pd(ReadPRegister(instr->GetPd()));
14375   LogicPRegister pg(ReadPRegister(instr->GetPn()));
14376   FlagsUpdate flags = LeaveFlags;
14377   switch (instr->Mask(SVEPredicateReadFromFFR_PredicatedMask)) {
14378     case RDFFR_p_p_f:
14379       // Do nothing.
14380       break;
14381     case RDFFRS_p_p_f:
14382       flags = SetFlags;
14383       break;
14384     default:
14385       VIXL_UNIMPLEMENTED();
14386       break;
14387   }
14388 
14389   LogicPRegister ffr(ReadFFR());
14390   mov_zeroing(pd, pg, ffr);
14391 
14392   if (flags == SetFlags) {
14393     PredTest(kFormatVnB, pg, pd);
14394   }
14395 }
14396 
14397 void Simulator::VisitSVEPredicateReadFromFFR_Unpredicated(
14398     const Instruction* instr) {
14399   LogicPRegister pd(ReadPRegister(instr->GetPd()));
14400   LogicPRegister ffr(ReadFFR());
14401   switch (instr->Mask(SVEPredicateReadFromFFR_UnpredicatedMask)) {
14402     case RDFFR_p_f:
14403       mov(pd, ffr);
14404       break;
14405     default:
14406       VIXL_UNIMPLEMENTED();
14407       break;
14408   }
14409 }
14410 
14411 void Simulator::VisitSVEPredicateTest(const Instruction* instr) {
14412   switch (instr->Mask(SVEPredicateTestMask)) {
14413     case PTEST_p_p:
14414       PredTest(kFormatVnB,
14415                ReadPRegister(instr->ExtractBits(13, 10)),
14416                ReadPRegister(instr->GetPn()));
14417       break;
14418     default:
14419       VIXL_UNIMPLEMENTED();
14420       break;
14421   }
14422 }
14423 
14424 void Simulator::VisitSVEPredicateZero(const Instruction* instr) {
14425   switch (instr->Mask(SVEPredicateZeroMask)) {
14426     case PFALSE_p:
14427       pfalse(ReadPRegister(instr->GetPd()));
14428       break;
14429     default:
14430       VIXL_UNIMPLEMENTED();
14431       break;
14432   }
14433 }
14434 
14435 void Simulator::VisitSVEPropagateBreak(const Instruction* instr) {
14436   SimPRegister& pd = ReadPRegister(instr->GetPd());
14437   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
14438   SimPRegister& pn = ReadPRegister(instr->GetPn());
14439   SimPRegister& pm = ReadPRegister(instr->GetPm());
14440 
14441   bool set_flags = false;
14442   switch (instr->Mask(SVEPropagateBreakMask)) {
14443     case BRKPAS_p_p_pp:
14444       set_flags = true;
14445       VIXL_FALLTHROUGH();
14446     case BRKPA_p_p_pp:
14447       brkpa(pd, pg, pn, pm);
14448       break;
14449     case BRKPBS_p_p_pp:
14450       set_flags = true;
14451       VIXL_FALLTHROUGH();
14452     case BRKPB_p_p_pp:
14453       brkpb(pd, pg, pn, pm);
14454       break;
14455     default:
14456       VIXL_UNIMPLEMENTED();
14457       break;
14458   }
14459 
14460   if (set_flags) {
14461     PredTest(kFormatVnB, pg, pd);
14462   }
14463 }
14464 
14465 void Simulator::VisitSVEStackFrameAdjustment(const Instruction* instr) {
14466   uint64_t length = 0;
14467   switch (instr->Mask(SVEStackFrameAdjustmentMask)) {
14468     case ADDPL_r_ri:
14469       length = GetPredicateLengthInBytes();
14470       break;
14471     case ADDVL_r_ri:
14472       length = GetVectorLengthInBytes();
14473       break;
14474     default:
14475       VIXL_UNIMPLEMENTED();
14476   }
14477   uint64_t base = ReadXRegister(instr->GetRm(), Reg31IsStackPointer);
14478   WriteXRegister(instr->GetRd(),
14479                  base + (length * instr->GetImmSVEVLScale()),
14480                  LogRegWrites,
14481                  Reg31IsStackPointer);
14482 }
14483 
14484 void Simulator::VisitSVEStackFrameSize(const Instruction* instr) {
14485   int64_t scale = instr->GetImmSVEVLScale();
14486 
14487   switch (instr->Mask(SVEStackFrameSizeMask)) {
14488     case RDVL_r_i:
14489       WriteXRegister(instr->GetRd(), GetVectorLengthInBytes() * scale);
14490       break;
14491     default:
14492       VIXL_UNIMPLEMENTED();
14493   }
14494 }
14495 
14496 void Simulator::VisitSVEVectorSelect(const Instruction* instr) {
14497   // The only instruction in this group is `sel`, and there are no unused
14498   // encodings.
14499   VIXL_ASSERT(instr->Mask(SVEVectorSelectMask) == SEL_z_p_zz);
14500 
14501   VectorFormat vform = instr->GetSVEVectorFormat();
14502   SimVRegister& zd = ReadVRegister(instr->GetRd());
14503   SimPRegister& pg = ReadPRegister(instr->ExtractBits(13, 10));
14504   SimVRegister& zn = ReadVRegister(instr->GetRn());
14505   SimVRegister& zm = ReadVRegister(instr->GetRm());
14506 
14507   sel(vform, zd, pg, zn, zm);
14508 }
14509 
14510 void Simulator::VisitSVEFFRInitialise(const Instruction* instr) {
14511   switch (instr->Mask(SVEFFRInitialiseMask)) {
14512     case SETFFR_f: {
14513       LogicPRegister ffr(ReadFFR());
14514       ffr.SetAllBits();
14515       break;
14516     }
14517     default:
14518       VIXL_UNIMPLEMENTED();
14519       break;
14520   }
14521 }
14522 
14523 void Simulator::VisitSVEFFRWriteFromPredicate(const Instruction* instr) {
14524   switch (instr->Mask(SVEFFRWriteFromPredicateMask)) {
14525     case WRFFR_f_p: {
14526       SimPRegister pn(ReadPRegister(instr->GetPn()));
14527       bool last_active = true;
14528       for (unsigned i = 0; i < pn.GetSizeInBits(); i++) {
14529         bool active = pn.GetBit(i);
14530         if (active && !last_active) {
14531           // `pn` is non-monotonic. This is UNPREDICTABLE.
14532           VIXL_ABORT();
14533         }
14534         last_active = active;
14535       }
14536       mov(ReadFFR(), pn);
14537       break;
14538     }
14539     default:
14540       VIXL_UNIMPLEMENTED();
14541       break;
14542   }
14543 }
14544 
14545 void Simulator::VisitSVEContiguousLoad_ScalarPlusImm(const Instruction* instr) {
14546   bool is_signed;
14547   switch (instr->Mask(SVEContiguousLoad_ScalarPlusImmMask)) {
14548     case LD1B_z_p_bi_u8:
14549     case LD1B_z_p_bi_u16:
14550     case LD1B_z_p_bi_u32:
14551     case LD1B_z_p_bi_u64:
14552     case LD1H_z_p_bi_u16:
14553     case LD1H_z_p_bi_u32:
14554     case LD1H_z_p_bi_u64:
14555     case LD1W_z_p_bi_u32:
14556     case LD1W_z_p_bi_u64:
14557     case LD1D_z_p_bi_u64:
14558       is_signed = false;
14559       break;
14560     case LD1SB_z_p_bi_s16:
14561     case LD1SB_z_p_bi_s32:
14562     case LD1SB_z_p_bi_s64:
14563     case LD1SH_z_p_bi_s32:
14564     case LD1SH_z_p_bi_s64:
14565     case LD1SW_z_p_bi_s64:
14566       is_signed = true;
14567       break;
14568     default:
14569       // This encoding group is complete, so no other values should be possible.
14570       VIXL_UNREACHABLE();
14571       is_signed = false;
14572       break;
14573   }
14574 
14575   int vl = GetVectorLengthInBytes();
14576   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
14577   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
14578   VIXL_ASSERT(esize_in_bytes_log2 >= msize_in_bytes_log2);
14579   int vl_divisor_log2 = esize_in_bytes_log2 - msize_in_bytes_log2;
14580   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14581   uint64_t offset =
14582       (instr->ExtractSignedBits(19, 16) * vl) / (1 << vl_divisor_log2);
14583   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
14584   LogicSVEAddressVector addr(base + offset);
14585   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
14586   SVEStructuredLoadHelper(vform,
14587                           ReadPRegister(instr->GetPgLow8()),
14588                           instr->GetRt(),
14589                           addr,
14590                           is_signed);
14591 }
14592 
14593 void Simulator::VisitSVEContiguousLoad_ScalarPlusScalar(
14594     const Instruction* instr) {
14595   bool is_signed;
14596   switch (instr->Mask(SVEContiguousLoad_ScalarPlusScalarMask)) {
14597     case LD1B_z_p_br_u8:
14598     case LD1B_z_p_br_u16:
14599     case LD1B_z_p_br_u32:
14600     case LD1B_z_p_br_u64:
14601     case LD1H_z_p_br_u16:
14602     case LD1H_z_p_br_u32:
14603     case LD1H_z_p_br_u64:
14604     case LD1W_z_p_br_u32:
14605     case LD1W_z_p_br_u64:
14606     case LD1D_z_p_br_u64:
14607       is_signed = false;
14608       break;
14609     case LD1SB_z_p_br_s16:
14610     case LD1SB_z_p_br_s32:
14611     case LD1SB_z_p_br_s64:
14612     case LD1SH_z_p_br_s32:
14613     case LD1SH_z_p_br_s64:
14614     case LD1SW_z_p_br_s64:
14615       is_signed = true;
14616       break;
14617     default:
14618       // This encoding group is complete, so no other values should be possible.
14619       VIXL_UNREACHABLE();
14620       is_signed = false;
14621       break;
14622   }
14623 
14624   int msize_in_bytes_log2 = instr->GetSVEMsizeFromDtype(is_signed);
14625   int esize_in_bytes_log2 = instr->GetSVEEsizeFromDtype(is_signed);
14626   VIXL_ASSERT(msize_in_bytes_log2 <= esize_in_bytes_log2);
14627   VectorFormat vform = SVEFormatFromLaneSizeInBytesLog2(esize_in_bytes_log2);
14628   uint64_t base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14629   uint64_t offset = ReadXRegister(instr->GetRm());
14630   offset <<= msize_in_bytes_log2;
14631   LogicSVEAddressVector addr(base + offset);
14632   addr.SetMsizeInBytesLog2(msize_in_bytes_log2);
14633   SVEStructuredLoadHelper(vform,
14634                           ReadPRegister(instr->GetPgLow8()),
14635                           instr->GetRt(),
14636                           addr,
14637                           is_signed);
14638 }
14639 
14640 void Simulator::DoUnreachable(const Instruction* instr) {
14641   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
14642               (instr->GetImmException() == kUnreachableOpcode));
14643 
14644   fprintf(stream_,
14645           "Hit UNREACHABLE marker at pc=%p.\n",
14646           reinterpret_cast<const void*>(instr));
14647   abort();
14648 }
14649 
14650 void Simulator::Simulate_XdSP_XnSP_Xm(const Instruction* instr) {
14651   VIXL_ASSERT(form_hash_ == Hash("irg_64i_dp_2src"));
14652   uint64_t rn = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14653   uint64_t rm = ReadXRegister(instr->GetRm());
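  // The low 16 bits of Xm specify the set of allocation tags excluded from
  // random tag generation.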
14654   uint64_t tag = GenerateRandomTag(rm & 0xffff);
14655   uint64_t new_val = GetAddressWithAllocationTag(rn, tag);
14656   WriteXRegister(instr->GetRd(), new_val, LogRegWrites, Reg31IsStackPointer);
14657 }
14658 
14659 void Simulator::SimulateMTEAddSubTag(const Instruction* instr) {
14660   uint64_t rn = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14661   uint64_t rn_tag = GetAllocationTagFromAddress(rn);
14662   uint64_t tag_offset = instr->ExtractBits(13, 10);
14663   // TODO: implement GCR_EL1.Exclude to provide a tag exclusion list.
14664   uint64_t new_tag = ChooseNonExcludedTag(rn_tag, tag_offset);
14665 
14666   uint64_t offset = instr->ExtractBits(21, 16) * kMTETagGranuleInBytes;
14667   int carry = 0;
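  // SUBG is computed as an addition of the complemented offset with a
  // carry-in of one (two's-complement negation).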
14668   if (form_hash_ == Hash("subg_64_addsub_immtags")) {
14669     offset = ~offset;
14670     carry = 1;
14671   } else {
14672     VIXL_ASSERT(form_hash_ == Hash("addg_64_addsub_immtags"));
14673   }
14674   uint64_t new_val =
14675       AddWithCarry(kXRegSize, /* set_flags = */ false, rn, offset, carry);
14676   new_val = GetAddressWithAllocationTag(new_val, new_tag);
14677   WriteXRegister(instr->GetRd(), new_val, LogRegWrites, Reg31IsStackPointer);
14678 }
14679 
14680 void Simulator::SimulateMTETagMaskInsert(const Instruction* instr) {
14681   VIXL_ASSERT(form_hash_ == Hash("gmi_64g_dp_2src"));
14682   uint64_t mask = ReadXRegister(instr->GetRm());
14683   uint64_t tag = GetAllocationTagFromAddress(
14684       ReadXRegister(instr->GetRn(), Reg31IsStackPointer));
14685   uint64_t mask_bit = uint64_t{1} << tag;
14686   WriteXRegister(instr->GetRd(), mask | mask_bit);
14687 }
14688 
14689 void Simulator::SimulateMTESubPointer(const Instruction* instr) {
14690   uint64_t rn = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14691   uint64_t rm = ReadXRegister(instr->GetRm(), Reg31IsStackPointer);
14692 
14693   VIXL_ASSERT((form_hash_ == Hash("subps_64s_dp_2src")) ||
14694               (form_hash_ == Hash("subp_64s_dp_2src")));
14695   bool set_flags = (form_hash_ == Hash("subps_64s_dp_2src"));
14696 
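  // Only the 56-bit address field (bits 55:0) takes part in the calculation;
  // Xn - Xm is evaluated as Xn + ~Xm + 1.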
14697   rn = ExtractSignedBitfield64(55, 0, rn);
14698   rm = ExtractSignedBitfield64(55, 0, rm);
14699   uint64_t new_val = AddWithCarry(kXRegSize, set_flags, rn, ~rm, 1);
14700   WriteXRegister(instr->GetRd(), new_val);
14701 }
14702 
14703 void Simulator::SimulateMTEStoreTagPair(const Instruction* instr) {
14704   uint64_t rn = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
14705   uint64_t rt = ReadXRegister(instr->GetRt());
14706   uint64_t rt2 = ReadXRegister(instr->GetRt2());
14707   int offset = instr->GetImmLSPair() * static_cast<int>(kMTETagGranuleInBytes);
14708 
14709   AddrMode addr_mode = Offset;
14710   switch (form_hash_) {
14711     case Hash("stgp_64_ldstpair_off"):
14712       // Default is the offset mode.
14713       break;
14714     case Hash("stgp_64_ldstpair_post"):
14715       addr_mode = PostIndex;
14716       break;
14717     case Hash("stgp_64_ldstpair_pre"):
14718       addr_mode = PreIndex;
14719       break;
14720     default:
14721       VIXL_UNIMPLEMENTED();
14722   }
14723 
14724   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addr_mode);
14725   if (!IsAligned(address, kMTETagGranuleInBytes)) {
14726     VIXL_ALIGNMENT_EXCEPTION();
14727   }
14728 
14729   int tag = GetAllocationTagFromAddress(rn);
14730   meta_data_.SetMTETag(address, tag);
14731 
14732   if (!MemWrite<uint64_t>(address, rt)) return;
14733   if (!MemWrite<uint64_t>(address + kXRegSizeInBytes, rt2)) return;
14734 }
14735 
14736 void Simulator::SimulateMTEStoreTag(const Instruction* instr) {
14737   uint64_t rt = ReadXRegister(instr->GetRt(), Reg31IsStackPointer);
14738   int offset = instr->GetImmLS() * static_cast<int>(kMTETagGranuleInBytes);
14739 
14740   AddrMode addr_mode = Offset;
14741   switch (form_hash_) {
14742     case Hash("st2g_64soffset_ldsttags"):
14743     case Hash("stg_64soffset_ldsttags"):
14744     case Hash("stz2g_64soffset_ldsttags"):
14745     case Hash("stzg_64soffset_ldsttags"):
14746       // Default is the offset mode.
14747       break;
14748     case Hash("st2g_64spost_ldsttags"):
14749     case Hash("stg_64spost_ldsttags"):
14750     case Hash("stz2g_64spost_ldsttags"):
14751     case Hash("stzg_64spost_ldsttags"):
14752       addr_mode = PostIndex;
14753       break;
14754     case Hash("st2g_64spre_ldsttags"):
14755     case Hash("stg_64spre_ldsttags"):
14756     case Hash("stz2g_64spre_ldsttags"):
14757     case Hash("stzg_64spre_ldsttags"):
14758       addr_mode = PreIndex;
14759       break;
14760     default:
14761       VIXL_UNIMPLEMENTED();
14762   }
14763 
14764   bool is_pair = false;
14765   switch (form_hash_) {
14766     case Hash("st2g_64soffset_ldsttags"):
14767     case Hash("st2g_64spost_ldsttags"):
14768     case Hash("st2g_64spre_ldsttags"):
14769     case Hash("stz2g_64soffset_ldsttags"):
14770     case Hash("stz2g_64spost_ldsttags"):
14771     case Hash("stz2g_64spre_ldsttags"):
14772       is_pair = true;
14773       break;
14774     default:
14775       break;
14776   }
14777 
14778   bool is_zeroing = false;
14779   switch (form_hash_) {
14780     case Hash("stz2g_64soffset_ldsttags"):
14781     case Hash("stz2g_64spost_ldsttags"):
14782     case Hash("stz2g_64spre_ldsttags"):
14783     case Hash("stzg_64soffset_ldsttags"):
14784     case Hash("stzg_64spost_ldsttags"):
14785     case Hash("stzg_64spre_ldsttags"):
14786       is_zeroing = true;
14787       break;
14788     default:
14789       break;
14790   }
14791 
14792   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addr_mode);
14793 
14794   if (is_zeroing) {
14795     if (!IsAligned(address, kMTETagGranuleInBytes)) {
14796       VIXL_ALIGNMENT_EXCEPTION();
14797     }
14798     VIXL_STATIC_ASSERT(kMTETagGranuleInBytes >= sizeof(uint64_t));
14799     VIXL_STATIC_ASSERT(kMTETagGranuleInBytes % sizeof(uint64_t) == 0);
14800 
14801     size_t fill_size = kMTETagGranuleInBytes;
14802     if (is_pair) {
14803       fill_size += kMTETagGranuleInBytes;
14804     }
14805 
14806     size_t fill_offset = 0;
14807     while (fill_offset < fill_size) {
14808       if (!MemWrite<uint64_t>(address + fill_offset, 0)) return;
14809       fill_offset += sizeof(uint64_t);
14810     }
14811   }
14812 
14813   int tag = GetAllocationTagFromAddress(rt);
14814   meta_data_.SetMTETag(address, tag, instr);
14815   if (is_pair) {
14816     meta_data_.SetMTETag(address + kMTETagGranuleInBytes, tag, instr);
14817   }
14818 }
14819 
14820 void Simulator::SimulateMTELoadTag(const Instruction* instr) {
14821   uint64_t rt = ReadXRegister(instr->GetRt());
14822   int offset = instr->GetImmLS() * static_cast<int>(kMTETagGranuleInBytes);
14823 
14824   switch (form_hash_) {
14825     case Hash("ldg_64loffset_ldsttags"):
14826       break;
14827     default:
14828       VIXL_UNIMPLEMENTED();
14829   }
14830 
14831   uintptr_t address = AddressModeHelper(instr->GetRn(), offset, Offset);
14832   address = AlignDown(address, kMTETagGranuleInBytes);
14833   uint64_t tag = meta_data_.GetMTETag(address, instr);
14834   WriteXRegister(instr->GetRt(), GetAddressWithAllocationTag(rt, tag));
14835 }
14836 
14837 void Simulator::SimulateCpyFP(const Instruction* instr) {
14838   MOPSPHelper<"cpy"_h>(instr);
14839   LogSystemRegister(NZCV);
14840 }
14841 
14842 void Simulator::SimulateCpyP(const Instruction* instr) {
14843   MOPSPHelper<"cpy"_h>(instr);
14844 
14845   int d = instr->GetRd();
14846   int n = instr->GetRn();
14847   int s = instr->GetRs();
14848 
14849   // Determine copy direction. For cases in which direction is implementation
14850   // defined, use forward.
14851   bool is_backwards = false;
14852   uint64_t xs = ReadXRegister(s);
14853   uint64_t xd = ReadXRegister(d);
14854   uint64_t xn = ReadXRegister(n);
14855 
14856   // Ignore the top byte of addresses for comparisons. We can use xn as is,
14857   // as it should have zero in bits 63:55.
14858   uint64_t xs_tbi = ExtractUnsignedBitfield64(55, 0, xs);
14859   uint64_t xd_tbi = ExtractUnsignedBitfield64(55, 0, xd);
14860   VIXL_ASSERT(ExtractUnsignedBitfield64(63, 55, xn) == 0);
14861   if ((xs_tbi < xd_tbi) && ((xs_tbi + xn) > xd_tbi)) {
14862     is_backwards = true;
14863     WriteXRegister(s, xs + xn);
14864     WriteXRegister(d, xd + xn);
14865   }
14866 
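  // Record the direction in the N flag so that the main and epilogue parts of
  // the triplet can retrieve it.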
14867   ReadNzcv().SetN(is_backwards ? 1 : 0);
14868   LogSystemRegister(NZCV);
14869 }
14870 
14871 void Simulator::SimulateCpyM(const Instruction* instr) {
14872   VIXL_ASSERT(instr->IsConsistentMOPSTriplet<"cpy"_h>());
14873   VIXL_ASSERT(instr->IsMOPSMainOf(GetLastExecutedInstruction(), "cpy"_h));
14874 
14875   int d = instr->GetRd();
14876   int n = instr->GetRn();
14877   int s = instr->GetRs();
14878 
14879   uint64_t xd = ReadXRegister(d);
14880   uint64_t xn = ReadXRegister(n);
14881   uint64_t xs = ReadXRegister(s);
14882   bool is_backwards = ReadN();
14883 
14884   int step = 1;
14885   if (is_backwards) {
14886     step = -1;
14887     xs--;
14888     xd--;
14889   }
14890 
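  // Copy xn bytes, one byte at a time, in the direction chosen by the
  // prologue.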
14891   while (xn--) {
14892     VIXL_DEFINE_OR_RETURN(temp, MemRead<uint8_t>(xs));
14893     if (!MemWrite<uint8_t>(xd, temp)) return;
14894     LogMemTransfer(xd, xs, temp);
14895     xs += step;
14896     xd += step;
14897   }
14898 
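  // Undo the initial decrement so that a backwards copy leaves the registers
  // pointing at the lowest address that was accessed.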
14899   if (is_backwards) {
14900     xs++;
14901     xd++;
14902   }
14903 
14904   WriteXRegister(d, xd);
14905   WriteXRegister(n, 0);
14906   WriteXRegister(s, xs);
14907 }
14908 
14909 void Simulator::SimulateCpyE(const Instruction* instr) {
14910   USE(instr);
14911   VIXL_ASSERT(instr->IsConsistentMOPSTriplet<"cpy"_h>());
14912   VIXL_ASSERT(instr->IsMOPSEpilogueOf(GetLastExecutedInstruction(), "cpy"_h));
14913   // This implementation does nothing in the epilogue; all copying is completed
14914   // in the "main" part.
14915 }
14916 
14917 void Simulator::SimulateSetP(const Instruction* instr) {
14918   MOPSPHelper<"set"_h>(instr);
14919   LogSystemRegister(NZCV);
14920 }
14921 
14922 void Simulator::SimulateSetM(const Instruction* instr) {
14923   VIXL_ASSERT(instr->IsConsistentMOPSTriplet<"set"_h>());
14924   VIXL_ASSERT(instr->IsMOPSMainOf(GetLastExecutedInstruction(), "set"_h));
14925 
14926   uint64_t xd = ReadXRegister(instr->GetRd());
14927   uint64_t xn = ReadXRegister(instr->GetRn());
14928   uint64_t xs = ReadXRegister(instr->GetRs());
14929 
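  // Store the low byte of xs to each of the xn destination bytes.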
14930   while (xn--) {
14931     LogWrite(instr->GetRs(), GetPrintRegPartial(kPrintRegLaneSizeB), xd);
14932     if (!MemWrite<uint8_t>(xd++, static_cast<uint8_t>(xs))) return;
14933   }
14934   WriteXRegister(instr->GetRd(), xd);
14935   WriteXRegister(instr->GetRn(), 0);
14936 }
14937 
14938 void Simulator::SimulateSetE(const Instruction* instr) {
14939   USE(instr);
14940   VIXL_ASSERT(instr->IsConsistentMOPSTriplet<"set"_h>());
14941   VIXL_ASSERT(instr->IsMOPSEpilogueOf(GetLastExecutedInstruction(), "set"_h));
14942   // This implementation does nothing in the epilogue; all setting is completed
14943   // in the "main" part.
14944 }
14945 
14946 void Simulator::SimulateSetGP(const Instruction* instr) {
14947   MOPSPHelper<"setg"_h>(instr);
14948 
14949   uint64_t xd = ReadXRegister(instr->GetRd());
14950   uint64_t xn = ReadXRegister(instr->GetRn());
14951 
14952   if ((xn > 0) && !IsAligned(xd, kMTETagGranuleInBytes)) {
14953     VIXL_ALIGNMENT_EXCEPTION();
14954   }
14955 
14956   if (!IsAligned(xn, kMTETagGranuleInBytes)) {
14957     VIXL_ALIGNMENT_EXCEPTION();
14958   }
14959 
14960   LogSystemRegister(NZCV);
14961 }
14962 
14963 void Simulator::SimulateSetGM(const Instruction* instr) {
14964   uint64_t xd = ReadXRegister(instr->GetRd());
14965   uint64_t xn = ReadXRegister(instr->GetRn());
14966 
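  // Tag each 16-byte granule covered by the set with the allocation tag of
  // the destination address, then store the data as for a plain SETM.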
14967   int tag = GetAllocationTagFromAddress(xd);
14968   while (xn) {
14969     meta_data_.SetMTETag(xd, tag);
14970     xd += 16;
14971     xn -= 16;
14972   }
14973   SimulateSetM(instr);
14974 }
14975 
14976 void Simulator::DoTrace(const Instruction* instr) {
14977   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
14978               (instr->GetImmException() == kTraceOpcode));
14979 
14980   // Read the arguments encoded inline in the instruction stream.
14981   uint32_t parameters;
14982   uint32_t command;
14983 
14984   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
14985   memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
14986   memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
14987 
14988   switch (command) {
14989     case TRACE_ENABLE:
14990       SetTraceParameters(GetTraceParameters() | parameters);
14991       break;
14992     case TRACE_DISABLE:
14993       SetTraceParameters(GetTraceParameters() & ~parameters);
14994       break;
14995     default:
14996       VIXL_UNREACHABLE();
14997   }
14998 
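  // The trace arguments are inlined in the code, so skip over them.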
14999   WritePc(instr->GetInstructionAtOffset(kTraceLength));
15000 }
15001 
15002 
15003 void Simulator::DoLog(const Instruction* instr) {
15004   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
15005               (instr->GetImmException() == kLogOpcode));
15006 
15007   // Read the arguments encoded inline in the instruction stream.
15008   uint32_t parameters;
15009 
15010   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
15011   memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
15012 
15013   // We don't support a one-shot LOG_DISASM.
15014   VIXL_ASSERT((parameters & LOG_DISASM) == 0);
15015   // Print the requested information.
15016   if (parameters & LOG_SYSREGS) PrintSystemRegisters();
15017   if (parameters & LOG_REGS) PrintRegisters();
15018   if (parameters & LOG_VREGS) PrintVRegisters();
15019 
15020   WritePc(instr->GetInstructionAtOffset(kLogLength));
15021 }
15022 
15023 
15024 void Simulator::DoPrintf(const Instruction* instr) {
15025   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
15026               (instr->GetImmException() == kPrintfOpcode));
15027 
15028   // Read the arguments encoded inline in the instruction stream.
15029   uint32_t arg_count;
15030   uint32_t arg_pattern_list;
15031   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
15032   memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
15033   memcpy(&arg_pattern_list,
15034          instr + kPrintfArgPatternListOffset,
15035          sizeof(arg_pattern_list));
15036 
15037   VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
15038   VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
15039 
15040   // We need to call the host printf function with a set of arguments defined by
15041   // arg_pattern_list. Because we don't know the types and sizes of the
15042   // arguments, this is very difficult to do in a robust and portable way. To
15043   // work around the problem, we pick apart the format string, and print one
15044   // format placeholder at a time.
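  // For example, "x=%d, y=%d\n" is copied as "x=\0%d, y=\0%d\n": the leading
  // "x=" is printed literally, then each chunk ("%d, y=" and "%d\n") is
  // printed with a single register argument.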
15045 
15046   // Allocate space for the format string. We take a copy, so we can modify it.
15047   // Leave enough space for one extra character per expected argument (plus the
15048   // '\0' termination).
15049   const char* format_base = ReadRegister<const char*>(0);
15050   VIXL_ASSERT(format_base != NULL);
15051   size_t length = strlen(format_base) + 1;
15052   char* const format = new char[length + arg_count];
15053 
15054   // A list of chunks, each with exactly one format placeholder.
15055   const char* chunks[kPrintfMaxArgCount];
15056 
15057   // Copy the format string and search for format placeholders.
15058   uint32_t placeholder_count = 0;
15059   char* format_scratch = format;
15060   for (size_t i = 0; i < length; i++) {
15061     if (format_base[i] != '%') {
15062       *format_scratch++ = format_base[i];
15063     } else {
15064       if (format_base[i + 1] == '%') {
15065         // Ignore explicit "%%" sequences.
15066         *format_scratch++ = format_base[i];
15067         i++;
15068         // Chunks after the first are passed as format strings to printf, so we
15069         // need to escape '%' characters in those chunks.
15070         if (placeholder_count > 0) *format_scratch++ = format_base[i];
15071       } else {
15072         VIXL_CHECK(placeholder_count < arg_count);
15073         // Insert '\0' before placeholders, and store their locations.
15074         *format_scratch++ = '\0';
15075         chunks[placeholder_count++] = format_scratch;
15076         *format_scratch++ = format_base[i];
15077       }
15078     }
15079   }
15080   VIXL_CHECK(placeholder_count == arg_count);
15081 
15082   // Finally, call printf with each chunk, passing the appropriate register
15083   // argument. Normally, printf returns the number of bytes transmitted, so we
15084   // can emulate a single printf call by adding the result from each chunk. If
15085   // any call returns a negative (error) value, though, just return that value.
15086 
15087   printf("%s", clr_printf);
15088 
15089   // Because '\0' is inserted before each placeholder, the first string in
15090   // 'format' contains no format placeholders and should be printed literally.
15091   int result = printf("%s", format);
15092   int pcs_r = 1;  // Start at x1. x0 holds the format string.
15093   int pcs_f = 0;  // Start at d0.
15094   if (result >= 0) {
15095     for (uint32_t i = 0; i < placeholder_count; i++) {
15096       int part_result = -1;
15097 
15098       uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
15099       arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
15100       switch (arg_pattern) {
15101         case kPrintfArgW:
15102           part_result = printf(chunks[i], ReadWRegister(pcs_r++));
15103           break;
15104         case kPrintfArgX:
15105           part_result = printf(chunks[i], ReadXRegister(pcs_r++));
15106           break;
15107         case kPrintfArgD:
15108           part_result = printf(chunks[i], ReadDRegister(pcs_f++));
15109           break;
15110         default:
15111           VIXL_UNREACHABLE();
15112       }
15113 
15114       if (part_result < 0) {
15115         // Handle error values.
15116         result = part_result;
15117         break;
15118       }
15119 
15120       result += part_result;
15121     }
15122   }
15123 
15124   printf("%s", clr_normal);
15125 
15126   // Printf returns its result in x0 (just like the C library's printf).
15127   WriteXRegister(0, result);
15128 
15129   // The printf parameters are inlined in the code, so skip them.
15130   WritePc(instr->GetInstructionAtOffset(kPrintfLength));
15131 
15132   // Set LR as if we'd just called a native printf function.
15133   WriteLr(ReadPc());
15134 
15135   delete[] format;
15136 }
15137 
15138 
15139 #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
15140 void Simulator::DoRuntimeCall(const Instruction* instr) {
15141   VIXL_STATIC_ASSERT(kRuntimeCallAddressSize == sizeof(uintptr_t));
15142   // The appropriate `Simulator::SimulateRuntimeCall()` wrapper and the function
15143   // to call are passed inline in the instruction stream.
15144   VIXL_DEFINE_OR_RETURN(call_wrapper_address,
15145                         MemRead<uintptr_t>(instr + kRuntimeCallWrapperOffset));
15146   VIXL_DEFINE_OR_RETURN(function_address,
15147                         MemRead<uintptr_t>(instr + kRuntimeCallFunctionOffset));
15148   VIXL_DEFINE_OR_RETURN(call_type,
15149                         MemRead<uint32_t>(instr + kRuntimeCallTypeOffset));
15150   auto runtime_call_wrapper =
15151       reinterpret_cast<void (*)(Simulator*, uintptr_t)>(call_wrapper_address);
15152 
15153   if (static_cast<RuntimeCallType>(call_type) == kCallRuntime) {
15154     const Instruction* addr = instr->GetInstructionAtOffset(kRuntimeCallLength);
15155     WriteLr(addr);
15156     GCSPush(reinterpret_cast<uint64_t>(addr));
15157   }
15158   runtime_call_wrapper(this, function_address);
15159   // Read the return address from `lr` and write it into `pc`.
15160   uint64_t addr = ReadRegister<uint64_t>(kLinkRegCode);
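  // If Guarded Control Stack checking is enabled, check that the return
  // address in lr matches the entry pushed onto the simulated GCS at the call.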
15161   if (IsGCSCheckEnabled()) {
15162     uint64_t expected_lr = GCSPeek();
15163     char msg[128];
15164     if (expected_lr != 0) {
15165       if ((expected_lr & 0x3) != 0) {
15166         snprintf(msg,
15167                  sizeof(msg),
15168                  "GCS contains misaligned return address: 0x%016" PRIx64 "\n",
15169                  expected_lr);
15170         ReportGCSFailure(msg);
15171       } else if ((addr != 0) && (addr != expected_lr)) {
15172         snprintf(msg,
15173                  sizeof(msg),
15174                  "GCS mismatch: lr = 0x%016" PRIx64 ", gcs = 0x%016" PRIx64
15175                  "\n",
15176                  addr,
15177                  expected_lr);
15178         ReportGCSFailure(msg);
15179       }
15180       GCSPop();
15181     }
15182   }
15183   WritePc(reinterpret_cast<Instruction*>(addr));
15184 }
15185 #else
15186 void Simulator::DoRuntimeCall(const Instruction* instr) {
15187   USE(instr);
15188   VIXL_UNREACHABLE();
15189 }
15190 #endif
15191 
15192 
15193 void Simulator::DoConfigureCPUFeatures(const Instruction* instr) {
15194   VIXL_ASSERT(instr->Mask(ExceptionMask) == HLT);
15195 
15196   typedef ConfigureCPUFeaturesElementType ElementType;
15197   VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <
15198               std::numeric_limits<ElementType>::max());
15199 
15200   // k{Set,Enable,Disable}CPUFeatures have the same parameter encoding.
15201 
15202   size_t element_size = sizeof(ElementType);
15203   size_t offset = kConfigureCPUFeaturesListOffset;
15204 
15205   // Read the kNone-terminated list of features.
15206   CPUFeatures parameters;
15207   while (true) {
15208     VIXL_DEFINE_OR_RETURN(feature, MemRead<ElementType>(instr + offset));
15209     offset += element_size;
15210     if (feature == static_cast<ElementType>(CPUFeatures::kNone)) break;
15211     parameters.Combine(static_cast<CPUFeatures::Feature>(feature));
15212   }
15213 
15214   switch (instr->GetImmException()) {
15215     case kSetCPUFeaturesOpcode:
15216       SetCPUFeatures(parameters);
15217       break;
15218     case kEnableCPUFeaturesOpcode:
15219       GetCPUFeatures()->Combine(parameters);
15220       break;
15221     case kDisableCPUFeaturesOpcode:
15222       GetCPUFeatures()->Remove(parameters);
15223       break;
15224     default:
15225       VIXL_UNREACHABLE();
15226       break;
15227   }
15228 
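  // The feature list is inlined in the code; skip past it, rounding up to an
  // instruction boundary.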
15229   WritePc(instr->GetInstructionAtOffset(AlignUp(offset, kInstructionSize)));
15230 }
15231 
15232 
15233 void Simulator::DoSaveCPUFeatures(const Instruction* instr) {
15234   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
15235               (instr->GetImmException() == kSaveCPUFeaturesOpcode));
15236   USE(instr);
15237 
15238   saved_cpu_features_.push_back(*GetCPUFeatures());
15239 }
15240 
15241 
15242 void Simulator::DoRestoreCPUFeatures(const Instruction* instr) {
15243   VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
15244               (instr->GetImmException() == kRestoreCPUFeaturesOpcode));
15245   USE(instr);
15246 
15247   SetCPUFeatures(saved_cpu_features_.back());
15248   saved_cpu_features_.pop_back();
15249 }
15250 
15251 #ifdef VIXL_HAS_SIMULATED_MMAP
15252 void* Simulator::Mmap(
15253     void* address, size_t length, int prot, int flags, int fd, off_t offset) {
15254   // The underlying system `mmap` in the simulated environment doesn't recognize
15255   // PROT_BTI and PROT_MTE. Although the kernel probably just ignores the bits
15256   // it doesn't know, it is safer to mask those protections out before calling.
15257   int internal_prot = prot;
15258   prot &= ~(PROT_BTI | PROT_MTE);
15259 
15260   uint64_t address2 = reinterpret_cast<uint64_t>(
15261       mmap(address, length, prot, flags, fd, offset));
15262 
15263   if (internal_prot & PROT_MTE) {
15264     // The address returned by `mmap` isn't tagged.
15265     int tag = static_cast<int>(GenerateRandomTag());
15266     SetGranuleTag(address2, tag, length);
15267     address2 = GetAddressWithAllocationTag(address2, tag);
15268   }
15269 
15270   return reinterpret_cast<void*>(address2);
15271 }
15272 
15273 
15274 int Simulator::Munmap(void* address, size_t length, int prot) {
15275   if (prot & PROT_MTE) {
15276     // Untag the address since `munmap` doesn't recognize the memory tagging
15277     // managed by the Simulator.
15278     address = AddressUntag(address);
15279     CleanGranuleTag(reinterpret_cast<char*>(address), length);
15280   }
15281 
15282   return munmap(address, length);
15283 }
15284 #endif  // VIXL_HAS_SIMULATED_MMAP
15285 
15286 }  // namespace aarch64
15287 }  // namespace vixl
15288 
15289 #endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
15290