1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 
28 #include <cmath>
29 
30 #include "assembler-aarch64.h"
31 #include "macro-assembler-aarch64.h"
32 
33 namespace vixl {
34 namespace aarch64 {
35 
36 RawLiteral::RawLiteral(size_t size,
37                        LiteralPool* literal_pool,
38                        DeletionPolicy deletion_policy)
39     : size_(size),
40       offset_(0),
41       low64_(0),
42       high64_(0),
43       literal_pool_(literal_pool),
44       deletion_policy_(deletion_policy) {
45   VIXL_ASSERT((deletion_policy == kManuallyDeleted) || (literal_pool_ != NULL));
46   if (deletion_policy == kDeletedOnPoolDestruction) {
47     literal_pool_->DeleteOnDestruction(this);
48   }
49 }
50 
51 
52 void Assembler::Reset() { GetBuffer()->Reset(); }
53 
54 
55 void Assembler::bind(Label* label) {
56   BindToOffset(label, GetBuffer()->GetCursorOffset());
57 }
58 
59 
60 void Assembler::BindToOffset(Label* label, ptrdiff_t offset) {
61   VIXL_ASSERT((offset >= 0) && (offset <= GetBuffer()->GetCursorOffset()));
62   VIXL_ASSERT(offset % kInstructionSize == 0);
63 
64   label->Bind(offset);
65 
66   for (Label::LabelLinksIterator it(label); !it.Done(); it.Advance()) {
67     Instruction* link =
68         GetBuffer()->GetOffsetAddress<Instruction*>(*it.Current());
69     link->SetImmPCOffsetTarget(GetLabelAddress<Instruction*>(label));
70   }
71   label->ClearAllLinks();
72 }
73 
74 
75 // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
76 //
77 // The offset is calculated by aligning the PC and label addresses down to a
78 // multiple of 1 << element_shift, then calculating the (scaled) offset between
79 // them. This matches the semantics of adrp, for example.
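// For example (illustrative values): with element_shift == kPageSizeLog2
// (12, i.e. 4KB pages), as adrp uses, a cursor at byte address 0x1234 and a
// bound label at 0x5678 are aligned down to 0x1000 and 0x5000 respectively,
// so the helper returns a page offset of 0x5 - 0x1 = 4.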
80 template <int element_shift>
81 ptrdiff_t Assembler::LinkAndGetOffsetTo(Label* label) {
82   VIXL_STATIC_ASSERT(element_shift < (sizeof(ptrdiff_t) * 8));
83 
84   if (label->IsBound()) {
85     uintptr_t pc_offset = GetCursorAddress<uintptr_t>() >> element_shift;
86     uintptr_t label_offset = GetLabelAddress<uintptr_t>(label) >> element_shift;
87     return label_offset - pc_offset;
88   } else {
89     label->AddLink(GetBuffer()->GetCursorOffset());
90     return 0;
91   }
92 }
93 
94 
95 ptrdiff_t Assembler::LinkAndGetByteOffsetTo(Label* label) {
96   return LinkAndGetOffsetTo<0>(label);
97 }
98 
99 
100 ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
101   return LinkAndGetOffsetTo<kInstructionSizeLog2>(label);
102 }
103 
104 
105 ptrdiff_t Assembler::LinkAndGetPageOffsetTo(Label* label) {
106   return LinkAndGetOffsetTo<kPageSizeLog2>(label);
107 }
108 
109 
110 void Assembler::place(RawLiteral* literal) {
111   VIXL_ASSERT(!literal->IsPlaced());
112 
113   // Patch instructions using this literal.
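  // Before the literal is placed, each load-literal instruction that uses it
  // holds, in its imm19 field, a non-positive word offset linking it to the
  // previous use (zero marks the first use). Walk this chain backwards from
  // the last use and patch each link with the literal's real location.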
114   if (literal->IsUsed()) {
115     Instruction* target = GetCursorAddress<Instruction*>();
116     ptrdiff_t offset = literal->GetLastUse();
117     bool done;
118     do {
119       Instruction* ldr = GetBuffer()->GetOffsetAddress<Instruction*>(offset);
120       VIXL_ASSERT(ldr->IsLoadLiteral());
121 
122       ptrdiff_t imm19 = ldr->GetImmLLiteral();
123       VIXL_ASSERT(imm19 <= 0);
124       done = (imm19 == 0);
125       offset += imm19 * kLiteralEntrySize;
126 
127       ldr->SetImmLLiteral(target);
128     } while (!done);
129   }
130 
131   // "bind" the literal.
132   literal->SetOffset(GetCursorOffset());
133   // Copy the data into the pool.
134   switch (literal->GetSize()) {
135     case kSRegSizeInBytes:
136       dc32(literal->GetRawValue32());
137       break;
138     case kDRegSizeInBytes:
139       dc64(literal->GetRawValue64());
140       break;
141     default:
142       VIXL_ASSERT(literal->GetSize() == kQRegSizeInBytes);
143       dc64(literal->GetRawValue128Low64());
144       dc64(literal->GetRawValue128High64());
145   }
146 
147   literal->literal_pool_ = NULL;
148 }
149 
150 
151 ptrdiff_t Assembler::LinkAndGetWordOffsetTo(RawLiteral* literal) {
152   VIXL_ASSERT(IsWordAligned(GetCursorOffset()));
153 
154   bool register_first_use =
155       (literal->GetLiteralPool() != NULL) && !literal->IsUsed();
156 
157   if (literal->IsPlaced()) {
158   // The literal is "behind", so the offset will be negative.
159     VIXL_ASSERT((literal->GetOffset() - GetCursorOffset()) <= 0);
160     return (literal->GetOffset() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
161   }
162 
163   ptrdiff_t offset = 0;
164   // Link all uses together.
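  // The returned offset links this new use back to the previous one; place()
  // later walks this chain to patch every user at once.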
165   if (literal->IsUsed()) {
166     offset =
167         (literal->GetLastUse() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
168   }
169   literal->SetLastUse(GetCursorOffset());
170 
171   if (register_first_use) {
172     literal->GetLiteralPool()->AddEntry(literal);
173   }
174 
175   return offset;
176 }
177 
178 
179 // Code generation.
180 void Assembler::br(const Register& xn) {
181   VIXL_ASSERT(xn.Is64Bits());
182   Emit(BR | Rn(xn));
183 }
184 
185 
186 void Assembler::blr(const Register& xn) {
187   VIXL_ASSERT(xn.Is64Bits());
188   Emit(BLR | Rn(xn));
189 }
190 
191 
192 void Assembler::ret(const Register& xn) {
193   VIXL_ASSERT(xn.Is64Bits());
194   Emit(RET | Rn(xn));
195 }
196 
197 
198 void Assembler::b(int64_t imm26) { Emit(B | ImmUncondBranch(imm26)); }
199 
200 
201 void Assembler::b(int64_t imm19, Condition cond) {
202   Emit(B_cond | ImmCondBranch(imm19) | cond);
203 }
204 
205 
206 void Assembler::b(Label* label) {
207   int64_t offset = LinkAndGetInstructionOffsetTo(label);
208   VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
209   b(static_cast<int>(offset));
210 }
211 
212 
213 void Assembler::b(Label* label, Condition cond) {
214   int64_t offset = LinkAndGetInstructionOffsetTo(label);
215   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CondBranchType, offset));
216   b(static_cast<int>(offset), cond);
217 }
218 
219 
220 void Assembler::bl(int64_t imm26) { Emit(BL | ImmUncondBranch(imm26)); }
221 
222 
223 void Assembler::bl(Label* label) {
224   int64_t offset = LinkAndGetInstructionOffsetTo(label);
225   VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
226   bl(static_cast<int>(offset));
227 }
228 
229 
230 void Assembler::cbz(const Register& rt, int64_t imm19) {
231   Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
232 }
233 
234 
235 void Assembler::cbz(const Register& rt, Label* label) {
236   int64_t offset = LinkAndGetInstructionOffsetTo(label);
237   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
238   cbz(rt, static_cast<int>(offset));
239 }
240 
241 
242 void Assembler::cbnz(const Register& rt, int64_t imm19) {
243   Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
244 }
245 
246 
247 void Assembler::cbnz(const Register& rt, Label* label) {
248   int64_t offset = LinkAndGetInstructionOffsetTo(label);
249   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
250   cbnz(rt, static_cast<int>(offset));
251 }
252 
253 
254 void Assembler::NEONTable(const VRegister& vd,
255                           const VRegister& vn,
256                           const VRegister& vm,
257                           NEONTableOp op) {
258   VIXL_ASSERT(vd.Is16B() || vd.Is8B());
259   VIXL_ASSERT(vn.Is16B());
260   VIXL_ASSERT(AreSameFormat(vd, vm));
261   Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
262 }
263 
264 
265 void Assembler::tbl(const VRegister& vd,
266                     const VRegister& vn,
267                     const VRegister& vm) {
268   NEONTable(vd, vn, vm, NEON_TBL_1v);
269 }
270 
271 
272 void Assembler::tbl(const VRegister& vd,
273                     const VRegister& vn,
274                     const VRegister& vn2,
275                     const VRegister& vm) {
276   USE(vn2);
277   VIXL_ASSERT(AreSameFormat(vn, vn2));
278   VIXL_ASSERT(AreConsecutive(vn, vn2));
279   NEONTable(vd, vn, vm, NEON_TBL_2v);
280 }
281 
282 
283 void Assembler::tbl(const VRegister& vd,
284                     const VRegister& vn,
285                     const VRegister& vn2,
286                     const VRegister& vn3,
287                     const VRegister& vm) {
288   USE(vn2, vn3);
289   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
290   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
291   NEONTable(vd, vn, vm, NEON_TBL_3v);
292 }
293 
294 
295 void Assembler::tbl(const VRegister& vd,
296                     const VRegister& vn,
297                     const VRegister& vn2,
298                     const VRegister& vn3,
299                     const VRegister& vn4,
300                     const VRegister& vm) {
301   USE(vn2, vn3, vn4);
302   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
303   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
304   NEONTable(vd, vn, vm, NEON_TBL_4v);
305 }
306 
307 
308 void Assembler::tbx(const VRegister& vd,
309                     const VRegister& vn,
310                     const VRegister& vm) {
311   NEONTable(vd, vn, vm, NEON_TBX_1v);
312 }
313 
314 
315 void Assembler::tbx(const VRegister& vd,
316                     const VRegister& vn,
317                     const VRegister& vn2,
318                     const VRegister& vm) {
319   USE(vn2);
320   VIXL_ASSERT(AreSameFormat(vn, vn2));
321   VIXL_ASSERT(AreConsecutive(vn, vn2));
322   NEONTable(vd, vn, vm, NEON_TBX_2v);
323 }
324 
325 
326 void Assembler::tbx(const VRegister& vd,
327                     const VRegister& vn,
328                     const VRegister& vn2,
329                     const VRegister& vn3,
330                     const VRegister& vm) {
331   USE(vn2, vn3);
332   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
333   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
334   NEONTable(vd, vn, vm, NEON_TBX_3v);
335 }
336 
337 
338 void Assembler::tbx(const VRegister& vd,
339                     const VRegister& vn,
340                     const VRegister& vn2,
341                     const VRegister& vn3,
342                     const VRegister& vn4,
343                     const VRegister& vm) {
344   USE(vn2, vn3, vn4);
345   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
346   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
347   NEONTable(vd, vn, vm, NEON_TBX_4v);
348 }
349 
350 
351 void Assembler::tbz(const Register& rt, unsigned bit_pos, int64_t imm14) {
352   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
353   Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
354 }
355 
356 
357 void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
358   ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
359   VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
360   tbz(rt, bit_pos, static_cast<int>(offset));
361 }
362 
363 
364 void Assembler::tbnz(const Register& rt, unsigned bit_pos, int64_t imm14) {
365   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
366   Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
367 }
368 
369 
370 void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
371   ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
372   VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
373   tbnz(rt, bit_pos, static_cast<int>(offset));
374 }
375 
376 
377 void Assembler::adr(const Register& xd, int64_t imm21) {
378   VIXL_ASSERT(xd.Is64Bits());
379   Emit(ADR | ImmPCRelAddress(imm21) | Rd(xd));
380 }
381 
382 
383 void Assembler::adr(const Register& xd, Label* label) {
384   adr(xd, static_cast<int>(LinkAndGetByteOffsetTo(label)));
385 }
386 
387 
388 void Assembler::adrp(const Register& xd, int64_t imm21) {
389   VIXL_ASSERT(xd.Is64Bits());
390   Emit(ADRP | ImmPCRelAddress(imm21) | Rd(xd));
391 }
392 
393 
394 void Assembler::adrp(const Register& xd, Label* label) {
395   VIXL_ASSERT(AllowPageOffsetDependentCode());
396   adrp(xd, static_cast<int>(LinkAndGetPageOffsetTo(label)));
397 }
398 
399 
400 void Assembler::add(const Register& rd,
401                     const Register& rn,
402                     const Operand& operand) {
403   AddSub(rd, rn, operand, LeaveFlags, ADD);
404 }
405 
406 
407 void Assembler::adds(const Register& rd,
408                      const Register& rn,
409                      const Operand& operand) {
410   AddSub(rd, rn, operand, SetFlags, ADD);
411 }
412 
413 
414 void Assembler::cmn(const Register& rn, const Operand& operand) {
415   Register zr = AppropriateZeroRegFor(rn);
416   adds(zr, rn, operand);
417 }
418 
419 
420 void Assembler::sub(const Register& rd,
421                     const Register& rn,
422                     const Operand& operand) {
423   AddSub(rd, rn, operand, LeaveFlags, SUB);
424 }
425 
426 
427 void Assembler::subs(const Register& rd,
428                      const Register& rn,
429                      const Operand& operand) {
430   AddSub(rd, rn, operand, SetFlags, SUB);
431 }
432 
433 
434 void Assembler::cmp(const Register& rn, const Operand& operand) {
435   Register zr = AppropriateZeroRegFor(rn);
436   subs(zr, rn, operand);
437 }
438 
439 
440 void Assembler::neg(const Register& rd, const Operand& operand) {
441   Register zr = AppropriateZeroRegFor(rd);
442   sub(rd, zr, operand);
443 }
444 
445 
446 void Assembler::negs(const Register& rd, const Operand& operand) {
447   Register zr = AppropriateZeroRegFor(rd);
448   subs(rd, zr, operand);
449 }
450 
451 
452 void Assembler::adc(const Register& rd,
453                     const Register& rn,
454                     const Operand& operand) {
455   AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
456 }
457 
458 
459 void Assembler::adcs(const Register& rd,
460                      const Register& rn,
461                      const Operand& operand) {
462   AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
463 }
464 
465 
466 void Assembler::sbc(const Register& rd,
467                     const Register& rn,
468                     const Operand& operand) {
469   AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
470 }
471 
472 
473 void Assembler::sbcs(const Register& rd,
474                      const Register& rn,
475                      const Operand& operand) {
476   AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
477 }
478 
479 
480 void Assembler::ngc(const Register& rd, const Operand& operand) {
481   Register zr = AppropriateZeroRegFor(rd);
482   sbc(rd, zr, operand);
483 }
484 
485 
486 void Assembler::ngcs(const Register& rd, const Operand& operand) {
487   Register zr = AppropriateZeroRegFor(rd);
488   sbcs(rd, zr, operand);
489 }
490 
491 
492 // Logical instructions.
493 void Assembler::and_(const Register& rd,
494                      const Register& rn,
495                      const Operand& operand) {
496   Logical(rd, rn, operand, AND);
497 }
498 
499 
500 void Assembler::ands(const Register& rd,
501                      const Register& rn,
502                      const Operand& operand) {
503   Logical(rd, rn, operand, ANDS);
504 }
505 
506 
507 void Assembler::tst(const Register& rn, const Operand& operand) {
508   ands(AppropriateZeroRegFor(rn), rn, operand);
509 }
510 
511 
512 void Assembler::bic(const Register& rd,
513                     const Register& rn,
514                     const Operand& operand) {
515   Logical(rd, rn, operand, BIC);
516 }
517 
518 
519 void Assembler::bics(const Register& rd,
520                      const Register& rn,
521                      const Operand& operand) {
522   Logical(rd, rn, operand, BICS);
523 }
524 
525 
526 void Assembler::orr(const Register& rd,
527                     const Register& rn,
528                     const Operand& operand) {
529   Logical(rd, rn, operand, ORR);
530 }
531 
532 
533 void Assembler::orn(const Register& rd,
534                     const Register& rn,
535                     const Operand& operand) {
536   Logical(rd, rn, operand, ORN);
537 }
538 
539 
540 void Assembler::eor(const Register& rd,
541                     const Register& rn,
542                     const Operand& operand) {
543   Logical(rd, rn, operand, EOR);
544 }
545 
546 
547 void Assembler::eon(const Register& rd,
548                     const Register& rn,
549                     const Operand& operand) {
550   Logical(rd, rn, operand, EON);
551 }
552 
553 
554 void Assembler::lslv(const Register& rd,
555                      const Register& rn,
556                      const Register& rm) {
557   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
558   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
559   Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
560 }
561 
562 
563 void Assembler::lsrv(const Register& rd,
564                      const Register& rn,
565                      const Register& rm) {
566   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
567   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
568   Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
569 }
570 
571 
572 void Assembler::asrv(const Register& rd,
573                      const Register& rn,
574                      const Register& rm) {
575   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
576   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
577   Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
578 }
579 
580 
581 void Assembler::rorv(const Register& rd,
582                      const Register& rn,
583                      const Register& rm) {
584   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
585   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
586   Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
587 }
588 
589 
590 // Bitfield operations.
591 void Assembler::bfm(const Register& rd,
592                     const Register& rn,
593                     unsigned immr,
594                     unsigned imms) {
595   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
596   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
597   Emit(SF(rd) | BFM | N | ImmR(immr, rd.GetSizeInBits()) |
598        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
599 }
600 
601 
602 void Assembler::sbfm(const Register& rd,
603                      const Register& rn,
604                      unsigned immr,
605                      unsigned imms) {
606   VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
607   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
608   Emit(SF(rd) | SBFM | N | ImmR(immr, rd.GetSizeInBits()) |
609        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
610 }
611 
612 
613 void Assembler::ubfm(const Register& rd,
614                      const Register& rn,
615                      unsigned immr,
616                      unsigned imms) {
617   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
618   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
619   Emit(SF(rd) | UBFM | N | ImmR(immr, rd.GetSizeInBits()) |
620        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
621 }
622 
623 
624 void Assembler::extr(const Register& rd,
625                      const Register& rn,
626                      const Register& rm,
627                      unsigned lsb) {
628   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
629   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
630   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
631   Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.GetSizeInBits()) | Rn(rn) |
632        Rd(rd));
633 }
634 
635 
636 void Assembler::csel(const Register& rd,
637                      const Register& rn,
638                      const Register& rm,
639                      Condition cond) {
640   ConditionalSelect(rd, rn, rm, cond, CSEL);
641 }
642 
643 
644 void Assembler::csinc(const Register& rd,
645                       const Register& rn,
646                       const Register& rm,
647                       Condition cond) {
648   ConditionalSelect(rd, rn, rm, cond, CSINC);
649 }
650 
651 
652 void Assembler::csinv(const Register& rd,
653                       const Register& rn,
654                       const Register& rm,
655                       Condition cond) {
656   ConditionalSelect(rd, rn, rm, cond, CSINV);
657 }
658 
659 
660 void Assembler::csneg(const Register& rd,
661                       const Register& rn,
662                       const Register& rm,
663                       Condition cond) {
664   ConditionalSelect(rd, rn, rm, cond, CSNEG);
665 }
666 
667 
668 void Assembler::cset(const Register& rd, Condition cond) {
669   VIXL_ASSERT((cond != al) && (cond != nv));
670   Register zr = AppropriateZeroRegFor(rd);
671   csinc(rd, zr, zr, InvertCondition(cond));
672 }
673 
674 
675 void Assembler::csetm(const Register& rd, Condition cond) {
676   VIXL_ASSERT((cond != al) && (cond != nv));
677   Register zr = AppropriateZeroRegFor(rd);
678   csinv(rd, zr, zr, InvertCondition(cond));
679 }
680 
681 
682 void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
683   VIXL_ASSERT((cond != al) && (cond != nv));
684   csinc(rd, rn, rn, InvertCondition(cond));
685 }
686 
687 
688 void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
689   VIXL_ASSERT((cond != al) && (cond != nv));
690   csinv(rd, rn, rn, InvertCondition(cond));
691 }
692 
693 
694 void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
695   VIXL_ASSERT((cond != al) && (cond != nv));
696   csneg(rd, rn, rn, InvertCondition(cond));
697 }
698 
699 
700 void Assembler::ConditionalSelect(const Register& rd,
701                                   const Register& rn,
702                                   const Register& rm,
703                                   Condition cond,
704                                   ConditionalSelectOp op) {
705   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
706   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
707   Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
708 }
709 
710 
711 void Assembler::ccmn(const Register& rn,
712                      const Operand& operand,
713                      StatusFlags nzcv,
714                      Condition cond) {
715   ConditionalCompare(rn, operand, nzcv, cond, CCMN);
716 }
717 
718 
719 void Assembler::ccmp(const Register& rn,
720                      const Operand& operand,
721                      StatusFlags nzcv,
722                      Condition cond) {
723   ConditionalCompare(rn, operand, nzcv, cond, CCMP);
724 }
725 
726 
727 void Assembler::DataProcessing3Source(const Register& rd,
728                                       const Register& rn,
729                                       const Register& rm,
730                                       const Register& ra,
731                                       DataProcessing3SourceOp op) {
732   Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
733 }
734 
735 
736 void Assembler::crc32b(const Register& wd,
737                        const Register& wn,
738                        const Register& wm) {
739   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
740   Emit(SF(wm) | Rm(wm) | CRC32B | Rn(wn) | Rd(wd));
741 }
742 
743 
744 void Assembler::crc32h(const Register& wd,
745                        const Register& wn,
746                        const Register& wm) {
747   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
748   Emit(SF(wm) | Rm(wm) | CRC32H | Rn(wn) | Rd(wd));
749 }
750 
751 
752 void Assembler::crc32w(const Register& wd,
753                        const Register& wn,
754                        const Register& wm) {
755   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
756   Emit(SF(wm) | Rm(wm) | CRC32W | Rn(wn) | Rd(wd));
757 }
758 
759 
760 void Assembler::crc32x(const Register& wd,
761                        const Register& wn,
762                        const Register& xm) {
763   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
764   Emit(SF(xm) | Rm(xm) | CRC32X | Rn(wn) | Rd(wd));
765 }
766 
767 
768 void Assembler::crc32cb(const Register& wd,
769                         const Register& wn,
770                         const Register& wm) {
771   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
772   Emit(SF(wm) | Rm(wm) | CRC32CB | Rn(wn) | Rd(wd));
773 }
774 
775 
776 void Assembler::crc32ch(const Register& wd,
777                         const Register& wn,
778                         const Register& wm) {
779   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
780   Emit(SF(wm) | Rm(wm) | CRC32CH | Rn(wn) | Rd(wd));
781 }
782 
783 
784 void Assembler::crc32cw(const Register& wd,
785                         const Register& wn,
786                         const Register& wm) {
787   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
788   Emit(SF(wm) | Rm(wm) | CRC32CW | Rn(wn) | Rd(wd));
789 }
790 
791 
792 void Assembler::crc32cx(const Register& wd,
793                         const Register& wn,
794                         const Register& xm) {
795   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
796   Emit(SF(xm) | Rm(xm) | CRC32CX | Rn(wn) | Rd(wd));
797 }
798 
799 
800 void Assembler::mul(const Register& rd,
801                     const Register& rn,
802                     const Register& rm) {
803   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
804   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
805 }
806 
807 
808 void Assembler::madd(const Register& rd,
809                      const Register& rn,
810                      const Register& rm,
811                      const Register& ra) {
812   DataProcessing3Source(rd, rn, rm, ra, MADD);
813 }
814 
815 
816 void Assembler::mneg(const Register& rd,
817                      const Register& rn,
818                      const Register& rm) {
819   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
820   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
821 }
822 
823 
824 void Assembler::msub(const Register& rd,
825                      const Register& rn,
826                      const Register& rm,
827                      const Register& ra) {
828   DataProcessing3Source(rd, rn, rm, ra, MSUB);
829 }
830 
831 
832 void Assembler::umaddl(const Register& xd,
833                        const Register& wn,
834                        const Register& wm,
835                        const Register& xa) {
836   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
837   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
838   DataProcessing3Source(xd, wn, wm, xa, UMADDL_x);
839 }
840 
841 
842 void Assembler::smaddl(const Register& xd,
843                        const Register& wn,
844                        const Register& wm,
845                        const Register& xa) {
846   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
847   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
848   DataProcessing3Source(xd, wn, wm, xa, SMADDL_x);
849 }
850 
851 
852 void Assembler::umsubl(const Register& xd,
853                        const Register& wn,
854                        const Register& wm,
855                        const Register& xa) {
856   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
857   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
858   DataProcessing3Source(xd, wn, wm, xa, UMSUBL_x);
859 }
860 
861 
862 void Assembler::smsubl(const Register& xd,
863                        const Register& wn,
864                        const Register& wm,
865                        const Register& xa) {
866   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
867   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
868   DataProcessing3Source(xd, wn, wm, xa, SMSUBL_x);
869 }
870 
871 
872 void Assembler::smull(const Register& xd,
873                       const Register& wn,
874                       const Register& wm) {
875   VIXL_ASSERT(xd.Is64Bits());
876   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
877   DataProcessing3Source(xd, wn, wm, xzr, SMADDL_x);
878 }
879 
880 
881 void Assembler::sdiv(const Register& rd,
882                      const Register& rn,
883                      const Register& rm) {
884   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
885   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
886   Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
887 }
888 
889 
890 void Assembler::smulh(const Register& xd,
891                       const Register& xn,
892                       const Register& xm) {
893   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
894   DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
895 }
896 
897 
898 void Assembler::umulh(const Register& xd,
899                       const Register& xn,
900                       const Register& xm) {
901   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
902   DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
903 }
904 
905 
906 void Assembler::udiv(const Register& rd,
907                      const Register& rn,
908                      const Register& rm) {
909   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
910   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
911   Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
912 }
913 
914 
915 void Assembler::rbit(const Register& rd, const Register& rn) {
916   DataProcessing1Source(rd, rn, RBIT);
917 }
918 
919 
920 void Assembler::rev16(const Register& rd, const Register& rn) {
921   DataProcessing1Source(rd, rn, REV16);
922 }
923 
924 
925 void Assembler::rev32(const Register& xd, const Register& xn) {
926   VIXL_ASSERT(xd.Is64Bits());
927   DataProcessing1Source(xd, xn, REV);
928 }
929 
930 
931 void Assembler::rev(const Register& rd, const Register& rn) {
932   DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
933 }
934 
935 
936 void Assembler::clz(const Register& rd, const Register& rn) {
937   DataProcessing1Source(rd, rn, CLZ);
938 }
939 
940 
941 void Assembler::cls(const Register& rd, const Register& rn) {
942   DataProcessing1Source(rd, rn, CLS);
943 }
944 
945 
946 void Assembler::ldp(const CPURegister& rt,
947                     const CPURegister& rt2,
948                     const MemOperand& src) {
949   LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
950 }
951 
952 
953 void Assembler::stp(const CPURegister& rt,
954                     const CPURegister& rt2,
955                     const MemOperand& dst) {
956   LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
957 }
958 
959 
960 void Assembler::ldpsw(const Register& xt,
961                       const Register& xt2,
962                       const MemOperand& src) {
963   VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits());
964   LoadStorePair(xt, xt2, src, LDPSW_x);
965 }
966 
967 
968 void Assembler::LoadStorePair(const CPURegister& rt,
969                               const CPURegister& rt2,
970                               const MemOperand& addr,
971                               LoadStorePairOp op) {
972   // 'rt' and 'rt2' can only be aliased for stores.
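  // (A load pair that writes the same register twice is architecturally
  // unpredictable, which is why aliasing is rejected for loads.)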
973   VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
974   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
975   VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), CalcLSPairDataSize(op)));
976 
977   int offset = static_cast<int>(addr.GetOffset());
978   Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
979                 ImmLSPair(offset, CalcLSPairDataSize(op));
980 
981   Instr addrmodeop;
982   if (addr.IsImmediateOffset()) {
983     addrmodeop = LoadStorePairOffsetFixed;
984   } else {
985     VIXL_ASSERT(addr.GetOffset() != 0);
986     if (addr.IsPreIndex()) {
987       addrmodeop = LoadStorePairPreIndexFixed;
988     } else {
989       VIXL_ASSERT(addr.IsPostIndex());
990       addrmodeop = LoadStorePairPostIndexFixed;
991     }
992   }
993   Emit(addrmodeop | memop);
994 }
995 
996 
997 void Assembler::ldnp(const CPURegister& rt,
998                      const CPURegister& rt2,
999                      const MemOperand& src) {
1000   LoadStorePairNonTemporal(rt, rt2, src, LoadPairNonTemporalOpFor(rt, rt2));
1001 }
1002 
1003 
1004 void Assembler::stnp(const CPURegister& rt,
1005                      const CPURegister& rt2,
1006                      const MemOperand& dst) {
1007   LoadStorePairNonTemporal(rt, rt2, dst, StorePairNonTemporalOpFor(rt, rt2));
1008 }
1009 
1010 
1011 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
1012                                          const CPURegister& rt2,
1013                                          const MemOperand& addr,
1014                                          LoadStorePairNonTemporalOp op) {
1015   VIXL_ASSERT(!rt.Is(rt2));
1016   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
1017   VIXL_ASSERT(addr.IsImmediateOffset());
1018 
1019   unsigned size =
1020       CalcLSPairDataSize(static_cast<LoadStorePairOp>(op & LoadStorePairMask));
1021   VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), size));
1022   int offset = static_cast<int>(addr.GetOffset());
1023   Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
1024        ImmLSPair(offset, size));
1025 }
1026 
1027 
1028 // Memory instructions.
1029 void Assembler::ldrb(const Register& rt,
1030                      const MemOperand& src,
1031                      LoadStoreScalingOption option) {
1032   VIXL_ASSERT(option != RequireUnscaledOffset);
1033   VIXL_ASSERT(option != PreferUnscaledOffset);
1034   LoadStore(rt, src, LDRB_w, option);
1035 }
1036 
1037 
1038 void Assembler::strb(const Register& rt,
1039                      const MemOperand& dst,
1040                      LoadStoreScalingOption option) {
1041   VIXL_ASSERT(option != RequireUnscaledOffset);
1042   VIXL_ASSERT(option != PreferUnscaledOffset);
1043   LoadStore(rt, dst, STRB_w, option);
1044 }
1045 
1046 
1047 void Assembler::ldrsb(const Register& rt,
1048                       const MemOperand& src,
1049                       LoadStoreScalingOption option) {
1050   VIXL_ASSERT(option != RequireUnscaledOffset);
1051   VIXL_ASSERT(option != PreferUnscaledOffset);
1052   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
1053 }
1054 
1055 
1056 void Assembler::ldrh(const Register& rt,
1057                      const MemOperand& src,
1058                      LoadStoreScalingOption option) {
1059   VIXL_ASSERT(option != RequireUnscaledOffset);
1060   VIXL_ASSERT(option != PreferUnscaledOffset);
1061   LoadStore(rt, src, LDRH_w, option);
1062 }
1063 
1064 
1065 void Assembler::strh(const Register& rt,
1066                      const MemOperand& dst,
1067                      LoadStoreScalingOption option) {
1068   VIXL_ASSERT(option != RequireUnscaledOffset);
1069   VIXL_ASSERT(option != PreferUnscaledOffset);
1070   LoadStore(rt, dst, STRH_w, option);
1071 }
1072 
1073 
1074 void Assembler::ldrsh(const Register& rt,
1075                       const MemOperand& src,
1076                       LoadStoreScalingOption option) {
1077   VIXL_ASSERT(option != RequireUnscaledOffset);
1078   VIXL_ASSERT(option != PreferUnscaledOffset);
1079   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
1080 }
1081 
1082 
1083 void Assembler::ldr(const CPURegister& rt,
1084                     const MemOperand& src,
1085                     LoadStoreScalingOption option) {
1086   VIXL_ASSERT(option != RequireUnscaledOffset);
1087   VIXL_ASSERT(option != PreferUnscaledOffset);
1088   LoadStore(rt, src, LoadOpFor(rt), option);
1089 }
1090 
1091 
1092 void Assembler::str(const CPURegister& rt,
1093                     const MemOperand& dst,
1094                     LoadStoreScalingOption option) {
1095   VIXL_ASSERT(option != RequireUnscaledOffset);
1096   VIXL_ASSERT(option != PreferUnscaledOffset);
1097   LoadStore(rt, dst, StoreOpFor(rt), option);
1098 }
1099 
1100 
1101 void Assembler::ldrsw(const Register& xt,
1102                       const MemOperand& src,
1103                       LoadStoreScalingOption option) {
1104   VIXL_ASSERT(xt.Is64Bits());
1105   VIXL_ASSERT(option != RequireUnscaledOffset);
1106   VIXL_ASSERT(option != PreferUnscaledOffset);
1107   LoadStore(xt, src, LDRSW_x, option);
1108 }
1109 
1110 
1111 void Assembler::ldurb(const Register& rt,
1112                       const MemOperand& src,
1113                       LoadStoreScalingOption option) {
1114   VIXL_ASSERT(option != RequireScaledOffset);
1115   VIXL_ASSERT(option != PreferScaledOffset);
1116   LoadStore(rt, src, LDRB_w, option);
1117 }
1118 
1119 
1120 void Assembler::sturb(const Register& rt,
1121                       const MemOperand& dst,
1122                       LoadStoreScalingOption option) {
1123   VIXL_ASSERT(option != RequireScaledOffset);
1124   VIXL_ASSERT(option != PreferScaledOffset);
1125   LoadStore(rt, dst, STRB_w, option);
1126 }
1127 
1128 
1129 void Assembler::ldursb(const Register& rt,
1130                        const MemOperand& src,
1131                        LoadStoreScalingOption option) {
1132   VIXL_ASSERT(option != RequireScaledOffset);
1133   VIXL_ASSERT(option != PreferScaledOffset);
1134   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
1135 }
1136 
1137 
1138 void Assembler::ldurh(const Register& rt,
1139                       const MemOperand& src,
1140                       LoadStoreScalingOption option) {
1141   VIXL_ASSERT(option != RequireScaledOffset);
1142   VIXL_ASSERT(option != PreferScaledOffset);
1143   LoadStore(rt, src, LDRH_w, option);
1144 }
1145 
1146 
1147 void Assembler::sturh(const Register& rt,
1148                       const MemOperand& dst,
1149                       LoadStoreScalingOption option) {
1150   VIXL_ASSERT(option != RequireScaledOffset);
1151   VIXL_ASSERT(option != PreferScaledOffset);
1152   LoadStore(rt, dst, STRH_w, option);
1153 }
1154 
1155 
1156 void Assembler::ldursh(const Register& rt,
1157                        const MemOperand& src,
1158                        LoadStoreScalingOption option) {
1159   VIXL_ASSERT(option != RequireScaledOffset);
1160   VIXL_ASSERT(option != PreferScaledOffset);
1161   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
1162 }
1163 
1164 
1165 void Assembler::ldur(const CPURegister& rt,
1166                      const MemOperand& src,
1167                      LoadStoreScalingOption option) {
1168   VIXL_ASSERT(option != RequireScaledOffset);
1169   VIXL_ASSERT(option != PreferScaledOffset);
1170   LoadStore(rt, src, LoadOpFor(rt), option);
1171 }
1172 
1173 
1174 void Assembler::stur(const CPURegister& rt,
1175                      const MemOperand& dst,
1176                      LoadStoreScalingOption option) {
1177   VIXL_ASSERT(option != RequireScaledOffset);
1178   VIXL_ASSERT(option != PreferScaledOffset);
1179   LoadStore(rt, dst, StoreOpFor(rt), option);
1180 }
1181 
1182 
1183 void Assembler::ldursw(const Register& xt,
1184                        const MemOperand& src,
1185                        LoadStoreScalingOption option) {
1186   VIXL_ASSERT(xt.Is64Bits());
1187   VIXL_ASSERT(option != RequireScaledOffset);
1188   VIXL_ASSERT(option != PreferScaledOffset);
1189   LoadStore(xt, src, LDRSW_x, option);
1190 }
1191 
1192 
1193 void Assembler::ldrsw(const Register& xt, RawLiteral* literal) {
1194   VIXL_ASSERT(xt.Is64Bits());
1195   VIXL_ASSERT(literal->GetSize() == kWRegSizeInBytes);
1196   ldrsw(xt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
1197 }
1198 
1199 
1200 void Assembler::ldr(const CPURegister& rt, RawLiteral* literal) {
1201   VIXL_ASSERT(literal->GetSize() == static_cast<size_t>(rt.GetSizeInBytes()));
1202   ldr(rt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
1203 }
1204 
1205 
1206 void Assembler::ldrsw(const Register& rt, int64_t imm19) {
1207   Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
1208 }
1209 
1210 
1211 void Assembler::ldr(const CPURegister& rt, int64_t imm19) {
1212   LoadLiteralOp op = LoadLiteralOpFor(rt);
1213   Emit(op | ImmLLiteral(imm19) | Rt(rt));
1214 }
1215 
1216 
1217 void Assembler::prfm(PrefetchOperation op, int64_t imm19) {
1218   Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
1219 }
1220 
1221 
1222 // Exclusive-access instructions.
1223 void Assembler::stxrb(const Register& rs,
1224                       const Register& rt,
1225                       const MemOperand& dst) {
1226   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1227   Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1228 }
1229 
1230 
1231 void Assembler::stxrh(const Register& rs,
1232                       const Register& rt,
1233                       const MemOperand& dst) {
1234   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1235   Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1236 }
1237 
1238 
1239 void Assembler::stxr(const Register& rs,
1240                      const Register& rt,
1241                      const MemOperand& dst) {
1242   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1243   LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
1244   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1245 }
1246 
1247 
1248 void Assembler::ldxrb(const Register& rt, const MemOperand& src) {
1249   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1250   Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1251 }
1252 
1253 
1254 void Assembler::ldxrh(const Register& rt, const MemOperand& src) {
1255   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1256   Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1257 }
1258 
1259 
1260 void Assembler::ldxr(const Register& rt, const MemOperand& src) {
1261   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1262   LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
1263   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1264 }
1265 
1266 
1267 void Assembler::stxp(const Register& rs,
1268                      const Register& rt,
1269                      const Register& rt2,
1270                      const MemOperand& dst) {
1271   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
1272   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1273   LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
1274   Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
1275 }
1276 
1277 
1278 void Assembler::ldxp(const Register& rt,
1279                      const Register& rt2,
1280                      const MemOperand& src) {
1281   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
1282   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1283   LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
1284   Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
1285 }
1286 
1287 
1288 void Assembler::stlxrb(const Register& rs,
1289                        const Register& rt,
1290                        const MemOperand& dst) {
1291   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1292   Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1293 }
1294 
1295 
1296 void Assembler::stlxrh(const Register& rs,
1297                        const Register& rt,
1298                        const MemOperand& dst) {
1299   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1300   Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1301 }
1302 
1303 
1304 void Assembler::stlxr(const Register& rs,
1305                       const Register& rt,
1306                       const MemOperand& dst) {
1307   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1308   LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
1309   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1310 }
1311 
1312 
1313 void Assembler::ldaxrb(const Register& rt, const MemOperand& src) {
1314   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1315   Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1316 }
1317 
1318 
1319 void Assembler::ldaxrh(const Register& rt, const MemOperand& src) {
1320   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1321   Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1322 }
1323 
1324 
1325 void Assembler::ldaxr(const Register& rt, const MemOperand& src) {
1326   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1327   LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
1328   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1329 }
1330 
1331 
1332 void Assembler::stlxp(const Register& rs,
1333                       const Register& rt,
1334                       const Register& rt2,
1335                       const MemOperand& dst) {
1336   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
1337   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1338   LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
1339   Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
1340 }
1341 
1342 
1343 void Assembler::ldaxp(const Register& rt,
1344                       const Register& rt2,
1345                       const MemOperand& src) {
1346   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
1347   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1348   LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
1349   Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
1350 }
1351 
1352 
1353 void Assembler::stlrb(const Register& rt, const MemOperand& dst) {
1354   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1355   Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1356 }
1357 
1358 
1359 void Assembler::stlrh(const Register& rt, const MemOperand& dst) {
1360   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1361   Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1362 }
1363 
1364 
1365 void Assembler::stlr(const Register& rt, const MemOperand& dst) {
1366   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
1367   LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
1368   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
1369 }
1370 
1371 
1372 void Assembler::ldarb(const Register& rt, const MemOperand& src) {
1373   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1374   Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1375 }
1376 
1377 
1378 void Assembler::ldarh(const Register& rt, const MemOperand& src) {
1379   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1380   Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1381 }
1382 
1383 
1384 void Assembler::ldar(const Register& rt, const MemOperand& src) {
1385   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
1386   LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
1387   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
1388 }
1389 
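// Illustrative only: a typical load-acquire/store-release exclusive (LL/SC)
// retry loop built from the emitters above. The Assembler instance 'assm'
// and the register choices are assumptions for this sketch, not part of the
// file itself.
//
//   Label retry;
//   assm.bind(&retry);
//   assm.ldaxr(x0, MemOperand(x2));      // load-acquire exclusive from [x2]
//   assm.add(x0, x0, 1);
//   assm.stlxr(w1, x0, MemOperand(x2));  // store-release exclusive; status in w1
//   assm.cbnz(w1, &retry);               // non-zero status: the store failed, retry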
1390 
1391 void Assembler::prfm(PrefetchOperation op,
1392                      const MemOperand& address,
1393                      LoadStoreScalingOption option) {
1394   VIXL_ASSERT(option != RequireUnscaledOffset);
1395   VIXL_ASSERT(option != PreferUnscaledOffset);
1396   Prefetch(op, address, option);
1397 }
1398 
1399 
1400 void Assembler::prfum(PrefetchOperation op,
1401                       const MemOperand& address,
1402                       LoadStoreScalingOption option) {
1403   VIXL_ASSERT(option != RequireScaledOffset);
1404   VIXL_ASSERT(option != PreferScaledOffset);
1405   Prefetch(op, address, option);
1406 }
1407 
1408 
1409 void Assembler::prfm(PrefetchOperation op, RawLiteral* literal) {
1410   prfm(op, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
1411 }
1412 
1413 
1414 void Assembler::sys(int op1, int crn, int crm, int op2, const Register& xt) {
1415   VIXL_ASSERT(xt.Is64Bits());
1416   Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(xt));
1417 }
1418 
1419 
1420 void Assembler::sys(int op, const Register& xt) {
1421   VIXL_ASSERT(xt.Is64Bits());
1422   Emit(SYS | SysOp(op) | Rt(xt));
1423 }
1424 
1425 
1426 void Assembler::dc(DataCacheOp op, const Register& rt) {
1427   VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
1428   sys(op, rt);
1429 }
1430 
1431 
1432 void Assembler::ic(InstructionCacheOp op, const Register& rt) {
1433   VIXL_ASSERT(op == IVAU);
1434   sys(op, rt);
1435 }
1436 
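// Illustrative only ('assm' and the use of x0 as a virtual-address register
// are assumptions): cache maintenance is expressed through the sys()
// encodings wrapped above, e.g.
//   assm.dc(CVAU, x0);   // clean data cache line by VA to point of unification
//   assm.ic(IVAU, x0);   // invalidate instruction cache line by VA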
1437 
1438 void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }
1439 
1440 
1441 // NEON structure loads and stores.
1442 Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
1443   Instr addr_field = RnSP(addr.GetBaseRegister());
1444 
1445   if (addr.IsPostIndex()) {
1446     VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
1447                        static_cast<NEONLoadStoreMultiStructPostIndexOp>(
1448                            NEONLoadStoreSingleStructPostIndex));
1449 
1450     addr_field |= NEONLoadStoreMultiStructPostIndex;
1451     if (addr.GetOffset() == 0) {
1452       addr_field |= RmNot31(addr.GetRegisterOffset());
1453     } else {
1454       // The immediate post index addressing mode is indicated by rm = 31.
1455       // The immediate is implied by the number of vector registers used.
1456       addr_field |= (0x1f << Rm_offset);
1457     }
1458   } else {
1459     VIXL_ASSERT(addr.IsImmediateOffset() && (addr.GetOffset() == 0));
1460   }
1461   return addr_field;
1462 }
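// Illustrative only (assuming an Assembler 'assm' and free registers): the
// two post-index forms accepted above. The immediate form must step by the
// total size of the transferred registers, so rm is set to 31 and the
// immediate itself is not encoded in the instruction.
//   assm.ld1(v0.V16B(), MemOperand(x0, 16, PostIndex));  // rm = 31 (immediate)
//   assm.ld1(v0.V16B(), MemOperand(x0, x1, PostIndex));  // rm = x1 (register)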
1463 
1464 void Assembler::LoadStoreStructVerify(const VRegister& vt,
1465                                       const MemOperand& addr,
1466                                       Instr op) {
1467 #ifdef VIXL_DEBUG
1468   // Assert that addressing mode is either offset (with immediate 0), post
1469   // index by immediate of the size of the register list, or post index by a
1470   // value in a core register.
1471   if (addr.IsImmediateOffset()) {
1472     VIXL_ASSERT(addr.GetOffset() == 0);
1473   } else {
1474     int offset = vt.GetSizeInBytes();
1475     switch (op) {
1476       case NEON_LD1_1v:
1477       case NEON_ST1_1v:
1478         offset *= 1;
1479         break;
1480       case NEONLoadStoreSingleStructLoad1:
1481       case NEONLoadStoreSingleStructStore1:
1482       case NEON_LD1R:
1483         offset = (offset / vt.GetLanes()) * 1;
1484         break;
1485 
1486       case NEON_LD1_2v:
1487       case NEON_ST1_2v:
1488       case NEON_LD2:
1489       case NEON_ST2:
1490         offset *= 2;
1491         break;
1492       case NEONLoadStoreSingleStructLoad2:
1493       case NEONLoadStoreSingleStructStore2:
1494       case NEON_LD2R:
1495         offset = (offset / vt.GetLanes()) * 2;
1496         break;
1497 
1498       case NEON_LD1_3v:
1499       case NEON_ST1_3v:
1500       case NEON_LD3:
1501       case NEON_ST3:
1502         offset *= 3;
1503         break;
1504       case NEONLoadStoreSingleStructLoad3:
1505       case NEONLoadStoreSingleStructStore3:
1506       case NEON_LD3R:
1507         offset = (offset / vt.GetLanes()) * 3;
1508         break;
1509 
1510       case NEON_LD1_4v:
1511       case NEON_ST1_4v:
1512       case NEON_LD4:
1513       case NEON_ST4:
1514         offset *= 4;
1515         break;
1516       case NEONLoadStoreSingleStructLoad4:
1517       case NEONLoadStoreSingleStructStore4:
1518       case NEON_LD4R:
1519         offset = (offset / vt.GetLanes()) * 4;
1520         break;
1521       default:
1522         VIXL_UNREACHABLE();
1523     }
1524     VIXL_ASSERT(!addr.GetRegisterOffset().Is(NoReg) ||
1525                 addr.GetOffset() == offset);
1526   }
1527 #else
1528   USE(vt, addr, op);
1529 #endif
1530 }
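// For reference (derived from the switch above): the expected immediate
// post-index step is the total number of bytes transferred. For example,
// ld2 of two 16-byte registers expects an offset of 32, while a single-lane
// ld2 of S-sized lanes expects 8 (two 4-byte lanes).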
1531 
1532 void Assembler::LoadStoreStruct(const VRegister& vt,
1533                                 const MemOperand& addr,
1534                                 NEONLoadStoreMultiStructOp op) {
1535   LoadStoreStructVerify(vt, addr, op);
1536   VIXL_ASSERT(vt.IsVector() || vt.Is1D());
1537   Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
1538 }
1539 
1540 
1541 void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
1542                                               const MemOperand& addr,
1543                                               NEONLoadStoreSingleStructOp op) {
1544   LoadStoreStructVerify(vt, addr, op);
1545   Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
1546 }
1547 
1548 
1549 void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
1550   LoadStoreStruct(vt, src, NEON_LD1_1v);
1551 }
1552 
1553 
1554 void Assembler::ld1(const VRegister& vt,
1555                     const VRegister& vt2,
1556                     const MemOperand& src) {
1557   USE(vt2);
1558   VIXL_ASSERT(AreSameFormat(vt, vt2));
1559   VIXL_ASSERT(AreConsecutive(vt, vt2));
1560   LoadStoreStruct(vt, src, NEON_LD1_2v);
1561 }
1562 
1563 
1564 void Assembler::ld1(const VRegister& vt,
1565                     const VRegister& vt2,
1566                     const VRegister& vt3,
1567                     const MemOperand& src) {
1568   USE(vt2, vt3);
1569   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1570   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1571   LoadStoreStruct(vt, src, NEON_LD1_3v);
1572 }
1573 
1574 
1575 void Assembler::ld1(const VRegister& vt,
1576                     const VRegister& vt2,
1577                     const VRegister& vt3,
1578                     const VRegister& vt4,
1579                     const MemOperand& src) {
1580   USE(vt2, vt3, vt4);
1581   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1582   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1583   LoadStoreStruct(vt, src, NEON_LD1_4v);
1584 }
1585 
1586 
1587 void Assembler::ld2(const VRegister& vt,
1588                     const VRegister& vt2,
1589                     const MemOperand& src) {
1590   USE(vt2);
1591   VIXL_ASSERT(AreSameFormat(vt, vt2));
1592   VIXL_ASSERT(AreConsecutive(vt, vt2));
1593   LoadStoreStruct(vt, src, NEON_LD2);
1594 }
1595 
1596 
1597 void Assembler::ld2(const VRegister& vt,
1598                     const VRegister& vt2,
1599                     int lane,
1600                     const MemOperand& src) {
1601   USE(vt2);
1602   VIXL_ASSERT(AreSameFormat(vt, vt2));
1603   VIXL_ASSERT(AreConsecutive(vt, vt2));
1604   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
1605 }
1606 
1607 
1608 void Assembler::ld2r(const VRegister& vt,
1609                      const VRegister& vt2,
1610                      const MemOperand& src) {
1611   USE(vt2);
1612   VIXL_ASSERT(AreSameFormat(vt, vt2));
1613   VIXL_ASSERT(AreConsecutive(vt, vt2));
1614   LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
1615 }
1616 
1617 
1618 void Assembler::ld3(const VRegister& vt,
1619                     const VRegister& vt2,
1620                     const VRegister& vt3,
1621                     const MemOperand& src) {
1622   USE(vt2, vt3);
1623   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1624   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1625   LoadStoreStruct(vt, src, NEON_LD3);
1626 }
1627 
1628 
1629 void Assembler::ld3(const VRegister& vt,
1630                     const VRegister& vt2,
1631                     const VRegister& vt3,
1632                     int lane,
1633                     const MemOperand& src) {
1634   USE(vt2, vt3);
1635   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1636   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1637   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
1638 }
1639 
1640 
1641 void Assembler::ld3r(const VRegister& vt,
1642                      const VRegister& vt2,
1643                      const VRegister& vt3,
1644                      const MemOperand& src) {
1645   USE(vt2, vt3);
1646   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1647   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1648   LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
1649 }
1650 
1651 
1652 void Assembler::ld4(const VRegister& vt,
1653                     const VRegister& vt2,
1654                     const VRegister& vt3,
1655                     const VRegister& vt4,
1656                     const MemOperand& src) {
1657   USE(vt2, vt3, vt4);
1658   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1659   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1660   LoadStoreStruct(vt, src, NEON_LD4);
1661 }
1662 
1663 
1664 void Assembler::ld4(const VRegister& vt,
1665                     const VRegister& vt2,
1666                     const VRegister& vt3,
1667                     const VRegister& vt4,
1668                     int lane,
1669                     const MemOperand& src) {
1670   USE(vt2, vt3, vt4);
1671   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1672   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1673   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
1674 }
1675 
1676 
1677 void Assembler::ld4r(const VRegister& vt,
1678                      const VRegister& vt2,
1679                      const VRegister& vt3,
1680                      const VRegister& vt4,
1681                      const MemOperand& src) {
1682   USE(vt2, vt3, vt4);
1683   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1684   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1685   LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
1686 }
1687 
1688 
1689 void Assembler::st1(const VRegister& vt, const MemOperand& src) {
1690   LoadStoreStruct(vt, src, NEON_ST1_1v);
1691 }
1692 
1693 
1694 void Assembler::st1(const VRegister& vt,
1695                     const VRegister& vt2,
1696                     const MemOperand& src) {
1697   USE(vt2);
1698   VIXL_ASSERT(AreSameFormat(vt, vt2));
1699   VIXL_ASSERT(AreConsecutive(vt, vt2));
1700   LoadStoreStruct(vt, src, NEON_ST1_2v);
1701 }
1702 
1703 
1704 void Assembler::st1(const VRegister& vt,
1705                     const VRegister& vt2,
1706                     const VRegister& vt3,
1707                     const MemOperand& src) {
1708   USE(vt2, vt3);
1709   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1710   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1711   LoadStoreStruct(vt, src, NEON_ST1_3v);
1712 }
1713 
1714 
1715 void Assembler::st1(const VRegister& vt,
1716                     const VRegister& vt2,
1717                     const VRegister& vt3,
1718                     const VRegister& vt4,
1719                     const MemOperand& src) {
1720   USE(vt2, vt3, vt4);
1721   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1722   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1723   LoadStoreStruct(vt, src, NEON_ST1_4v);
1724 }
1725 
1726 
1727 void Assembler::st2(const VRegister& vt,
1728                     const VRegister& vt2,
1729                     const MemOperand& dst) {
1730   USE(vt2);
1731   VIXL_ASSERT(AreSameFormat(vt, vt2));
1732   VIXL_ASSERT(AreConsecutive(vt, vt2));
1733   LoadStoreStruct(vt, dst, NEON_ST2);
1734 }
1735 
1736 
1737 void Assembler::st2(const VRegister& vt,
1738                     const VRegister& vt2,
1739                     int lane,
1740                     const MemOperand& dst) {
1741   USE(vt2);
1742   VIXL_ASSERT(AreSameFormat(vt, vt2));
1743   VIXL_ASSERT(AreConsecutive(vt, vt2));
1744   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
1745 }
1746 
1747 
1748 void Assembler::st3(const VRegister& vt,
1749                     const VRegister& vt2,
1750                     const VRegister& vt3,
1751                     const MemOperand& dst) {
1752   USE(vt2, vt3);
1753   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1754   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1755   LoadStoreStruct(vt, dst, NEON_ST3);
1756 }
1757 
1758 
1759 void Assembler::st3(const VRegister& vt,
1760                     const VRegister& vt2,
1761                     const VRegister& vt3,
1762                     int lane,
1763                     const MemOperand& dst) {
1764   USE(vt2, vt3);
1765   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
1766   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
1767   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
1768 }
1769 
1770 
1771 void Assembler::st4(const VRegister& vt,
1772                     const VRegister& vt2,
1773                     const VRegister& vt3,
1774                     const VRegister& vt4,
1775                     const MemOperand& dst) {
1776   USE(vt2, vt3, vt4);
1777   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1778   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1779   LoadStoreStruct(vt, dst, NEON_ST4);
1780 }
1781 
1782 
1783 void Assembler::st4(const VRegister& vt,
1784                     const VRegister& vt2,
1785                     const VRegister& vt3,
1786                     const VRegister& vt4,
1787                     int lane,
1788                     const MemOperand& dst) {
1789   USE(vt2, vt3, vt4);
1790   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
1791   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
1792   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
1793 }
1794 
1795 
1796 void Assembler::LoadStoreStructSingle(const VRegister& vt,
1797                                       uint32_t lane,
1798                                       const MemOperand& addr,
1799                                       NEONLoadStoreSingleStructOp op) {
1800   LoadStoreStructVerify(vt, addr, op);
1801 
1802   // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
1803   // number of lanes, and T is b, h, s or d.
1804   unsigned lane_size = vt.GetLaneSizeInBytes();
1805   VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
1806 
1807   // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
1808   // S and size fields.
1809   lane *= lane_size;
1810   if (lane_size == 8) lane++;
1811 
1812   Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
1813   Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
1814   Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
1815 
1816   Instr instr = op;
1817   switch (lane_size) {
1818     case 1:
1819       instr |= NEONLoadStoreSingle_b;
1820       break;
1821     case 2:
1822       instr |= NEONLoadStoreSingle_h;
1823       break;
1824     case 4:
1825       instr |= NEONLoadStoreSingle_s;
1826       break;
1827     default:
1828       VIXL_ASSERT(lane_size == 8);
1829       instr |= NEONLoadStoreSingle_d;
1830   }
1831 
1832   Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
1833 }
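// Worked example (derived from the code above): for st1(v0.V4S(), 3, ...)
// the lane size is 4 bytes, so lane becomes 3 * 4 = 12; bit 3 of that value
// lands in Q, bit 2 in S, and the low bits fall in the size field, while the
// 4-byte lane size itself selects the NEONLoadStoreSingle_s opcode.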
1834 
1835 
1836 void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
1837   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
1838 }
1839 
1840 
1841 void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
1842   LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
1843 }
1844 
1845 
1846 void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
1847   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
1848 }
1849 
1850 
1851 void Assembler::NEON3DifferentL(const VRegister& vd,
1852                                 const VRegister& vn,
1853                                 const VRegister& vm,
1854                                 NEON3DifferentOp vop) {
1855   VIXL_ASSERT(AreSameFormat(vn, vm));
1856   VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
1857               (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
1858               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
1859               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
1860   Instr format, op = vop;
1861   if (vd.IsScalar()) {
1862     op |= NEON_Q | NEONScalar;
1863     format = SFormat(vn);
1864   } else {
1865     format = VFormat(vn);
1866   }
1867   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
1868 }
1869 
1870 
1871 void Assembler::NEON3DifferentW(const VRegister& vd,
1872                                 const VRegister& vn,
1873                                 const VRegister& vm,
1874                                 NEON3DifferentOp vop) {
1875   VIXL_ASSERT(AreSameFormat(vd, vn));
1876   VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
1877               (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
1878               (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
1879   Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1880 }
1881 
1882 
1883 void Assembler::NEON3DifferentHN(const VRegister& vd,
1884                                  const VRegister& vn,
1885                                  const VRegister& vm,
1886                                  NEON3DifferentOp vop) {
1887   VIXL_ASSERT(AreSameFormat(vm, vn));
1888   VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
1889               (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
1890               (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
1891   Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1892 }
1893 
1894 
1895 // clang-format off
1896 #define NEON_3DIFF_LONG_LIST(V) \
1897   V(pmull,  NEON_PMULL,  vn.IsVector() && vn.Is8B())                           \
1898   V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B())                          \
1899   V(saddl,  NEON_SADDL,  vn.IsVector() && vn.IsD())                            \
1900   V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                            \
1901   V(sabal,  NEON_SABAL,  vn.IsVector() && vn.IsD())                            \
1902   V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                            \
1903   V(uabal,  NEON_UABAL,  vn.IsVector() && vn.IsD())                            \
1904   V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                            \
1905   V(sabdl,  NEON_SABDL,  vn.IsVector() && vn.IsD())                            \
1906   V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                            \
1907   V(uabdl,  NEON_UABDL,  vn.IsVector() && vn.IsD())                            \
1908   V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                            \
1909   V(smlal,  NEON_SMLAL,  vn.IsVector() && vn.IsD())                            \
1910   V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                            \
1911   V(umlal,  NEON_UMLAL,  vn.IsVector() && vn.IsD())                            \
1912   V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                            \
1913   V(smlsl,  NEON_SMLSL,  vn.IsVector() && vn.IsD())                            \
1914   V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                            \
1915   V(umlsl,  NEON_UMLSL,  vn.IsVector() && vn.IsD())                            \
1916   V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                            \
1917   V(smull,  NEON_SMULL,  vn.IsVector() && vn.IsD())                            \
1918   V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                            \
1919   V(umull,  NEON_UMULL,  vn.IsVector() && vn.IsD())                            \
1920   V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                            \
1921   V(ssubl,  NEON_SSUBL,  vn.IsVector() && vn.IsD())                            \
1922   V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                            \
1923   V(uaddl,  NEON_UADDL,  vn.IsVector() && vn.IsD())                            \
1924   V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                            \
1925   V(usubl,  NEON_USUBL,  vn.IsVector() && vn.IsD())                            \
1926   V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                            \
1927   V(sqdmlal,  NEON_SQDMLAL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1928   V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
1929   V(sqdmlsl,  NEON_SQDMLSL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1930   V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
1931   V(sqdmull,  NEON_SQDMULL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
1932   V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
1933 // clang-format on
1934 
1935 
1936 #define DEFINE_ASM_FUNC(FN, OP, AS)        \
1937 void Assembler::FN(const VRegister& vd,    \
1938                    const VRegister& vn,    \
1939                    const VRegister& vm) {  \
1940   VIXL_ASSERT(AS);                         \
1941   NEON3DifferentL(vd, vn, vm, OP);         \
1942 }
1943 NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
1944 #undef DEFINE_ASM_FUNC
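// For reference, each NEON_3DIFF_LONG_LIST entry expands (via the
// DEFINE_ASM_FUNC macro above) to a thin wrapper such as:
//   void Assembler::saddl(const VRegister& vd,
//                         const VRegister& vn,
//                         const VRegister& vm) {
//     VIXL_ASSERT(vn.IsVector() && vn.IsD());
//     NEON3DifferentL(vd, vn, vm, NEON_SADDL);
//   }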
1945 
1946 // clang-format off
1947 #define NEON_3DIFF_HN_LIST(V)         \
1948   V(addhn,   NEON_ADDHN,   vd.IsD())  \
1949   V(addhn2,  NEON_ADDHN2,  vd.IsQ())  \
1950   V(raddhn,  NEON_RADDHN,  vd.IsD())  \
1951   V(raddhn2, NEON_RADDHN2, vd.IsQ())  \
1952   V(subhn,   NEON_SUBHN,   vd.IsD())  \
1953   V(subhn2,  NEON_SUBHN2,  vd.IsQ())  \
1954   V(rsubhn,  NEON_RSUBHN,  vd.IsD())  \
1955   V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
1956 // clang-format on
1957 
1958 #define DEFINE_ASM_FUNC(FN, OP, AS)         \
1959   void Assembler::FN(const VRegister& vd,   \
1960                      const VRegister& vn,   \
1961                      const VRegister& vm) { \
1962     VIXL_ASSERT(AS);                        \
1963     NEON3DifferentHN(vd, vn, vm, OP);       \
1964   }
1965 NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
1966 #undef DEFINE_ASM_FUNC
1967 
1968 void Assembler::uaddw(const VRegister& vd,
1969                       const VRegister& vn,
1970                       const VRegister& vm) {
1971   VIXL_ASSERT(vm.IsD());
1972   NEON3DifferentW(vd, vn, vm, NEON_UADDW);
1973 }
1974 
1975 
1976 void Assembler::uaddw2(const VRegister& vd,
1977                        const VRegister& vn,
1978                        const VRegister& vm) {
1979   VIXL_ASSERT(vm.IsQ());
1980   NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
1981 }
1982 
1983 
1984 void Assembler::saddw(const VRegister& vd,
1985                       const VRegister& vn,
1986                       const VRegister& vm) {
1987   VIXL_ASSERT(vm.IsD());
1988   NEON3DifferentW(vd, vn, vm, NEON_SADDW);
1989 }
1990 
1991 
1992 void Assembler::saddw2(const VRegister& vd,
1993                        const VRegister& vn,
1994                        const VRegister& vm) {
1995   VIXL_ASSERT(vm.IsQ());
1996   NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
1997 }
1998 
1999 
2000 void Assembler::usubw(const VRegister& vd,
2001                       const VRegister& vn,
2002                       const VRegister& vm) {
2003   VIXL_ASSERT(vm.IsD());
2004   NEON3DifferentW(vd, vn, vm, NEON_USUBW);
2005 }
2006 
2007 
2008 void Assembler::usubw2(const VRegister& vd,
2009                        const VRegister& vn,
2010                        const VRegister& vm) {
2011   VIXL_ASSERT(vm.IsQ());
2012   NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
2013 }
2014 
2015 
2016 void Assembler::ssubw(const VRegister& vd,
2017                       const VRegister& vn,
2018                       const VRegister& vm) {
2019   VIXL_ASSERT(vm.IsD());
2020   NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
2021 }
2022 
2023 
2024 void Assembler::ssubw2(const VRegister& vd,
2025                        const VRegister& vn,
2026                        const VRegister& vm) {
2027   VIXL_ASSERT(vm.IsQ());
2028   NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
2029 }
2030 
2031 
2032 void Assembler::mov(const Register& rd, const Register& rm) {
2033   // Moves involving the stack pointer are encoded as add immediate with
2034   // second operand of zero. Otherwise, orr with first operand zr is
2035   // used.
2036   if (rd.IsSP() || rm.IsSP()) {
2037     add(rd, rm, 0);
2038   } else {
2039     orr(rd, AppropriateZeroRegFor(rd), rm);
2040   }
2041 }
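// Illustrative only (an Assembler 'assm' is assumed): the consequence of the
// encoding choice above is that
//   assm.mov(x0, x1);   // emits orr x0, xzr, x1
//   assm.mov(sp, x1);   // emits add sp, x1, #0, since orr cannot target sp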
2042 
2043 
2044 void Assembler::mvn(const Register& rd, const Operand& operand) {
2045   orn(rd, AppropriateZeroRegFor(rd), operand);
2046 }
2047 
2048 
2049 void Assembler::mrs(const Register& xt, SystemRegister sysreg) {
2050   VIXL_ASSERT(xt.Is64Bits());
2051   Emit(MRS | ImmSystemRegister(sysreg) | Rt(xt));
2052 }
2053 
2054 
2055 void Assembler::msr(SystemRegister sysreg, const Register& xt) {
2056   VIXL_ASSERT(xt.Is64Bits());
2057   Emit(MSR | Rt(xt) | ImmSystemRegister(sysreg));
2058 }
2059 
2060 
2061 void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); }
2062 
2063 
2064 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
2065   Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
2066 }
2067 
2068 
2069 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
2070   Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
2071 }
2072 
2073 
2074 void Assembler::isb() {
2075   Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
2076 }
2077 
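// Illustrative only (an Assembler 'assm' is assumed): typical barrier usage,
// with the full-system/all-accesses arguments shown above; narrower
// BarrierDomain/BarrierType combinations restrict the barrier's scope.
//   assm.dmb(FullSystem, BarrierAll);   // data memory barrier
//   assm.dsb(FullSystem, BarrierAll);   // data synchronization barrier
//   assm.isb();                         // instruction synchronization barrier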
2078 
2079 void Assembler::fmov(const VRegister& vd, double imm) {
2080   if (vd.IsScalar()) {
2081     VIXL_ASSERT(vd.Is1D());
2082     Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
2083   } else {
2084     VIXL_ASSERT(vd.Is2D());
2085     Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
2086     Instr q = NEON_Q;
2087     uint32_t encoded_imm = FP64ToImm8(imm);
2088     Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
2089   }
2090 }
2091 
2092 
2093 void Assembler::fmov(const VRegister& vd, float imm) {
2094   if (vd.IsScalar()) {
2095     VIXL_ASSERT(vd.Is1S());
2096     Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
2097   } else {
2098     VIXL_ASSERT(vd.Is2S() || vd.Is4S());
2099     Instr op = NEONModifiedImmediate_MOVI;
2100     Instr q = vd.Is4S() ? NEON_Q : 0;
2101     uint32_t encoded_imm = FP32ToImm8(imm);
2102     Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
2103   }
2104 }
2105 
2106 
2107 void Assembler::fmov(const Register& rd, const VRegister& vn) {
2108   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2109   VIXL_ASSERT(rd.GetSizeInBits() == vn.GetSizeInBits());
2110   FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
2111   Emit(op | Rd(rd) | Rn(vn));
2112 }
2113 
2114 
2115 void Assembler::fmov(const VRegister& vd, const Register& rn) {
2116   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2117   VIXL_ASSERT(vd.GetSizeInBits() == rn.GetSizeInBits());
2118   FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
2119   Emit(op | Rd(vd) | Rn(rn));
2120 }
2121 
2122 
2123 void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
2124   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2125   VIXL_ASSERT(vd.IsSameFormat(vn));
2126   Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
2127 }
2128 
2129 
2130 void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
2131   VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
2132   USE(index);
2133   Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
2134 }
2135 
2136 
2137 void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
2138   VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
2139   USE(index);
2140   Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
2141 }
2142 
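// Note on the fmov immediate forms above: the double/float overloads only
// accept values representable as an 8-bit AArch64 FP immediate (encoded by
// FP64ToImm8/FP32ToImm8). Illustrative only ('assm' and the register choices
// are assumptions):
//   assm.fmov(d0, 1.0);          // scalar immediate
//   assm.fmov(v0.V4S(), 0.5f);   // vector immediate, replicated to all lanes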
2143 
2144 void Assembler::fmadd(const VRegister& vd,
2145                       const VRegister& vn,
2146                       const VRegister& vm,
2147                       const VRegister& va) {
2148   FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
2149 }
2150 
2151 
2152 void Assembler::fmsub(const VRegister& vd,
2153                       const VRegister& vn,
2154                       const VRegister& vm,
2155                       const VRegister& va) {
2156   FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
2157 }
2158 
2159 
2160 void Assembler::fnmadd(const VRegister& vd,
2161                        const VRegister& vn,
2162                        const VRegister& vm,
2163                        const VRegister& va) {
2164   FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
2165 }
2166 
2167 
2168 void Assembler::fnmsub(const VRegister& vd,
2169                        const VRegister& vn,
2170                        const VRegister& vm,
2171                        const VRegister& va) {
2172   FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
2173 }
2174 
2175 
2176 void Assembler::fnmul(const VRegister& vd,
2177                       const VRegister& vn,
2178                       const VRegister& vm) {
2179   VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
2180   Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
2181   Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
2182 }
2183 
2184 
2185 void Assembler::FPCompareMacro(const VRegister& vn,
2186                                double value,
2187                                FPTrapFlags trap) {
2188   USE(value);
2189   // Although the fcmp{e} instructions can strictly only take an immediate
2190   // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
2191   // doesn't affect the result of the comparison.
2192   VIXL_ASSERT(value == 0.0);
2193   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2194   Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
2195   Emit(FPType(vn) | op | Rn(vn));
2196 }
2197 
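// Illustrative only ('assm' is assumed): the immediate fcmp/fcmpe overloads
// further below reach this zero-compare encoding, so
//   assm.fcmp(d0, 0.0);    // fcmp d0, #0.0
//   assm.fcmpe(s1, 0.0);   // trapping variant
// Any non-zero immediate would trip the assertion above.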
2198 
2199 void Assembler::FPCompareMacro(const VRegister& vn,
2200                                const VRegister& vm,
2201                                FPTrapFlags trap) {
2202   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2203   VIXL_ASSERT(vn.IsSameSizeAndType(vm));
2204   Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
2205   Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
2206 }
2207 
2208 
2209 void Assembler::fcmp(const VRegister& vn, const VRegister& vm) {
2210   FPCompareMacro(vn, vm, DisableTrap);
2211 }
2212 
2213 
2214 void Assembler::fcmpe(const VRegister& vn, const VRegister& vm) {
2215   FPCompareMacro(vn, vm, EnableTrap);
2216 }
2217 
2218 
2219 void Assembler::fcmp(const VRegister& vn, double value) {
2220   FPCompareMacro(vn, value, DisableTrap);
2221 }
2222 
2223 
2224 void Assembler::fcmpe(const VRegister& vn, double value) {
2225   FPCompareMacro(vn, value, EnableTrap);
2226 }
2227 
2228 
2229 void Assembler::FPCCompareMacro(const VRegister& vn,
2230                                 const VRegister& vm,
2231                                 StatusFlags nzcv,
2232                                 Condition cond,
2233                                 FPTrapFlags trap) {
2234   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2235   VIXL_ASSERT(vn.IsSameSizeAndType(vm));
2236   Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
2237   Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
2238 }
2239 
2240 void Assembler::fccmp(const VRegister& vn,
2241                       const VRegister& vm,
2242                       StatusFlags nzcv,
2243                       Condition cond) {
2244   FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
2245 }
2246 
2247 
2248 void Assembler::fccmpe(const VRegister& vn,
2249                        const VRegister& vm,
2250                        StatusFlags nzcv,
2251                        Condition cond) {
2252   FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
2253 }
2254 
2255 
2256 void Assembler::fcsel(const VRegister& vd,
2257                       const VRegister& vn,
2258                       const VRegister& vm,
2259                       Condition cond) {
2260   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2261   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2262   Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
2263 }
2264 
2265 
2266 void Assembler::NEONFPConvertToInt(const Register& rd,
2267                                    const VRegister& vn,
2268                                    Instr op) {
2269   Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
2270 }
2271 
2272 
2273 void Assembler::NEONFPConvertToInt(const VRegister& vd,
2274                                    const VRegister& vn,
2275                                    Instr op) {
2276   if (vn.IsScalar()) {
2277     VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
2278     op |= NEON_Q | NEONScalar;
2279   }
2280   Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
2281 }
2282 
2283 
2284 void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
2285   FPDataProcessing1SourceOp op;
2286   if (vd.Is1D()) {
2287     VIXL_ASSERT(vn.Is1S() || vn.Is1H());
2288     op = vn.Is1S() ? FCVT_ds : FCVT_dh;
2289   } else if (vd.Is1S()) {
2290     VIXL_ASSERT(vn.Is1D() || vn.Is1H());
2291     op = vn.Is1D() ? FCVT_sd : FCVT_sh;
2292   } else {
2293     VIXL_ASSERT(vd.Is1H());
2294     VIXL_ASSERT(vn.Is1D() || vn.Is1S());
2295     op = vn.Is1D() ? FCVT_hd : FCVT_hs;
2296   }
2297   FPDataProcessing1Source(vd, vn, op);
2298 }
2299 
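// Illustrative only ('assm' is assumed): fcvt converts between scalar FP
// precisions, with the opcode chosen from the destination/source pair, e.g.
//   assm.fcvt(d0, s1);   // single -> double (FCVT_ds)
//   assm.fcvt(s2, d3);   // double -> single (FCVT_sd)
// Half-precision conversions take the same path via the FCVT_*h/FCVT_h* ops.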
2300 
2301 void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
2302   VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
2303   Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2304   Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
2305 }
2306 
2307 
2308 void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
2309   VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
2310   Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2311   Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
2312 }
2313 
2314 
2315 void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
2316   VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
2317   Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
2318   Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
2319 }
2320 
2321 
2322 void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
2323   VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
2324   Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
2325   Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
2326 }
2327 
2328 
2329 void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
2330   Instr format = 1 << NEONSize_offset;
2331   if (vd.IsScalar()) {
2332     VIXL_ASSERT(vd.Is1S() && vn.Is1D());
2333     Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
2334   } else {
2335     VIXL_ASSERT(vd.Is2S() && vn.Is2D());
2336     Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
2337   }
2338 }
2339 
2340 
2341 void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
2342   VIXL_ASSERT(vd.Is4S() && vn.Is2D());
2343   Instr format = 1 << NEONSize_offset;
2344   Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
2345 }
2346 
2347 
2348 #define NEON_FP2REGMISC_FCVT_LIST(V) \
2349   V(fcvtnu, NEON_FCVTNU, FCVTNU)     \
2350   V(fcvtns, NEON_FCVTNS, FCVTNS)     \
2351   V(fcvtpu, NEON_FCVTPU, FCVTPU)     \
2352   V(fcvtps, NEON_FCVTPS, FCVTPS)     \
2353   V(fcvtmu, NEON_FCVTMU, FCVTMU)     \
2354   V(fcvtms, NEON_FCVTMS, FCVTMS)     \
2355   V(fcvtau, NEON_FCVTAU, FCVTAU)     \
2356   V(fcvtas, NEON_FCVTAS, FCVTAS)
2357 
2358 #define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)                     \
2359   void Assembler::FN(const Register& rd, const VRegister& vn) {  \
2360     NEONFPConvertToInt(rd, vn, SCA_OP);                          \
2361   }                                                              \
2362   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
2363     NEONFPConvertToInt(vd, vn, VEC_OP);                          \
2364   }
2365 NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
2366 #undef DEFINE_ASM_FUNCS
2367 
2368 
2369 void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
2370   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2371   VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
2372   if (fbits == 0) {
2373     Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
2374   } else {
2375     Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
2376          Rd(rd));
2377   }
2378 }
2379 
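// Note on the fixed-point forms above and below: a non-zero fbits requests a
// conversion to a fixed-point result with fbits fractional bits, i.e. the
// value is scaled by 2^fbits before being truncated toward zero, and
// FPScale(64 - fbits) encodes the scale field. Illustrative only ('assm' is
// assumed):
//   assm.fcvtzs(x0, d0, 4);   // x0 = (int64_t)(d0 * 16), rounding toward zero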
2380 
2381 void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
2382   VIXL_ASSERT(fbits >= 0);
2383   if (fbits == 0) {
2384     NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
2385   } else {
2386     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2387     NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
2388   }
2389 }
2390 
2391 
2392 void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
2393   VIXL_ASSERT(vn.Is1S() || vn.Is1D());
2394   VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
2395   if (fbits == 0) {
2396     Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
2397   } else {
2398     Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
2399          Rd(rd));
2400   }
2401 }
2402 
2403 
2404 void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
2405   VIXL_ASSERT(fbits >= 0);
2406   if (fbits == 0) {
2407     NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
2408   } else {
2409     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2410     NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
2411   }
2412 }
2413 
2414 void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
2415   VIXL_ASSERT(fbits >= 0);
2416   if (fbits == 0) {
2417     NEONFP2RegMisc(vd, vn, NEON_UCVTF);
2418   } else {
2419     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2420     NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
2421   }
2422 }
2423 
2424 void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
2425   VIXL_ASSERT(fbits >= 0);
2426   if (fbits == 0) {
2427     NEONFP2RegMisc(vd, vn, NEON_SCVTF);
2428   } else {
2429     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2430     NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
2431   }
2432 }
2433 
2434 
2435 void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
2436   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2437   VIXL_ASSERT(fbits >= 0);
2438   if (fbits == 0) {
2439     Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
2440   } else {
2441     Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2442          Rd(vd));
2443   }
2444 }
2445 
2446 
2447 void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) {
2448   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2449   VIXL_ASSERT(fbits >= 0);
2450   if (fbits == 0) {
2451     Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
2452   } else {
2453     Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2454          Rd(vd));
2455   }
2456 }
2457 
2458 
2459 void Assembler::NEON3Same(const VRegister& vd,
2460                           const VRegister& vn,
2461                           const VRegister& vm,
2462                           NEON3SameOp vop) {
2463   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2464   VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
2465 
2466   Instr format, op = vop;
2467   if (vd.IsScalar()) {
2468     op |= NEON_Q | NEONScalar;
2469     format = SFormat(vd);
2470   } else {
2471     format = VFormat(vd);
2472   }
2473 
2474   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
2475 }
2476 
2477 
2478 void Assembler::NEONFP3Same(const VRegister& vd,
2479                             const VRegister& vn,
2480                             const VRegister& vm,
2481                             Instr op) {
2482   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2483   Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
2484 }
2485 
2486 
2487 // clang-format off
2488 #define NEON_FP2REGMISC_LIST(V)                 \
2489   V(fabs,    NEON_FABS,    FABS)                \
2490   V(fneg,    NEON_FNEG,    FNEG)                \
2491   V(fsqrt,   NEON_FSQRT,   FSQRT)               \
2492   V(frintn,  NEON_FRINTN,  FRINTN)              \
2493   V(frinta,  NEON_FRINTA,  FRINTA)              \
2494   V(frintp,  NEON_FRINTP,  FRINTP)              \
2495   V(frintm,  NEON_FRINTM,  FRINTM)              \
2496   V(frintx,  NEON_FRINTX,  FRINTX)              \
2497   V(frintz,  NEON_FRINTZ,  FRINTZ)              \
2498   V(frinti,  NEON_FRINTI,  FRINTI)              \
2499   V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
2500   V(frecpe,  NEON_FRECPE,  NEON_FRECPE_scalar )
2501 // clang-format on
2502 
2503 
2504 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                      \
2505   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
2506     Instr op;                                                    \
2507     if (vd.IsScalar()) {                                         \
2508       VIXL_ASSERT(vd.Is1S() || vd.Is1D());                       \
2509       op = SCA_OP;                                               \
2510     } else {                                                     \
2511       VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());          \
2512       op = VEC_OP;                                               \
2513     }                                                            \
2514     NEONFP2RegMisc(vd, vn, op);                                  \
2515   }
2516 NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
2517 #undef DEFINE_ASM_FUNC
2518 
2519 
2520 void Assembler::NEONFP2RegMisc(const VRegister& vd,
2521                                const VRegister& vn,
2522                                Instr op) {
2523   VIXL_ASSERT(AreSameFormat(vd, vn));
2524   Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
2525 }
2526 
2527 
2528 void Assembler::NEON2RegMisc(const VRegister& vd,
2529                              const VRegister& vn,
2530                              NEON2RegMiscOp vop,
2531                              int value) {
2532   VIXL_ASSERT(AreSameFormat(vd, vn));
2533   VIXL_ASSERT(value == 0);
2534   USE(value);
2535 
2536   Instr format, op = vop;
2537   if (vd.IsScalar()) {
2538     op |= NEON_Q | NEONScalar;
2539     format = SFormat(vd);
2540   } else {
2541     format = VFormat(vd);
2542   }
2543 
2544   Emit(format | op | Rn(vn) | Rd(vd));
2545 }
2546 
2547 
2548 void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
2549   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2550   NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
2551 }
2552 
2553 
2554 void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
2555   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2556   NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
2557 }
2558 
2559 
2560 void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
2561   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2562   NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
2563 }
2564 
2565 
2566 void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
2567   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2568   NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
2569 }
2570 
2571 
2572 void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
2573   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2574   NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
2575 }
2576 
2577 
2578 void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
2579   VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
2580               (vd.Is4S() && vn.Is4H() && shift == 16) ||
2581               (vd.Is2D() && vn.Is2S() && shift == 32));
2582   USE(shift);
2583   Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
2584 }
2585 
2586 
2587 void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
2588   USE(shift);
2589   VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
2590               (vd.Is4S() && vn.Is8H() && shift == 16) ||
2591               (vd.Is2D() && vn.Is4S() && shift == 32));
2592   Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
2593 }
2594 
2595 
2596 void Assembler::NEONFP2RegMisc(const VRegister& vd,
2597                                const VRegister& vn,
2598                                NEON2RegMiscOp vop,
2599                                double value) {
2600   VIXL_ASSERT(AreSameFormat(vd, vn));
2601   VIXL_ASSERT(value == 0.0);
2602   USE(value);
2603 
2604   Instr op = vop;
2605   if (vd.IsScalar()) {
2606     VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2607     op |= NEON_Q | NEONScalar;
2608   } else {
2609     VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
2610   }
2611 
2612   Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
2613 }
2614 
2615 
2616 void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
2617   NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
2618 }
2619 
2620 
2621 void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
2622   NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
2623 }
2624 
2625 
2626 void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
2627   NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
2628 }
2629 
2630 
2631 void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
2632   NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
2633 }
2634 
2635 
2636 void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
2637   NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
2638 }
2639 
2640 
2641 void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
2642   VIXL_ASSERT(vd.IsScalar());
2643   VIXL_ASSERT(AreSameFormat(vd, vn));
2644   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2645   Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
2646 }
2647 
2648 
2649 // clang-format off
2650 #define NEON_3SAME_LIST(V) \
2651   V(add,      NEON_ADD,      vd.IsVector() || vd.Is1D())            \
2652   V(addp,     NEON_ADDP,     vd.IsVector() || vd.Is1D())            \
2653   V(sub,      NEON_SUB,      vd.IsVector() || vd.Is1D())            \
2654   V(cmeq,     NEON_CMEQ,     vd.IsVector() || vd.Is1D())            \
2655   V(cmge,     NEON_CMGE,     vd.IsVector() || vd.Is1D())            \
2656   V(cmgt,     NEON_CMGT,     vd.IsVector() || vd.Is1D())            \
2657   V(cmhi,     NEON_CMHI,     vd.IsVector() || vd.Is1D())            \
2658   V(cmhs,     NEON_CMHS,     vd.IsVector() || vd.Is1D())            \
2659   V(cmtst,    NEON_CMTST,    vd.IsVector() || vd.Is1D())            \
2660   V(sshl,     NEON_SSHL,     vd.IsVector() || vd.Is1D())            \
2661   V(ushl,     NEON_USHL,     vd.IsVector() || vd.Is1D())            \
2662   V(srshl,    NEON_SRSHL,    vd.IsVector() || vd.Is1D())            \
2663   V(urshl,    NEON_URSHL,    vd.IsVector() || vd.Is1D())            \
2664   V(sqdmulh,  NEON_SQDMULH,  vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
2665   V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
2666   V(shadd,    NEON_SHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
2667   V(uhadd,    NEON_UHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
2668   V(srhadd,   NEON_SRHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
2669   V(urhadd,   NEON_URHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
2670   V(shsub,    NEON_SHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
2671   V(uhsub,    NEON_UHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
2672   V(smax,     NEON_SMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
2673   V(smaxp,    NEON_SMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
2674   V(smin,     NEON_SMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
2675   V(sminp,    NEON_SMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
2676   V(umax,     NEON_UMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
2677   V(umaxp,    NEON_UMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
2678   V(umin,     NEON_UMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
2679   V(uminp,    NEON_UMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
2680   V(saba,     NEON_SABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
2681   V(sabd,     NEON_SABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
2682   V(uaba,     NEON_UABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
2683   V(uabd,     NEON_UABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
2684   V(mla,      NEON_MLA,      vd.IsVector() && !vd.IsLaneSizeD())    \
2685   V(mls,      NEON_MLS,      vd.IsVector() && !vd.IsLaneSizeD())    \
2686   V(mul,      NEON_MUL,      vd.IsVector() && !vd.IsLaneSizeD())    \
2687   V(and_,     NEON_AND,      vd.Is8B() || vd.Is16B())               \
2688   V(orr,      NEON_ORR,      vd.Is8B() || vd.Is16B())               \
2689   V(orn,      NEON_ORN,      vd.Is8B() || vd.Is16B())               \
2690   V(eor,      NEON_EOR,      vd.Is8B() || vd.Is16B())               \
2691   V(bic,      NEON_BIC,      vd.Is8B() || vd.Is16B())               \
2692   V(bit,      NEON_BIT,      vd.Is8B() || vd.Is16B())               \
2693   V(bif,      NEON_BIF,      vd.Is8B() || vd.Is16B())               \
2694   V(bsl,      NEON_BSL,      vd.Is8B() || vd.Is16B())               \
2695   V(pmul,     NEON_PMUL,     vd.Is8B() || vd.Is16B())               \
2696   V(uqadd,    NEON_UQADD,    true)                                  \
2697   V(sqadd,    NEON_SQADD,    true)                                  \
2698   V(uqsub,    NEON_UQSUB,    true)                                  \
2699   V(sqsub,    NEON_SQSUB,    true)                                  \
2700   V(sqshl,    NEON_SQSHL,    true)                                  \
2701   V(uqshl,    NEON_UQSHL,    true)                                  \
2702   V(sqrshl,   NEON_SQRSHL,   true)                                  \
2703   V(uqrshl,   NEON_UQRSHL,   true)
2704 // clang-format on
2705 
2706 #define DEFINE_ASM_FUNC(FN, OP, AS)         \
2707   void Assembler::FN(const VRegister& vd,   \
2708                      const VRegister& vn,   \
2709                      const VRegister& vm) { \
2710     VIXL_ASSERT(AS);                        \
2711     NEON3Same(vd, vn, vm, OP);              \
2712   }
2713 NEON_3SAME_LIST(DEFINE_ASM_FUNC)
2714 #undef DEFINE_ASM_FUNC
2715 
2716 
2717 // clang-format off
2718 #define NEON_FP3SAME_OP_LIST(V)                  \
2719   V(fadd,    NEON_FADD,    FADD)                 \
2720   V(fsub,    NEON_FSUB,    FSUB)                 \
2721   V(fmul,    NEON_FMUL,    FMUL)                 \
2722   V(fdiv,    NEON_FDIV,    FDIV)                 \
2723   V(fmax,    NEON_FMAX,    FMAX)                 \
2724   V(fmaxnm,  NEON_FMAXNM,  FMAXNM)               \
2725   V(fmin,    NEON_FMIN,    FMIN)                 \
2726   V(fminnm,  NEON_FMINNM,  FMINNM)               \
2727   V(fmulx,   NEON_FMULX,   NEON_FMULX_scalar)    \
2728   V(frecps,  NEON_FRECPS,  NEON_FRECPS_scalar)   \
2729   V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar)  \
2730   V(fabd,    NEON_FABD,    NEON_FABD_scalar)     \
2731   V(fmla,    NEON_FMLA,    0)                    \
2732   V(fmls,    NEON_FMLS,    0)                    \
2733   V(facge,   NEON_FACGE,   NEON_FACGE_scalar)    \
2734   V(facgt,   NEON_FACGT,   NEON_FACGT_scalar)    \
2735   V(fcmeq,   NEON_FCMEQ,   NEON_FCMEQ_scalar)    \
2736   V(fcmge,   NEON_FCMGE,   NEON_FCMGE_scalar)    \
2737   V(fcmgt,   NEON_FCMGT,   NEON_FCMGT_scalar)    \
2738   V(faddp,   NEON_FADDP,   0)                    \
2739   V(fmaxp,   NEON_FMAXP,   0)                    \
2740   V(fminp,   NEON_FMINP,   0)                    \
2741   V(fmaxnmp, NEON_FMAXNMP, 0)                    \
2742   V(fminnmp, NEON_FMINNMP, 0)
2743 // clang-format on
2744 
2745 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)             \
2746   void Assembler::FN(const VRegister& vd,               \
2747                      const VRegister& vn,               \
2748                      const VRegister& vm) {             \
2749     Instr op;                                           \
2750     if ((SCA_OP != 0) && vd.IsScalar()) {               \
2751       VIXL_ASSERT(vd.Is1S() || vd.Is1D());              \
2752       op = SCA_OP;                                      \
2753     } else {                                            \
2754       VIXL_ASSERT(vd.IsVector());                       \
2755       VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
2756       op = VEC_OP;                                      \
2757     }                                                   \
2758     NEONFP3Same(vd, vn, vm, op);                        \
2759   }
2760 NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
2761 #undef DEFINE_ASM_FUNC
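// Note (added commentary, not from the original source): each entry in
// NEON_FP3SAME_OP_LIST expands through DEFINE_ASM_FUNC above. For example,
// fadd() selects the scalar FADD encoding when vd is a 1S or 1D scalar and
// the vector NEON_FADD encoding otherwise; entries whose SCA_OP is 0 (fmla,
// faddp, ...) accept vector operands only.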
2762 
2763 
2764 void Assembler::addp(const VRegister& vd, const VRegister& vn) {
2765   VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
2766   Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
2767 }
2768 
2769 
2770 void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
2771   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2772   Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
2773 }
2774 
2775 
2776 void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
2777   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2778   Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
2779 }
2780 
2781 
2782 void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
2783   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2784   Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
2785 }
2786 
2787 
2788 void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
2789   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2790   Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
2791 }
2792 
2793 
2794 void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
2795   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2796   Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
2797 }
2798 
2799 
2800 void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
2801   NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
2802 }
2803 
2804 
2805 void Assembler::mov(const VRegister& vd, const VRegister& vn) {
2806   VIXL_ASSERT(AreSameFormat(vd, vn));
2807   if (vd.IsD()) {
2808     orr(vd.V8B(), vn.V8B(), vn.V8B());
2809   } else {
2810     VIXL_ASSERT(vd.IsQ());
2811     orr(vd.V16B(), vn.V16B(), vn.V16B());
2812   }
2813 }
2814 
2815 
2816 void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
2817   NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
2818 }
2819 
2820 
2821 void Assembler::movi(const VRegister& vd,
2822                      const uint64_t imm,
2823                      Shift shift,
2824                      const int shift_amount) {
2825   VIXL_ASSERT((shift == LSL) || (shift == MSL));
2826   if (vd.Is2D() || vd.Is1D()) {
2827     VIXL_ASSERT(shift_amount == 0);
2828     int imm8 = 0;
2829     for (int i = 0; i < 8; ++i) {
2830       int byte = (imm >> (i * 8)) & 0xff;
2831       VIXL_ASSERT((byte == 0) || (byte == 0xff));
2832       if (byte == 0xff) {
2833         imm8 |= (1 << i);
2834       }
2835     }
2836     int q = vd.Is2D() ? NEON_Q : 0;
2837     Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
2838          ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
2839   } else if (shift == LSL) {
2840     VIXL_ASSERT(IsUint8(imm));
2841     NEONModifiedImmShiftLsl(vd,
2842                             static_cast<int>(imm),
2843                             shift_amount,
2844                             NEONModifiedImmediate_MOVI);
2845   } else {
2846     VIXL_ASSERT(IsUint8(imm));
2847     NEONModifiedImmShiftMsl(vd,
2848                             static_cast<int>(imm),
2849                             shift_amount,
2850                             NEONModifiedImmediate_MOVI);
2851   }
2852 }
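// Worked example (added commentary, derived from the byte-expansion loop
// above): movi(v0.V2D(), 0xff00ff00ff00ff00) finds bytes 1, 3, 5 and 7 equal
// to 0xff, so imm8 is 0b10101010 (0xaa); on decode, each set bit of abcdefgh
// expands back into a full 0xff byte of the 64-bit result.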
2853 
2854 
2855 void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
2856   VIXL_ASSERT(AreSameFormat(vd, vn));
2857   if (vd.IsD()) {
2858     not_(vd.V8B(), vn.V8B());
2859   } else {
2860     VIXL_ASSERT(vd.IsQ());
2861     not_(vd.V16B(), vn.V16B());
2862   }
2863 }
2864 
2865 
2866 void Assembler::mvni(const VRegister& vd,
2867                      const int imm8,
2868                      Shift shift,
2869                      const int shift_amount) {
2870   VIXL_ASSERT((shift == LSL) || (shift == MSL));
2871   if (shift == LSL) {
2872     NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
2873   } else {
2874     NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
2875   }
2876 }
2877 
2878 
2879 void Assembler::NEONFPByElement(const VRegister& vd,
2880                                 const VRegister& vn,
2881                                 const VRegister& vm,
2882                                 int vm_index,
2883                                 NEONByIndexedElementOp vop) {
2884   VIXL_ASSERT(AreSameFormat(vd, vn));
2885   VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
2886               (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
2887               (vd.Is1D() && vm.Is1D()));
2888   VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));
2889 
2890   Instr op = vop;
2891   int index_num_bits = vm.Is1S() ? 2 : 1;
2892   if (vd.IsScalar()) {
2893     op |= NEON_Q | NEONScalar;
2894   }
2895 
2896   Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
2897        Rn(vn) | Rd(vd));
2898 }
2899 
2900 
2901 void Assembler::NEONByElement(const VRegister& vd,
2902                               const VRegister& vn,
2903                               const VRegister& vm,
2904                               int vm_index,
2905                               NEONByIndexedElementOp vop) {
2906   VIXL_ASSERT(AreSameFormat(vd, vn));
2907   VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
2908               (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
2909               (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
2910   VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) ||
2911               (vm.Is1S() && (vm_index < 4)));
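  // Added commentary: the vm.GetCode() < 16 restriction above reflects the
  // by-element encoding for half-word lanes, where bit M is spent on the lane
  // index and only a four-bit Rm field remains, so just V0-V15 are reachable.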
2912 
2913   Instr format, op = vop;
2914   int index_num_bits = vm.Is1H() ? 3 : 2;
2915   if (vd.IsScalar()) {
2916     op |= NEONScalar | NEON_Q;
2917     format = SFormat(vn);
2918   } else {
2919     format = VFormat(vn);
2920   }
2921   Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
2922        Rd(vd));
2923 }
2924 
2925 
2926 void Assembler::NEONByElementL(const VRegister& vd,
2927                                const VRegister& vn,
2928                                const VRegister& vm,
2929                                int vm_index,
2930                                NEONByIndexedElementOp vop) {
2931   VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
2932               (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
2933               (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
2934               (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
2935               (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
2936               (vd.Is1D() && vn.Is1S() && vm.Is1S()));
2937 
2938   VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) ||
2939               (vm.Is1S() && (vm_index < 4)));
2940 
2941   Instr format, op = vop;
2942   int index_num_bits = vm.Is1H() ? 3 : 2;
2943   if (vd.IsScalar()) {
2944     op |= NEONScalar | NEON_Q;
2945     format = SFormat(vn);
2946   } else {
2947     format = VFormat(vn);
2948   }
2949   Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
2950        Rd(vd));
2951 }
2952 
2953 
2954 // clang-format off
2955 #define NEON_BYELEMENT_LIST(V)                         \
2956   V(mul,      NEON_MUL_byelement,      vn.IsVector())  \
2957   V(mla,      NEON_MLA_byelement,      vn.IsVector())  \
2958   V(mls,      NEON_MLS_byelement,      vn.IsVector())  \
2959   V(sqdmulh,  NEON_SQDMULH_byelement,  true)           \
2960   V(sqrdmulh, NEON_SQRDMULH_byelement, true)
2961 // clang-format on
2962 
2963 
2964 #define DEFINE_ASM_FUNC(FN, OP, AS)          \
2965   void Assembler::FN(const VRegister& vd,    \
2966                      const VRegister& vn,    \
2967                      const VRegister& vm,    \
2968                      int vm_index) {         \
2969     VIXL_ASSERT(AS);                         \
2970     NEONByElement(vd, vn, vm, vm_index, OP); \
2971   }
2972 NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
2973 #undef DEFINE_ASM_FUNC
2974 
2975 
2976 // clang-format off
2977 #define NEON_FPBYELEMENT_LIST(V) \
2978   V(fmul,  NEON_FMUL_byelement)  \
2979   V(fmla,  NEON_FMLA_byelement)  \
2980   V(fmls,  NEON_FMLS_byelement)  \
2981   V(fmulx, NEON_FMULX_byelement)
2982 // clang-format on
2983 
2984 
2985 #define DEFINE_ASM_FUNC(FN, OP)                \
2986   void Assembler::FN(const VRegister& vd,      \
2987                      const VRegister& vn,      \
2988                      const VRegister& vm,      \
2989                      int vm_index) {           \
2990     NEONFPByElement(vd, vn, vm, vm_index, OP); \
2991   }
2992 NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
2993 #undef DEFINE_ASM_FUNC
2994 
2995 
2996 // clang-format off
2997 #define NEON_BYELEMENT_LONG_LIST(V)                               \
2998   V(sqdmull,  NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD())  \
2999   V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ())  \
3000   V(sqdmlal,  NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD())  \
3001   V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ())  \
3002   V(sqdmlsl,  NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD())  \
3003   V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ())  \
3004   V(smull,    NEON_SMULL_byelement,   vn.IsVector() && vn.IsD())  \
3005   V(smull2,   NEON_SMULL_byelement,   vn.IsVector() && vn.IsQ())  \
3006   V(umull,    NEON_UMULL_byelement,   vn.IsVector() && vn.IsD())  \
3007   V(umull2,   NEON_UMULL_byelement,   vn.IsVector() && vn.IsQ())  \
3008   V(smlal,    NEON_SMLAL_byelement,   vn.IsVector() && vn.IsD())  \
3009   V(smlal2,   NEON_SMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
3010   V(umlal,    NEON_UMLAL_byelement,   vn.IsVector() && vn.IsD())  \
3011   V(umlal2,   NEON_UMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
3012   V(smlsl,    NEON_SMLSL_byelement,   vn.IsVector() && vn.IsD())  \
3013   V(smlsl2,   NEON_SMLSL_byelement,   vn.IsVector() && vn.IsQ())  \
3014   V(umlsl,    NEON_UMLSL_byelement,   vn.IsVector() && vn.IsD())  \
3015   V(umlsl2,   NEON_UMLSL_byelement,   vn.IsVector() && vn.IsQ())
3016 // clang-format on
3017 
3018 
3019 #define DEFINE_ASM_FUNC(FN, OP, AS)           \
3020   void Assembler::FN(const VRegister& vd,     \
3021                      const VRegister& vn,     \
3022                      const VRegister& vm,     \
3023                      int vm_index) {          \
3024     VIXL_ASSERT(AS);                          \
3025     NEONByElementL(vd, vn, vm, vm_index, OP); \
3026   }
3027 NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
3028 #undef DEFINE_ASM_FUNC
3029 
3030 
3031 void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
3032   NEON2RegMisc(vd, vn, NEON_SUQADD);
3033 }
3034 
3035 
3036 void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
3037   NEON2RegMisc(vd, vn, NEON_USQADD);
3038 }
3039 
3040 
3041 void Assembler::abs(const VRegister& vd, const VRegister& vn) {
3042   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3043   NEON2RegMisc(vd, vn, NEON_ABS);
3044 }
3045 
3046 
3047 void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
3048   NEON2RegMisc(vd, vn, NEON_SQABS);
3049 }
3050 
3051 
3052 void Assembler::neg(const VRegister& vd, const VRegister& vn) {
3053   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3054   NEON2RegMisc(vd, vn, NEON_NEG);
3055 }
3056 
3057 
3058 void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
3059   NEON2RegMisc(vd, vn, NEON_SQNEG);
3060 }
3061 
3062 
3063 void Assembler::NEONXtn(const VRegister& vd,
3064                         const VRegister& vn,
3065                         NEON2RegMiscOp vop) {
3066   Instr format, op = vop;
3067   if (vd.IsScalar()) {
3068     VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
3069                 (vd.Is1S() && vn.Is1D()));
3070     op |= NEON_Q | NEONScalar;
3071     format = SFormat(vd);
3072   } else {
3073     VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
3074                 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
3075                 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
3076     format = VFormat(vd);
3077   }
3078   Emit(format | op | Rn(vn) | Rd(vd));
3079 }
3080 
3081 
3082 void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
3083   VIXL_ASSERT(vd.IsVector() && vd.IsD());
3084   NEONXtn(vd, vn, NEON_XTN);
3085 }
3086 
3087 
3088 void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
3089   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3090   NEONXtn(vd, vn, NEON_XTN);
3091 }
3092 
3093 
3094 void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
3095   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3096   NEONXtn(vd, vn, NEON_SQXTN);
3097 }
3098 
3099 
3100 void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
3101   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3102   NEONXtn(vd, vn, NEON_SQXTN);
3103 }
3104 
3105 
3106 void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
3107   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3108   NEONXtn(vd, vn, NEON_SQXTUN);
3109 }
3110 
3111 
3112 void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
3113   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3114   NEONXtn(vd, vn, NEON_SQXTUN);
3115 }
3116 
3117 
3118 void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
3119   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3120   NEONXtn(vd, vn, NEON_UQXTN);
3121 }
3122 
3123 
3124 void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
3125   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3126   NEONXtn(vd, vn, NEON_UQXTN);
3127 }
3128 
3129 
3130 // NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
3131 void Assembler::not_(const VRegister& vd, const VRegister& vn) {
3132   VIXL_ASSERT(AreSameFormat(vd, vn));
3133   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3134   Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3135 }
3136 
3137 
3138 void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
3139   VIXL_ASSERT(AreSameFormat(vd, vn));
3140   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3141   Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3142 }
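// Added commentary: the "1 << NEONSize_offset" term above sets bit 22, the
// size bit mentioned in the note before not_(); with it clear the shared
// opcode encodes NOT, with it set it encodes RBIT.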
3143 
3144 
3145 void Assembler::ext(const VRegister& vd,
3146                     const VRegister& vn,
3147                     const VRegister& vm,
3148                     int index) {
3149   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
3150   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3151   VIXL_ASSERT((0 <= index) && (index < vd.GetLanes()));
3152   Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
3153 }
3154 
3155 
3156 void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
3157   Instr q, scalar;
3158 
3159   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
3160   // number of lanes, and T is b, h, s or d.
3161   int lane_size = vn.GetLaneSizeInBytes();
3162   NEONFormatField format;
3163   switch (lane_size) {
3164     case 1:
3165       format = NEON_16B;
3166       break;
3167     case 2:
3168       format = NEON_8H;
3169       break;
3170     case 4:
3171       format = NEON_4S;
3172       break;
3173     default:
3174       VIXL_ASSERT(lane_size == 8);
3175       format = NEON_2D;
3176       break;
3177   }
3178 
3179   if (vd.IsScalar()) {
3180     q = NEON_Q;
3181     scalar = NEONScalar;
3182   } else {
3183     VIXL_ASSERT(!vd.Is1D());
3184     q = vd.IsD() ? 0 : NEON_Q;
3185     scalar = 0;
3186   }
3187   Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
3188        Rd(vd));
3189 }
3190 
3191 
3192 void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
3193   VIXL_ASSERT(vd.IsScalar());
3194   dup(vd, vn, vn_index);
3195 }
3196 
3197 
3198 void Assembler::dup(const VRegister& vd, const Register& rn) {
3199   VIXL_ASSERT(!vd.Is1D());
3200   VIXL_ASSERT(vd.Is2D() == rn.IsX());
3201   int q = vd.IsD() ? 0 : NEON_Q;
3202   Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
3203 }
3204 
3205 
3206 void Assembler::ins(const VRegister& vd,
3207                     int vd_index,
3208                     const VRegister& vn,
3209                     int vn_index) {
3210   VIXL_ASSERT(AreSameFormat(vd, vn));
3211   // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
3212   // number of lanes, and T is b, h, s or d.
3213   int lane_size = vd.GetLaneSizeInBytes();
3214   NEONFormatField format;
3215   switch (lane_size) {
3216     case 1:
3217       format = NEON_16B;
3218       break;
3219     case 2:
3220       format = NEON_8H;
3221       break;
3222     case 4:
3223       format = NEON_4S;
3224       break;
3225     default:
3226       VIXL_ASSERT(lane_size == 8);
3227       format = NEON_2D;
3228       break;
3229   }
3230 
3231   VIXL_ASSERT(
3232       (0 <= vd_index) &&
3233       (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
3234   VIXL_ASSERT(
3235       (0 <= vn_index) &&
3236       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
3237   Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
3238        ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
3239 }
3240 
3241 
3242 void Assembler::mov(const VRegister& vd,
3243                     int vd_index,
3244                     const VRegister& vn,
3245                     int vn_index) {
3246   ins(vd, vd_index, vn, vn_index);
3247 }
3248 
3249 
3250 void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
3251   // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
3252   // number of lanes, and T is b, h, s or d.
3253   int lane_size = vd.GetLaneSizeInBytes();
3254   NEONFormatField format;
3255   switch (lane_size) {
3256     case 1:
3257       format = NEON_16B;
3258       VIXL_ASSERT(rn.IsW());
3259       break;
3260     case 2:
3261       format = NEON_8H;
3262       VIXL_ASSERT(rn.IsW());
3263       break;
3264     case 4:
3265       format = NEON_4S;
3266       VIXL_ASSERT(rn.IsW());
3267       break;
3268     default:
3269       VIXL_ASSERT(lane_size == 8);
3270       VIXL_ASSERT(rn.IsX());
3271       format = NEON_2D;
3272       break;
3273   }
3274 
3275   VIXL_ASSERT(
3276       (0 <= vd_index) &&
3277       (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
3278   Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
3279 }
3280 
3281 
3282 void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
3283   ins(vd, vd_index, rn);
3284 }
3285 
3286 
3287 void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
3288   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
3289   // number of lanes, and T is b, h, s or d.
3290   int lane_size = vn.GetLaneSizeInBytes();
3291   NEONFormatField format;
3292   Instr q = 0;
3293   switch (lane_size) {
3294     case 1:
3295       format = NEON_16B;
3296       VIXL_ASSERT(rd.IsW());
3297       break;
3298     case 2:
3299       format = NEON_8H;
3300       VIXL_ASSERT(rd.IsW());
3301       break;
3302     case 4:
3303       format = NEON_4S;
3304       VIXL_ASSERT(rd.IsW());
3305       break;
3306     default:
3307       VIXL_ASSERT(lane_size == 8);
3308       VIXL_ASSERT(rd.IsX());
3309       format = NEON_2D;
3310       q = NEON_Q;
3311       break;
3312   }
3313 
3314   VIXL_ASSERT(
3315       (0 <= vn_index) &&
3316       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
3317   Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
3318 }
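// Illustrative usage (added commentary, not from the original source):
//   umov(w0, v1.V8H(), 3);  // half-word lane 3 to w0, Q is left clear.
//   umov(x0, v1.V2D(), 1);  // double-word lane 1 to x0, Q is set.
// The destination must be a W register for B, H and S lanes and an X register
// for D lanes, matching the assertions above.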
3319 
3320 
3321 void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
3322   VIXL_ASSERT(vn.GetSizeInBytes() >= 4);
3323   umov(rd, vn, vn_index);
3324 }
3325 
3326 
3327 void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
3328   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
3329   // number of lanes, and T is b, h or s.
3330   int lane_size = vn.GetLaneSizeInBytes();
3331   NEONFormatField format;
3332   Instr q = 0;
3333   VIXL_ASSERT(lane_size != 8);
3334   switch (lane_size) {
3335     case 1:
3336       format = NEON_16B;
3337       break;
3338     case 2:
3339       format = NEON_8H;
3340       break;
3341     default:
3342       VIXL_ASSERT(lane_size == 4);
3343       VIXL_ASSERT(rd.IsX());
3344       format = NEON_4S;
3345       break;
3346   }
3347   q = rd.IsW() ? 0 : NEON_Q;
3348   VIXL_ASSERT(
3349       (0 <= vn_index) &&
3350       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
3351   Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
3352 }
3353 
3354 
3355 void Assembler::cls(const VRegister& vd, const VRegister& vn) {
3356   VIXL_ASSERT(AreSameFormat(vd, vn));
3357   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3358   Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
3359 }
3360 
3361 
3362 void Assembler::clz(const VRegister& vd, const VRegister& vn) {
3363   VIXL_ASSERT(AreSameFormat(vd, vn));
3364   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3365   Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
3366 }
3367 
3368 
3369 void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
3370   VIXL_ASSERT(AreSameFormat(vd, vn));
3371   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3372   Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
3373 }
3374 
3375 
3376 void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
3377   VIXL_ASSERT(AreSameFormat(vd, vn));
3378   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3379   Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
3380 }
3381 
3382 
3383 void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
3384   VIXL_ASSERT(AreSameFormat(vd, vn));
3385   VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
3386   Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
3387 }
3388 
3389 
3390 void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
3391   VIXL_ASSERT(AreSameFormat(vd, vn));
3392   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3393   Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
3394 }
3395 
3396 
3397 void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
3398   VIXL_ASSERT(AreSameFormat(vd, vn));
3399   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
3400   Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
3401 }
3402 
3403 
3404 void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
3405   VIXL_ASSERT(AreSameFormat(vd, vn));
3406   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
3407   Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
3408 }
3409 
3410 
3411 void Assembler::NEONAddlp(const VRegister& vd,
3412                           const VRegister& vn,
3413                           NEON2RegMiscOp op) {
3414   VIXL_ASSERT((op == NEON_SADDLP) || (op == NEON_UADDLP) ||
3415               (op == NEON_SADALP) || (op == NEON_UADALP));
3416 
3417   VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
3418               (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
3419               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
3420   Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3421 }
3422 
3423 
3424 void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
3425   NEONAddlp(vd, vn, NEON_SADDLP);
3426 }
3427 
3428 
3429 void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
3430   NEONAddlp(vd, vn, NEON_UADDLP);
3431 }
3432 
3433 
3434 void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
3435   NEONAddlp(vd, vn, NEON_SADALP);
3436 }
3437 
3438 
3439 void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
3440   NEONAddlp(vd, vn, NEON_UADALP);
3441 }
3442 
3443 
3444 void Assembler::NEONAcrossLanesL(const VRegister& vd,
3445                                  const VRegister& vn,
3446                                  NEONAcrossLanesOp op) {
3447   VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
3448               (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
3449               (vn.Is4S() && vd.Is1D()));
3450   Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3451 }
3452 
3453 
3454 void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
3455   NEONAcrossLanesL(vd, vn, NEON_SADDLV);
3456 }
3457 
3458 
3459 void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
3460   NEONAcrossLanesL(vd, vn, NEON_UADDLV);
3461 }
3462 
3463 
3464 void Assembler::NEONAcrossLanes(const VRegister& vd,
3465                                 const VRegister& vn,
3466                                 NEONAcrossLanesOp op) {
3467   VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
3468               (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
3469               (vn.Is4S() && vd.Is1S()));
3470   if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
3471     Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
3472   } else {
3473     Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3474   }
3475 }
3476 
3477 
3478 #define NEON_ACROSSLANES_LIST(V)      \
3479   V(fmaxv, NEON_FMAXV, vd.Is1S())     \
3480   V(fminv, NEON_FMINV, vd.Is1S())     \
3481   V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
3482   V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
3483   V(addv, NEON_ADDV, true)            \
3484   V(smaxv, NEON_SMAXV, true)          \
3485   V(sminv, NEON_SMINV, true)          \
3486   V(umaxv, NEON_UMAXV, true)          \
3487   V(uminv, NEON_UMINV, true)
3488 
3489 
3490 #define DEFINE_ASM_FUNC(FN, OP, AS)                              \
3491   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
3492     VIXL_ASSERT(AS);                                             \
3493     NEONAcrossLanes(vd, vn, OP);                                 \
3494   }
3495 NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
3496 #undef DEFINE_ASM_FUNC
3497 
3498 
3499 void Assembler::NEONPerm(const VRegister& vd,
3500                          const VRegister& vn,
3501                          const VRegister& vm,
3502                          NEONPermOp op) {
3503   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
3504   VIXL_ASSERT(!vd.Is1D());
3505   Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
3506 }
3507 
3508 
3509 void Assembler::trn1(const VRegister& vd,
3510                      const VRegister& vn,
3511                      const VRegister& vm) {
3512   NEONPerm(vd, vn, vm, NEON_TRN1);
3513 }
3514 
3515 
3516 void Assembler::trn2(const VRegister& vd,
3517                      const VRegister& vn,
3518                      const VRegister& vm) {
3519   NEONPerm(vd, vn, vm, NEON_TRN2);
3520 }
3521 
3522 
3523 void Assembler::uzp1(const VRegister& vd,
3524                      const VRegister& vn,
3525                      const VRegister& vm) {
3526   NEONPerm(vd, vn, vm, NEON_UZP1);
3527 }
3528 
3529 
3530 void Assembler::uzp2(const VRegister& vd,
3531                      const VRegister& vn,
3532                      const VRegister& vm) {
3533   NEONPerm(vd, vn, vm, NEON_UZP2);
3534 }
3535 
3536 
3537 void Assembler::zip1(const VRegister& vd,
3538                      const VRegister& vn,
3539                      const VRegister& vm) {
3540   NEONPerm(vd, vn, vm, NEON_ZIP1);
3541 }
3542 
3543 
3544 void Assembler::zip2(const VRegister& vd,
3545                      const VRegister& vn,
3546                      const VRegister& vm) {
3547   NEONPerm(vd, vn, vm, NEON_ZIP2);
3548 }
3549 
3550 
3551 void Assembler::NEONShiftImmediate(const VRegister& vd,
3552                                    const VRegister& vn,
3553                                    NEONShiftImmediateOp op,
3554                                    int immh_immb) {
3555   VIXL_ASSERT(AreSameFormat(vd, vn));
3556   Instr q, scalar;
3557   if (vn.IsScalar()) {
3558     q = NEON_Q;
3559     scalar = NEONScalar;
3560   } else {
3561     q = vd.IsD() ? 0 : NEON_Q;
3562     scalar = 0;
3563   }
3564   Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
3565 }
3566 
3567 
3568 void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
3569                                        const VRegister& vn,
3570                                        int shift,
3571                                        NEONShiftImmediateOp op) {
3572   int laneSizeInBits = vn.GetLaneSizeInBits();
3573   VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
3574   NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
3575 }
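// Encoding note (added commentary): the value passed as immh_immb above packs
// laneSizeInBits + shift into the immh:immb field (bits 22..16). For example,
// shl(v0.V4S(), v1.V4S(), 3) gives 32 + 3 = 35 = 0b0100011, i.e. immh = 0b0100
// (32-bit lanes) and immb = 0b011 (a left shift of 3).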
3576 
3577 
3578 void Assembler::NEONShiftRightImmediate(const VRegister& vd,
3579                                         const VRegister& vn,
3580                                         int shift,
3581                                         NEONShiftImmediateOp op) {
3582   int laneSizeInBits = vn.GetLaneSizeInBits();
3583   VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
3584   NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
3585 }
3586 
3587 
3588 void Assembler::NEONShiftImmediateL(const VRegister& vd,
3589                                     const VRegister& vn,
3590                                     int shift,
3591                                     NEONShiftImmediateOp op) {
3592   int laneSizeInBits = vn.GetLaneSizeInBits();
3593   VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
3594   int immh_immb = (laneSizeInBits + shift) << 16;
3595 
3596   VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
3597               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
3598               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
3599   Instr q;
3600   q = vn.IsD() ? 0 : NEON_Q;
3601   Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
3602 }
3603 
3604 
3605 void Assembler::NEONShiftImmediateN(const VRegister& vd,
3606                                     const VRegister& vn,
3607                                     int shift,
3608                                     NEONShiftImmediateOp op) {
3609   Instr q, scalar;
3610   int laneSizeInBits = vd.GetLaneSizeInBits();
3611   VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
3612   int immh_immb = (2 * laneSizeInBits - shift) << 16;
3613 
3614   if (vn.IsScalar()) {
3615     VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
3616                 (vd.Is1S() && vn.Is1D()));
3617     q = NEON_Q;
3618     scalar = NEONScalar;
3619   } else {
3620     VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
3621                 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
3622                 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
3623     scalar = 0;
3624     q = vd.IsD() ? 0 : NEON_Q;
3625   }
3626   Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
3627 }
3628 
3629 
3630 void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
3631   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3632   NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
3633 }
3634 
3635 
3636 void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
3637   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3638   NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
3639 }
3640 
3641 
3642 void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
3643   NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
3644 }
3645 
3646 
3647 void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
3648   NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
3649 }
3650 
3651 
3652 void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
3653   NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
3654 }
3655 
3656 
3657 void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
3658   VIXL_ASSERT(vn.IsD());
3659   NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
3660 }
3661 
3662 
3663 void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
3664   VIXL_ASSERT(vn.IsQ());
3665   NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
3666 }
3667 
3668 
3669 void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
3670   sshll(vd, vn, 0);
3671 }
3672 
3673 
3674 void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
3675   sshll2(vd, vn, 0);
3676 }
3677 
3678 
3679 void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
3680   VIXL_ASSERT(vn.IsD());
3681   NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
3682 }
3683 
3684 
3685 void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
3686   VIXL_ASSERT(vn.IsQ());
3687   NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
3688 }
3689 
3690 
3691 void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
3692   ushll(vd, vn, 0);
3693 }
3694 
3695 
3696 void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
3697   ushll2(vd, vn, 0);
3698 }
3699 
3700 
3701 void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
3702   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3703   NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
3704 }
3705 
3706 
3707 void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
3708   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3709   NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
3710 }
3711 
3712 
3713 void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
3714   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3715   NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
3716 }
3717 
3718 
3719 void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
3720   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3721   NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
3722 }
3723 
3724 
3725 void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
3726   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3727   NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
3728 }
3729 
3730 
3731 void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
3732   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3733   NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
3734 }
3735 
3736 
3737 void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
3738   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3739   NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
3740 }
3741 
3742 
3743 void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
3744   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3745   NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
3746 }
3747 
3748 
3749 void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
3750   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3751   NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
3752 }
3753 
3754 
3755 void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
3756   VIXL_ASSERT(vn.IsVector() && vd.IsD());
3757   NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
3758 }
3759 
3760 
3761 void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
3762   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3763   NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
3764 }
3765 
3766 
3767 void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
3768   VIXL_ASSERT(vn.IsVector() && vd.IsD());
3769   NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
3770 }
3771 
3772 
3773 void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3774   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3775   NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
3776 }
3777 
3778 
3779 void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
3780   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3781   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
3782 }
3783 
3784 
3785 void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3786   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3787   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
3788 }
3789 
3790 
3791 void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
3792   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3793   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
3794 }
3795 
3796 
3797 void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3798   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3799   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
3800 }
3801 
3802 
3803 void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
3804   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3805   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
3806 }
3807 
3808 
3809 void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
3810   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3811   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
3812 }
3813 
3814 
3815 void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
3816   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3817   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
3818 }
3819 
3820 
3821 void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
3822   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3823   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
3824 }
3825 
3826 
3827 void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
3828   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3829   NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
3830 }
3831 
3832 
3833 void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3834   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3835   NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
3836 }
3837 
3838 
3839 void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
3840   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3841   NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
3842 }
3843 
3844 
3845 void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3846   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3847   NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
3848 }
3849 
3850 
3851 // Note:
3852 // Below, a difference in case for the same letter indicates a
3853 // negated bit.
3854 // If b is 1, then B is 0.
3855 uint32_t Assembler::FP32ToImm8(float imm) {
3856   VIXL_ASSERT(IsImmFP32(imm));
3857   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
3858   uint32_t bits = FloatToRawbits(imm);
3859   // bit7: a000.0000
3860   uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
3861   // bit6: 0b00.0000
3862   uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
3863   // bit5_to_0: 00cd.efgh
3864   uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
3865 
3866   return bit7 | bit6 | bit5_to_0;
3867 }
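// Worked example (added commentary): 1.0f has raw bits 0x3f800000, so bit7
// (a) is 0, bit6 (b, read from bit 29) is 1, and bits 5..0 (cdefgh, bits
// 24..19) are 0b110000, giving imm8 = 0x70, the encoding used by
// "fmov s0, #1.0".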
3868 
3869 
3870 Instr Assembler::ImmFP32(float imm) { return FP32ToImm8(imm) << ImmFP_offset; }
3871 
3872 
3873 uint32_t Assembler::FP64ToImm8(double imm) {
3874   VIXL_ASSERT(IsImmFP64(imm));
3875   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
3876   //       0000.0000.0000.0000.0000.0000.0000.0000
3877   uint64_t bits = DoubleToRawbits(imm);
3878   // bit7: a000.0000
3879   uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
3880   // bit6: 0b00.0000
3881   uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
3882   // bit5_to_0: 00cd.efgh
3883   uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
3884 
3885   return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
3886 }
3887 
3888 
3889 Instr Assembler::ImmFP64(double imm) { return FP64ToImm8(imm) << ImmFP_offset; }
3890 
3891 
3892 // Code generation helpers.
3893 void Assembler::MoveWide(const Register& rd,
3894                          uint64_t imm,
3895                          int shift,
3896                          MoveWideImmediateOp mov_op) {
3897   // Ignore the top 32 bits of an immediate if we're moving to a W register.
3898   if (rd.Is32Bits()) {
3899     // Check that the top 32 bits are zero (a positive 32-bit number) or top
3900     // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
3901     VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
3902                 ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
3903     imm &= kWRegMask;
3904   }
3905 
3906   if (shift >= 0) {
3907     // Explicit shift specified.
3908     VIXL_ASSERT((shift == 0) || (shift == 16) || (shift == 32) ||
3909                 (shift == 48));
3910     VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
3911     shift /= 16;
3912   } else {
3913     // Calculate a new immediate and shift combination to encode the immediate
3914     // argument.
3915     shift = 0;
3916     if ((imm & 0xffffffffffff0000) == 0) {
3917       // Nothing to do.
3918     } else if ((imm & 0xffffffff0000ffff) == 0) {
3919       imm >>= 16;
3920       shift = 1;
3921     } else if ((imm & 0xffff0000ffffffff) == 0) {
3922       VIXL_ASSERT(rd.Is64Bits());
3923       imm >>= 32;
3924       shift = 2;
3925     } else if ((imm & 0x0000ffffffffffff) == 0) {
3926       VIXL_ASSERT(rd.Is64Bits());
3927       imm >>= 48;
3928       shift = 3;
3929     }
3930   }
3931 
3932   VIXL_ASSERT(IsUint16(imm));
3933 
3934   Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) |
3935        ShiftMoveWide(shift));
3936 }
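// Example of the automatic shift selection above (added commentary): with
// shift == -1, an immediate of 0x12340000 matches the second test
// ((imm & 0xffffffff0000ffff) == 0), so it is encoded as imm16 = 0x1234 with
// hw = 1, i.e. "movz x0, #0x1234, lsl #16".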
3937 
3938 
3939 void Assembler::AddSub(const Register& rd,
3940                        const Register& rn,
3941                        const Operand& operand,
3942                        FlagsUpdate S,
3943                        AddSubOp op) {
3944   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
3945   if (operand.IsImmediate()) {
3946     int64_t immediate = operand.GetImmediate();
3947     VIXL_ASSERT(IsImmAddSub(immediate));
3948     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
3949     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
3950          ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
3951   } else if (operand.IsShiftedRegister()) {
3952     VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
3953     VIXL_ASSERT(operand.GetShift() != ROR);
3954 
3955     // For instructions of the form:
3956     //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
3957     //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
3958     //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
3959     //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
3960     // or their 64-bit register equivalents, convert the operand from shifted to
3961     // extended register mode, and emit an add/sub extended instruction.
3962     if (rn.IsSP() || rd.IsSP()) {
3963       VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
3964       DataProcExtendedRegister(rd,
3965                                rn,
3966                                operand.ToExtendedRegister(),
3967                                S,
3968                                AddSubExtendedFixed | op);
3969     } else {
3970       DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
3971     }
3972   } else {
3973     VIXL_ASSERT(operand.IsExtendedRegister());
3974     DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
3975   }
3976 }
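// Note (added commentary): the SP special case above means, for example, that
// add(x0, sp, Operand(x1, LSL, 2)) is emitted in the extended-register form,
// since the shifted-register encoding interprets register 31 as xzr rather
// than sp.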
3977 
3978 
3979 void Assembler::AddSubWithCarry(const Register& rd,
3980                                 const Register& rn,
3981                                 const Operand& operand,
3982                                 FlagsUpdate S,
3983                                 AddSubWithCarryOp op) {
3984   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
3985   VIXL_ASSERT(rd.GetSizeInBits() == operand.GetRegister().GetSizeInBits());
3986   VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
3987   Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | Rn(rn) | Rd(rd));
3988 }
3989 
3990 
3991 void Assembler::hlt(int code) {
3992   VIXL_ASSERT(IsUint16(code));
3993   Emit(HLT | ImmException(code));
3994 }
3995 
3996 
3997 void Assembler::brk(int code) {
3998   VIXL_ASSERT(IsUint16(code));
3999   Emit(BRK | ImmException(code));
4000 }
4001 
4002 
4003 void Assembler::svc(int code) { Emit(SVC | ImmException(code)); }
4004 
4005 
4006 // TODO(all): The third parameter should be passed by reference, but gcc 4.8.2
4007 // then reports a bogus uninitialised warning.
4008 void Assembler::Logical(const Register& rd,
4009                         const Register& rn,
4010                         const Operand operand,
4011                         LogicalOp op) {
4012   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
4013   if (operand.IsImmediate()) {
4014     int64_t immediate = operand.GetImmediate();
4015     unsigned reg_size = rd.GetSizeInBits();
4016 
4017     VIXL_ASSERT(immediate != 0);
4018     VIXL_ASSERT(immediate != -1);
4019     VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
4020 
4021     // If the operation is NOT, invert the operation and immediate.
4022     if ((op & NOT) == NOT) {
4023       op = static_cast<LogicalOp>(op & ~NOT);
4024       immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
4025     }
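    // For example, a bic of w1 with immediate 0xf reaches this point as an and
    // with immediate 0xfffffff0, which is encodable as a logical immediate.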
4026 
4027     unsigned n, imm_s, imm_r;
4028     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
4029       // Immediate can be encoded in the instruction.
4030       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
4031     } else {
4032       // This case is handled in the macro assembler.
4033       VIXL_UNREACHABLE();
4034     }
4035   } else {
4036     VIXL_ASSERT(operand.IsShiftedRegister());
4037     VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
4038     Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
4039     DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
4040   }
4041 }
4042 
4043 
4044 void Assembler::LogicalImmediate(const Register& rd,
4045                                  const Register& rn,
4046                                  unsigned n,
4047                                  unsigned imm_s,
4048                                  unsigned imm_r,
4049                                  LogicalOp op) {
4050   unsigned reg_size = rd.GetSizeInBits();
4051   Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
4052   Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
4053        ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
4054        Rn(rn));
4055 }
4056 
4057 
4058 void Assembler::ConditionalCompare(const Register& rn,
4059                                    const Operand& operand,
4060                                    StatusFlags nzcv,
4061                                    Condition cond,
4062                                    ConditionalCompareOp op) {
4063   Instr ccmpop;
4064   if (operand.IsImmediate()) {
4065     int64_t immediate = operand.GetImmediate();
4066     VIXL_ASSERT(IsImmConditionalCompare(immediate));
4067     ccmpop = ConditionalCompareImmediateFixed | op |
4068              ImmCondCmp(static_cast<unsigned>(immediate));
4069   } else {
4070     VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
4071     ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.GetRegister());
4072   }
4073   Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
4074 }
4075 
4076 
4077 void Assembler::DataProcessing1Source(const Register& rd,
4078                                       const Register& rn,
4079                                       DataProcessing1SourceOp op) {
4080   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
4081   Emit(SF(rn) | op | Rn(rn) | Rd(rd));
4082 }
4083 
4084 
4085 void Assembler::FPDataProcessing1Source(const VRegister& vd,
4086                                         const VRegister& vn,
4087                                         FPDataProcessing1SourceOp op) {
4088   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
4089   Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
4090 }
4091 
4092 
4093 void Assembler::FPDataProcessing3Source(const VRegister& vd,
4094                                         const VRegister& vn,
4095                                         const VRegister& vm,
4096                                         const VRegister& va,
4097                                         FPDataProcessing3SourceOp op) {
4098   VIXL_ASSERT(vd.Is1S() || vd.Is1D());
4099   VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
4100   Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
4101 }
4102 
4103 
4104 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
4105                                         const int imm8,
4106                                         const int left_shift,
4107                                         NEONModifiedImmediateOp op) {
4108   VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
4109               vd.Is4S());
4110   VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
4111               (left_shift == 24));
4112   VIXL_ASSERT(IsUint8(imm8));
4113 
4114   int cmode_1, cmode_2, cmode_3;
4115   if (vd.Is8B() || vd.Is16B()) {
4116     VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
4117     cmode_1 = 1;
4118     cmode_2 = 1;
4119     cmode_3 = 1;
4120   } else {
4121     cmode_1 = (left_shift >> 3) & 1;
4122     cmode_2 = left_shift >> 4;
4123     cmode_3 = 0;
4124     if (vd.Is4H() || vd.Is8H()) {
4125       VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
4126       cmode_3 = 1;
4127     }
4128   }
4129   int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
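  // The resulting cmode is 0xx0 for S-sized lanes (LSL #0/8/16/24), 10x0 for
  // H-sized lanes (LSL #0/8) and 1110 for B-sized lanes, matching the AdvSIMD
  // modified-immediate cmode encodings.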
4130 
4131   int q = vd.IsQ() ? NEON_Q : 0;
4132 
4133   Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4134 }
4135 
4136 
4137 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
4138                                         const int imm8,
4139                                         const int shift_amount,
4140                                         NEONModifiedImmediateOp op) {
4141   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
4142   VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
4143   VIXL_ASSERT(IsUint8(imm8));
4144 
4145   int cmode_0 = (shift_amount >> 4) & 1;
4146   int cmode = 0xc | cmode_0;
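  // This gives cmode 0b1100 for MSL #8 and 0b1101 for MSL #16, the two
  // shifting-ones (MSL) encodings.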
4147 
4148   int q = vd.IsQ() ? NEON_Q : 0;
4149 
4150   Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4151 }
4152 
4153 
4154 void Assembler::EmitShift(const Register& rd,
4155                           const Register& rn,
4156                           Shift shift,
4157                           unsigned shift_amount) {
4158   switch (shift) {
4159     case LSL:
4160       lsl(rd, rn, shift_amount);
4161       break;
4162     case LSR:
4163       lsr(rd, rn, shift_amount);
4164       break;
4165     case ASR:
4166       asr(rd, rn, shift_amount);
4167       break;
4168     case ROR:
4169       ror(rd, rn, shift_amount);
4170       break;
4171     default:
4172       VIXL_UNREACHABLE();
4173   }
4174 }
4175 
4176 
4177 void Assembler::EmitExtendShift(const Register& rd,
4178                                 const Register& rn,
4179                                 Extend extend,
4180                                 unsigned left_shift) {
4181   VIXL_ASSERT(rd.GetSizeInBits() >= rn.GetSizeInBits());
4182   unsigned reg_size = rd.GetSizeInBits();
4183   // Use the correct size of register.
4184   Register rn_ = Register(rn.GetCode(), rd.GetSizeInBits());
4185   // Bits extracted are high_bit:0.
4186   unsigned high_bit = (8 << (extend & 0x3)) - 1;
4187   // Number of bits left in the result that are not introduced by the shift.
4188   unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
4189 
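  // For example, UXTB with a left shift of 4 on W registers gives high_bit == 7
  // and non_shift_bits == 28, so the first branch below emits
  // ubfm(rd, rn_, 28, 7), i.e. ubfiz rd, rn_, #4, #8: the low byte is
  // zero-extended and shifted left by four.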
4190   if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
4191     switch (extend) {
4192       case UXTB:
4193       case UXTH:
4194       case UXTW:
4195         ubfm(rd, rn_, non_shift_bits, high_bit);
4196         break;
4197       case SXTB:
4198       case SXTH:
4199       case SXTW:
4200         sbfm(rd, rn_, non_shift_bits, high_bit);
4201         break;
4202       case UXTX:
4203       case SXTX: {
4204         VIXL_ASSERT(rn.GetSizeInBits() == kXRegSize);
4205         // Nothing to extend. Just shift.
4206         lsl(rd, rn_, left_shift);
4207         break;
4208       }
4209       default:
4210         VIXL_UNREACHABLE();
4211     }
4212   } else {
4213     // No need to extend as the extended bits would be shifted away.
4214     lsl(rd, rn_, left_shift);
4215   }
4216 }
4217 
4218 
4219 void Assembler::DataProcShiftedRegister(const Register& rd,
4220                                         const Register& rn,
4221                                         const Operand& operand,
4222                                         FlagsUpdate S,
4223                                         Instr op) {
4224   VIXL_ASSERT(operand.IsShiftedRegister());
4225   VIXL_ASSERT(rn.Is64Bits() ||
4226               (rn.Is32Bits() && IsUint5(operand.GetShiftAmount())));
4227   Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.GetShift()) |
4228        ImmDPShift(operand.GetShiftAmount()) | Rm(operand.GetRegister()) |
4229        Rn(rn) | Rd(rd));
4230 }
4231 
4232 
4233 void Assembler::DataProcExtendedRegister(const Register& rd,
4234                                          const Register& rn,
4235                                          const Operand& operand,
4236                                          FlagsUpdate S,
4237                                          Instr op) {
4238   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
4239   Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) |
4240        ExtendMode(operand.GetExtend()) |
4241        ImmExtendShift(operand.GetShiftAmount()) | dest_reg | RnSP(rn));
4242 }
4243 
4244 
4245 Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
4246                                      unsigned access_size,
4247                                      LoadStoreScalingOption option) {
4248   Instr base = RnSP(addr.GetBaseRegister());
4249   int64_t offset = addr.GetOffset();
4250 
4251   if (addr.IsImmediateOffset()) {
4252     bool prefer_unscaled =
4253         (option == PreferUnscaledOffset) || (option == RequireUnscaledOffset);
4254     if (prefer_unscaled && IsImmLSUnscaled(offset)) {
4255       // Use the unscaled addressing mode.
4256       return base | LoadStoreUnscaledOffsetFixed |
4257              ImmLS(static_cast<int>(offset));
4258     }
4259 
4260     if ((option != RequireUnscaledOffset) &&
4261         IsImmLSScaled(offset, access_size)) {
4262       // Use the scaled addressing mode.
4263       return base | LoadStoreUnsignedOffsetFixed |
4264              ImmLSUnsigned(static_cast<int>(offset) >> access_size);
4265     }
4266 
4267     if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
4268       // Use the unscaled addressing mode.
4269       return base | LoadStoreUnscaledOffsetFixed |
4270              ImmLS(static_cast<int>(offset));
4271     }
4272   }
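  // Note that with access_size == 3 (a doubleword access) an offset of 8 fits
  // both immediate forms above: imm9 == 8 unscaled or imm12 == 1 scaled; the
  // scaling option decides which one is used.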
4273 
4274   // All remaining addressing modes are register-offset, pre-indexed or
4275   // post-indexed modes.
4276   VIXL_ASSERT((option != RequireUnscaledOffset) &&
4277               (option != RequireScaledOffset));
4278 
4279   if (addr.IsRegisterOffset()) {
4280     Extend ext = addr.GetExtend();
4281     Shift shift = addr.GetShift();
4282     unsigned shift_amount = addr.GetShiftAmount();
4283 
4284     // LSL is encoded in the option field as UXTX.
4285     if (shift == LSL) {
4286       ext = UXTX;
4287     }
4288 
4289     // Shifts are encoded in one bit, indicating a left shift by the memory
4290     // access size.
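    // For example, for a doubleword access (access_size == 3), [x0, x1, LSL #3]
    // sets this bit and [x0, x1] leaves it clear.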
4291     VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
4292     return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) |
4293            ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
4294   }
4295 
4296   if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
4297     return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
4298   }
4299 
4300   if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
4301     return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
4302   }
4303 
4304   // If this point is reached, the MemOperand (addr) cannot be encoded.
4305   VIXL_UNREACHABLE();
4306   return 0;
4307 }
4308 
4309 
4310 void Assembler::LoadStore(const CPURegister& rt,
4311                           const MemOperand& addr,
4312                           LoadStoreOp op,
4313                           LoadStoreScalingOption option) {
4314   Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
4315 }
4316 
4317 
4318 void Assembler::Prefetch(PrefetchOperation op,
4319                          const MemOperand& addr,
4320                          LoadStoreScalingOption option) {
4321   VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
4322 
4323   Instr prfop = ImmPrefetchOperation(op);
4324   Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
4325 }
4326 
4327 
4328 bool Assembler::IsImmAddSub(int64_t immediate) {
4329   return IsUint12(immediate) ||
4330          (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
4331 }
4332 
4333 
4334 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
4335   return IsUint5(immediate);
4336 }
4337 
4338 
4339 bool Assembler::IsImmFP32(float imm) {
4340   // Valid values will have the form:
4341   // aBbb.bbbc.defg.h000.0000.0000.0000.0000
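  // For example, 1.0f (0x3f800000), 0.5f (0x3f000000) and 31.0f (0x41f80000)
  // satisfy these constraints, while 0.1f (0x3dcccccd) does not.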
4342   uint32_t bits = FloatToRawbits(imm);
4343   // bits[18..0] are cleared.
4344   if ((bits & 0x7ffff) != 0) {
4345     return false;
4346   }
4347 
4348   // bits[29..25] are all set or all cleared.
4349   uint32_t b_pattern = (bits >> 16) & 0x3e00;
4350   if (b_pattern != 0 && b_pattern != 0x3e00) {
4351     return false;
4352   }
4353 
4354   // bit[30] and bit[29] are opposite.
4355   if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
4356     return false;
4357   }
4358 
4359   return true;
4360 }
4361 
4362 
4363 bool Assembler::IsImmFP64(double imm) {
4364   // Valid values will have the form:
4365   // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
4366   // 0000.0000.0000.0000.0000.0000.0000.0000
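  // For example, 1.0 (0x3ff0000000000000) and -2.5 (0xc004000000000000)
  // satisfy these constraints, while 0.1 (0x3fb999999999999a) does not.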
4367   uint64_t bits = DoubleToRawbits(imm);
4368   // bits[47..0] are cleared.
4369   if ((bits & 0x0000ffffffffffff) != 0) {
4370     return false;
4371   }
4372 
4373   // bits[61..54] are all set or all cleared.
4374   uint32_t b_pattern = (bits >> 48) & 0x3fc0;
4375   if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
4376     return false;
4377   }
4378 
4379   // bit[62] and bit[61] are opposite.
4380   if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
4381     return false;
4382   }
4383 
4384   return true;
4385 }
4386 
4387 
4388 bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
4389   VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
4390   return IsMultiple(offset, 1 << access_size) &&
4391          IsInt7(offset / (1 << access_size));
4392 }
4393 
4394 
4395 bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
4396   VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
4397   return IsMultiple(offset, 1 << access_size) &&
4398          IsUint12(offset / (1 << access_size));
4399 }
4400 
4401 
4402 bool Assembler::IsImmLSUnscaled(int64_t offset) { return IsInt9(offset); }
4403 
4404 
4405 // The movn instruction can generate immediates containing an arbitrary 16-bit
4406 // value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
4407 bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
4408   return IsImmMovz(~imm, reg_size);
4409 }
4410 
4411 
4412 // The movz instruction can generate immediates containing an arbitrary 16-bit
4413 // value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
4414 bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
4415   VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
4416   return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
4417 }
4418 
4419 
4420 // Test if a given value can be encoded in the immediate field of a logical
4421 // instruction.
4422 // If it can be encoded, the function returns true, and values pointed to by n,
4423 // imm_s and imm_r are updated with immediates encoded in the format required
4424 // by the corresponding fields in the logical instruction.
4425 // If it can not be encoded, the function returns false, and the values pointed
4426 // to by n, imm_s and imm_r are undefined.
4427 bool Assembler::IsImmLogical(uint64_t value,
4428                              unsigned width,
4429                              unsigned* n,
4430                              unsigned* imm_s,
4431                              unsigned* imm_r) {
4432   VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
4433 
4434   bool negate = false;
4435 
4436   // Logical immediates are encoded using parameters n, imm_s and imm_r using
4437   // the following table:
4438   //
4439   //    N   imms    immr    size        S             R
4440   //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
4441   //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
4442   //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
4443   //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
4444   //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
4445   //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
4446   // (s bits must not be all set)
4447   //
4448   // A pattern is constructed of size bits, where the least significant S+1 bits
4449   // are set. The pattern is rotated right by R, and repeated across a 32 or
4450   // 64-bit value, depending on destination register width.
4451   //
4452   // Put another way: the basic format of a logical immediate is a single
4453   // contiguous stretch of 1 bits, repeated across the whole word at intervals
4454   // given by a power of 2. To identify them quickly, we first locate the
4455   // lowest stretch of 1 bits, then the next 1 bit above that; that combination
4456   // is different for every logical immediate, so it gives us all the
4457   // information we need to identify the only logical immediate that our input
4458   // could be, and then we simply check if that's the value we actually have.
4459   //
4460   // (The rotation parameter does give the possibility of the stretch of 1 bits
4461   // going 'round the end' of the word. To deal with that, we observe that in
4462   // any situation where that happens the bitwise NOT of the value is also a
4463   // valid logical immediate. So we simply invert the input whenever its low bit
4464   // is set, and then we know that the rotated case can't arise.)
4465 
4466   if (value & 1) {
4467     // If the low bit is 1, negate the value, and set a flag to remember that we
4468     // did (so that we can adjust the return values appropriately).
4469     negate = true;
4470     value = ~value;
4471   }
4472 
4473   if (width == kWRegSize) {
4474     // To handle 32-bit logical immediates, the very easiest thing is to repeat
4475     // the input value twice to make a 64-bit word. The correct encoding of that
4476     // as a logical immediate will also be the correct encoding of the 32-bit
4477     // value.
4478 
4479     // Avoid making the assumption that the most-significant 32 bits are zero by
4480     // shifting the value left and duplicating it.
4481     value <<= kWRegSize;
4482     value |= value >> kWRegSize;
4483   }
4484 
4485   // The basic analysis idea: imagine our input word looks like this.
4486   //
4487   //    0011111000111110001111100011111000111110001111100011111000111110
4488   //                                                          c  b    a
4489   //                                                          |<--d-->|
4490   //
4491   // We find the lowest set bit (as an actual power-of-2 value, not its index)
4492   // and call it a. Then we add a to our original number, which wipes out the
4493   // bottommost stretch of set bits and replaces it with a 1 carried into the
4494   // next zero bit. Then we look for the new lowest set bit, which is in
4495   // position b, and subtract it, so now our number is just like the original
4496   // but with the lowest stretch of set bits completely gone. Now we find the
4497   // lowest set bit again, which is position c in the diagram above. Then we'll
4498   // measure the distance d between bit positions a and c (using CLZ), and that
4499   // tells us that the only valid logical immediate that could possibly be equal
4500   // to this number is the one in which a stretch of bits running from a to just
4501   // below b is replicated every d bits.
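  // For example, given the 64-bit input 0x00ff00ff00ff00ff, the low bit is set,
  // so the value is first inverted to 0xff00ff00ff00ff00. Then a is bit 8,
  // b is bit 16 and c is bit 24, giving a repeat distance d of 16 bits.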
4502   uint64_t a = LowestSetBit(value);
4503   uint64_t value_plus_a = value + a;
4504   uint64_t b = LowestSetBit(value_plus_a);
4505   uint64_t value_plus_a_minus_b = value_plus_a - b;
4506   uint64_t c = LowestSetBit(value_plus_a_minus_b);
4507 
4508   int d, clz_a, out_n;
4509   uint64_t mask;
4510 
4511   if (c != 0) {
4512     // The general case, in which there is more than one stretch of set bits.
4513     // Compute the repeat distance d, and set up a bitmask covering the basic
4514     // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
4515     // of these cases the N bit of the output will be zero.
4516     clz_a = CountLeadingZeros(a, kXRegSize);
4517     int clz_c = CountLeadingZeros(c, kXRegSize);
4518     d = clz_a - clz_c;
4519     mask = ((UINT64_C(1) << d) - 1);
4520     out_n = 0;
4521   } else {
4522     // Handle degenerate cases.
4523     //
4524     // If any of those 'find lowest set bit' operations didn't find a set bit at
4525     // all, then the word will have been zero thereafter, so in particular the
4526     // last lowest_set_bit operation will have returned zero. So we can test for
4527     // all the special case conditions in one go by seeing if c is zero.
4528     if (a == 0) {
4529       // The input was zero (or all 1 bits, which will come to here too after we
4530       // inverted it at the start of the function), for which we just return
4531       // false.
4532       return false;
4533     } else {
4534       // Otherwise, if c was zero but a was not, then there's just one stretch
4535       // of set bits in our word, meaning that we have the trivial case of
4536       // d == 64 and only one 'repetition'. Set up all the same variables as in
4537       // the general case above, and set the N bit in the output.
4538       clz_a = CountLeadingZeros(a, kXRegSize);
4539       d = 64;
4540       mask = ~UINT64_C(0);
4541       out_n = 1;
4542     }
4543   }
4544 
4545   // If the repeat period d is not a power of two, it can't be encoded.
4546   if (!IsPowerOf2(d)) {
4547     return false;
4548   }
4549 
4550   if (((b - a) & ~mask) != 0) {
4551     // If the bit stretch (b - a) does not fit within the mask derived from the
4552     // repeat period, then fail.
4553     return false;
4554   }
4555 
4556   // The only possible option is b - a repeated every d bits. Now we're going to
4557   // actually construct the valid logical immediate derived from that
4558   // specification, and see if it equals our original input.
4559   //
4560   // To repeat a value every d bits, we multiply it by a number of the form
4561   // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
4562   // be derived using a table lookup on CLZ(d).
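  // Continuing the example above, d == 16 gives CountLeadingZeros(d, kXRegSize)
  // == 59, so multipliers[59 - 57] == 0x0001000100010001 is selected, and
  // (b - a) == 0xff00 multiplied by it reproduces 0xff00ff00ff00ff00.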
4563   static const uint64_t multipliers[] = {
4564       0x0000000000000001UL,
4565       0x0000000100000001UL,
4566       0x0001000100010001UL,
4567       0x0101010101010101UL,
4568       0x1111111111111111UL,
4569       0x5555555555555555UL,
4570   };
4571   uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
4572   uint64_t candidate = (b - a) * multiplier;
4573 
4574   if (value != candidate) {
4575     // The candidate pattern doesn't match our input value, so fail.
4576     return false;
4577   }
4578 
4579   // We have a match! This is a valid logical immediate, so now we have to
4580   // construct the bits and pieces of the instruction encoding that generates
4581   // it.
4582 
4583   // Count the set bits in our basic stretch. The special case of clz(0) == -1
4584   // makes the answer come out right for stretches that reach the very top of
4585   // the word (e.g. numbers like 0xffffc00000000000).
4586   int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
4587   int s = clz_a - clz_b;
4588 
4589   // Decide how many bits to rotate right by, to put the low bit of that basic
4590   // stretch in position a.
4591   int r;
4592   if (negate) {
4593     // If we inverted the input right at the start of this function, here's
4594     // where we compensate: the number of set bits becomes the number of clear
4595     // bits, and the rotation count is based on position b rather than position
4596     // a (since b is the location of the 'lowest' 1 bit after inversion).
4597     s = d - s;
4598     r = (clz_b + 1) & (d - 1);
4599   } else {
4600     r = (clz_a + 1) & (d - 1);
4601   }
4602 
4603   // Now we're done, except for having to encode the S output in such a way that
4604   // it gives both the number of set bits and the length of the repeated
4605   // segment. The s field is encoded like this:
4606   //
4607   //     imms    size        S
4608   //    ssssss    64    UInt(ssssss)
4609   //    0sssss    32    UInt(sssss)
4610   //    10ssss    16    UInt(ssss)
4611   //    110sss     8    UInt(sss)
4612   //    1110ss     4    UInt(ss)
4613   //    11110s     2    UInt(s)
4614   //
4615   // So we 'or' (2 * -d) with our computed s to form imms.
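  // Continuing the example above, d == 16, s == 8 and r == 0, so
  // imm_s == ((2 * -16) | 7) & 0x3f == 0x27 (0b100111), imm_r == 0 and n == 0,
  // which is the encoding of the original input 0x00ff00ff00ff00ff.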
4616   if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
4617     *n = out_n;
4618     *imm_s = ((2 * -d) | (s - 1)) & 0x3f;
4619     *imm_r = r;
4620   }
4621 
4622   return true;
4623 }
4624 
4625 
4626 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
4627   VIXL_ASSERT(rt.IsValid());
4628   if (rt.IsRegister()) {
4629     return rt.Is64Bits() ? LDR_x : LDR_w;
4630   } else {
4631     VIXL_ASSERT(rt.IsVRegister());
4632     switch (rt.GetSizeInBits()) {
4633       case kBRegSize:
4634         return LDR_b;
4635       case kHRegSize:
4636         return LDR_h;
4637       case kSRegSize:
4638         return LDR_s;
4639       case kDRegSize:
4640         return LDR_d;
4641       default:
4642         VIXL_ASSERT(rt.IsQ());
4643         return LDR_q;
4644     }
4645   }
4646 }
4647 
4648 
4649 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
4650   VIXL_ASSERT(rt.IsValid());
4651   if (rt.IsRegister()) {
4652     return rt.Is64Bits() ? STR_x : STR_w;
4653   } else {
4654     VIXL_ASSERT(rt.IsVRegister());
4655     switch (rt.GetSizeInBits()) {
4656       case kBRegSize:
4657         return STR_b;
4658       case kHRegSize:
4659         return STR_h;
4660       case kSRegSize:
4661         return STR_s;
4662       case kDRegSize:
4663         return STR_d;
4664       default:
4665         VIXL_ASSERT(rt.IsQ());
4666         return STR_q;
4667     }
4668   }
4669 }
4670 
4671 
4672 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
4673                                           const CPURegister& rt2) {
4674   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
4675   USE(rt2);
4676   if (rt.IsRegister()) {
4677     return rt.Is64Bits() ? STP_x : STP_w;
4678   } else {
4679     VIXL_ASSERT(rt.IsVRegister());
4680     switch (rt.GetSizeInBytes()) {
4681       case kSRegSizeInBytes:
4682         return STP_s;
4683       case kDRegSizeInBytes:
4684         return STP_d;
4685       default:
4686         VIXL_ASSERT(rt.IsQ());
4687         return STP_q;
4688     }
4689   }
4690 }
4691 
4692 
4693 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
4694                                          const CPURegister& rt2) {
4695   VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
4696   return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
4697                                       LoadStorePairLBit);
4698 }
4699 
4700 
4701 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
4702     const CPURegister& rt, const CPURegister& rt2) {
4703   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
4704   USE(rt2);
4705   if (rt.IsRegister()) {
4706     return rt.Is64Bits() ? STNP_x : STNP_w;
4707   } else {
4708     VIXL_ASSERT(rt.IsVRegister());
4709     switch (rt.GetSizeInBytes()) {
4710       case kSRegSizeInBytes:
4711         return STNP_s;
4712       case kDRegSizeInBytes:
4713         return STNP_d;
4714       default:
4715         VIXL_ASSERT(rt.IsQ());
4716         return STNP_q;
4717     }
4718   }
4719 }
4720 
4721 
4722 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
4723     const CPURegister& rt, const CPURegister& rt2) {
4724   VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
4725   return static_cast<LoadStorePairNonTemporalOp>(
4726       StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
4727 }
4728 
4729 
4730 LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
4731   if (rt.IsRegister()) {
4732     return rt.IsX() ? LDR_x_lit : LDR_w_lit;
4733   } else {
4734     VIXL_ASSERT(rt.IsVRegister());
4735     switch (rt.GetSizeInBytes()) {
4736       case kSRegSizeInBytes:
4737         return LDR_s_lit;
4738       case kDRegSizeInBytes:
4739         return LDR_d_lit;
4740       default:
4741         VIXL_ASSERT(rt.IsQ());
4742         return LDR_q_lit;
4743     }
4744   }
4745 }
4746 
4747 
4748 bool AreAliased(const CPURegister& reg1,
4749                 const CPURegister& reg2,
4750                 const CPURegister& reg3,
4751                 const CPURegister& reg4,
4752                 const CPURegister& reg5,
4753                 const CPURegister& reg6,
4754                 const CPURegister& reg7,
4755                 const CPURegister& reg8) {
4756   int number_of_valid_regs = 0;
4757   int number_of_valid_fpregs = 0;
4758 
4759   RegList unique_regs = 0;
4760   RegList unique_fpregs = 0;
4761 
4762   const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
4763 
4764   for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
4765     if (regs[i].IsRegister()) {
4766       number_of_valid_regs++;
4767       unique_regs |= regs[i].GetBit();
4768     } else if (regs[i].IsVRegister()) {
4769       number_of_valid_fpregs++;
4770       unique_fpregs |= regs[i].GetBit();
4771     } else {
4772       VIXL_ASSERT(!regs[i].IsValid());
4773     }
4774   }
4775 
4776   int number_of_unique_regs = CountSetBits(unique_regs);
4777   int number_of_unique_fpregs = CountSetBits(unique_fpregs);
4778 
4779   VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
4780   VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
4781 
4782   return (number_of_valid_regs != number_of_unique_regs) ||
4783          (number_of_valid_fpregs != number_of_unique_fpregs);
4784 }
4785 
4786 
4787 bool AreSameSizeAndType(const CPURegister& reg1,
4788                         const CPURegister& reg2,
4789                         const CPURegister& reg3,
4790                         const CPURegister& reg4,
4791                         const CPURegister& reg5,
4792                         const CPURegister& reg6,
4793                         const CPURegister& reg7,
4794                         const CPURegister& reg8) {
4795   VIXL_ASSERT(reg1.IsValid());
4796   bool match = true;
4797   match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
4798   match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
4799   match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
4800   match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
4801   match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
4802   match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
4803   match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
4804   return match;
4805 }
4806 
4807 
4808 bool AreSameFormat(const VRegister& reg1,
4809                    const VRegister& reg2,
4810                    const VRegister& reg3,
4811                    const VRegister& reg4) {
4812   VIXL_ASSERT(reg1.IsValid());
4813   bool match = true;
4814   match &= !reg2.IsValid() || reg2.IsSameFormat(reg1);
4815   match &= !reg3.IsValid() || reg3.IsSameFormat(reg1);
4816   match &= !reg4.IsValid() || reg4.IsSameFormat(reg1);
4817   return match;
4818 }
4819 
4820 
4821 bool AreConsecutive(const VRegister& reg1,
4822                     const VRegister& reg2,
4823                     const VRegister& reg3,
4824                     const VRegister& reg4) {
4825   VIXL_ASSERT(reg1.IsValid());
4826 
4827   if (!reg2.IsValid()) {
4828     return true;
4829   } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfVRegisters)) {
4830     return false;
4831   }
4832 
4833   if (!reg3.IsValid()) {
4834     return true;
4835   } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfVRegisters)) {
4836     return false;
4837   }
4838 
4839   if (!reg4.IsValid()) {
4840     return true;
4841   } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfVRegisters)) {
4842     return false;
4843   }
4844 
4845   return true;
4846 }
4847 }  // namespace aarch64
4848 }  // namespace vixl
4849