1 // Copyright 2013, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 
28 #include <cmath>
29 #include "a64/assembler-a64.h"
30 
31 namespace vixl {
32 
33 // CPURegList utilities.
34 CPURegister CPURegList::PopLowestIndex() {
35   if (IsEmpty()) {
36     return NoCPUReg;
37   }
38   int index = CountTrailingZeros(list_, kRegListSizeInBits);
39   VIXL_ASSERT((1 << index) & list_);
40   Remove(index);
41   return CPURegister(index, size_, type_);
42 }
43 
44 
45 CPURegister CPURegList::PopHighestIndex() {
46   VIXL_ASSERT(IsValid());
47   if (IsEmpty()) {
48     return NoCPUReg;
49   }
50   int index = CountLeadingZeros(list_, kRegListSizeInBits);
51   index = kRegListSizeInBits - 1 - index;
52   VIXL_ASSERT((1 << index) & list_);
53   Remove(index);
54   return CPURegister(index, size_, type_);
55 }
56 
57 
58 bool CPURegList::IsValid() const {
59   if ((type_ == CPURegister::kRegister) ||
60       (type_ == CPURegister::kFPRegister)) {
61     bool is_valid = true;
62     // Try to create a CPURegister for each element in the list.
63     for (int i = 0; i < kRegListSizeInBits; i++) {
64       if (((list_ >> i) & 1) != 0) {
65         is_valid &= CPURegister(i, size_, type_).IsValid();
66       }
67     }
68     return is_valid;
69   } else if (type_ == CPURegister::kNoRegister) {
70     // We can't use IsEmpty here because that asserts IsValid().
71     return list_ == 0;
72   } else {
73     return false;
74   }
75 }
76 
77 
78 void CPURegList::RemoveCalleeSaved() {
79   if (type() == CPURegister::kRegister) {
80     Remove(GetCalleeSaved(RegisterSizeInBits()));
81   } else if (type() == CPURegister::kFPRegister) {
82     Remove(GetCalleeSavedFP(RegisterSizeInBits()));
83   } else {
84     VIXL_ASSERT(type() == CPURegister::kNoRegister);
85     VIXL_ASSERT(IsEmpty());
86     // The list must already be empty, so do nothing.
87   }
88 }
89 
90 
91 CPURegList CPURegList::GetCalleeSaved(unsigned size) {
92   return CPURegList(CPURegister::kRegister, size, 19, 29);
93 }
94 
95 
96 CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
97   return CPURegList(CPURegister::kFPRegister, size, 8, 15);
98 }
99 
100 
101 CPURegList CPURegList::GetCallerSaved(unsigned size) {
102   // Registers x0-x18 and lr (x30) are caller-saved.
103   CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
104   list.Combine(lr);
105   return list;
106 }
107 
108 
109 CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
110   // Registers d0-d7 and d16-d31 are caller-saved.
111   CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
112   list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
113   return list;
114 }
115 
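// The list boundaries above follow the AAPCS64 procedure call standard: x19-x29
// and d8-d15 are callee-saved, while x0-x18, lr (x30), d0-d7 and d16-d31 are
// caller-saved. As a rough illustration only (using the helpers defined above):
//
//   CPURegList regs(CPURegister::kRegister, kXRegSize, 0, 30);  // x0-x30.
//   regs.RemoveCalleeSaved();  // Leaves the caller-saved set: x0-x18 and lr.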
116 
117 const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
118 const CPURegList kCalleeSavedFP = CPURegList::GetCalleeSavedFP();
119 const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
120 const CPURegList kCallerSavedFP = CPURegList::GetCallerSavedFP();
121 
122 
123 // Registers.
124 #define WREG(n) w##n,
125 const Register Register::wregisters[] = {
126 REGISTER_CODE_LIST(WREG)
127 };
128 #undef WREG
129 
130 #define XREG(n) x##n,
131 const Register Register::xregisters[] = {
132 REGISTER_CODE_LIST(XREG)
133 };
134 #undef XREG
135 
136 #define SREG(n) s##n,
137 const FPRegister FPRegister::sregisters[] = {
138 REGISTER_CODE_LIST(SREG)
139 };
140 #undef SREG
141 
142 #define DREG(n) d##n,
143 const FPRegister FPRegister::dregisters[] = {
144 REGISTER_CODE_LIST(DREG)
145 };
146 #undef DREG
147 
148 
149 const Register& Register::WRegFromCode(unsigned code) {
150   if (code == kSPRegInternalCode) {
151     return wsp;
152   } else {
153     VIXL_ASSERT(code < kNumberOfRegisters);
154     return wregisters[code];
155   }
156 }
157 
158 
159 const Register& Register::XRegFromCode(unsigned code) {
160   if (code == kSPRegInternalCode) {
161     return sp;
162   } else {
163     VIXL_ASSERT(code < kNumberOfRegisters);
164     return xregisters[code];
165   }
166 }
167 
168 
169 const FPRegister& FPRegister::SRegFromCode(unsigned code) {
170   VIXL_ASSERT(code < kNumberOfFPRegisters);
171   return sregisters[code];
172 }
173 
174 
175 const FPRegister& FPRegister::DRegFromCode(unsigned code) {
176   VIXL_ASSERT(code < kNumberOfFPRegisters);
177   return dregisters[code];
178 }
179 
180 
181 const Register& CPURegister::W() const {
182   VIXL_ASSERT(IsValidRegister());
183   return Register::WRegFromCode(code_);
184 }
185 
186 
187 const Register& CPURegister::X() const {
188   VIXL_ASSERT(IsValidRegister());
189   return Register::XRegFromCode(code_);
190 }
191 
192 
193 const FPRegister& CPURegister::S() const {
194   VIXL_ASSERT(IsValidFPRegister());
195   return FPRegister::SRegFromCode(code_);
196 }
197 
198 
199 const FPRegister& CPURegister::D() const {
200   VIXL_ASSERT(IsValidFPRegister());
201   return FPRegister::DRegFromCode(code_);
202 }
203 
204 
205 // Operand.
206 Operand::Operand(int64_t immediate)
207     : immediate_(immediate),
208       reg_(NoReg),
209       shift_(NO_SHIFT),
210       extend_(NO_EXTEND),
211       shift_amount_(0) {}
212 
213 
214 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
215     : reg_(reg),
216       shift_(shift),
217       extend_(NO_EXTEND),
218       shift_amount_(shift_amount) {
219   VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
220   VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
221   VIXL_ASSERT(!reg.IsSP());
222 }
223 
224 
225 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
226     : reg_(reg),
227       shift_(NO_SHIFT),
228       extend_(extend),
229       shift_amount_(shift_amount) {
230   VIXL_ASSERT(reg.IsValid());
231   VIXL_ASSERT(shift_amount <= 4);
232   VIXL_ASSERT(!reg.IsSP());
233 
234   // Extend modes SXTX and UXTX require a 64-bit register.
235   VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
236 }
237 
238 
239 bool Operand::IsImmediate() const {
240   return reg_.Is(NoReg);
241 }
242 
243 
244 bool Operand::IsShiftedRegister() const {
245   return reg_.IsValid() && (shift_ != NO_SHIFT);
246 }
247 
248 
249 bool Operand::IsExtendedRegister() const {
250   return reg_.IsValid() && (extend_ != NO_EXTEND);
251 }
252 
253 
254 bool Operand::IsZero() const {
255   if (IsImmediate()) {
256     return immediate() == 0;
257   } else {
258     return reg().IsZero();
259   }
260 }
261 
262 
263 Operand Operand::ToExtendedRegister() const {
264   VIXL_ASSERT(IsShiftedRegister());
265   VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
266   return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
267 }
268 
269 
270 // MemOperand
271 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
272   : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
273   VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
274 }
275 
276 
277 MemOperand::MemOperand(Register base,
278                        Register regoffset,
279                        Extend extend,
280                        unsigned shift_amount)
281   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
282     shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
283   VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
284   VIXL_ASSERT(!regoffset.IsSP());
285   VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
286 
287   // SXTX extend mode requires a 64-bit offset register.
288   VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
289 }
290 
291 
292 MemOperand::MemOperand(Register base,
293                        Register regoffset,
294                        Shift shift,
295                        unsigned shift_amount)
296   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
297     shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
298   VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
299   VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
300   VIXL_ASSERT(shift == LSL);
301 }
302 
303 
304 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
305   : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
306   VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
307 
308   if (offset.IsImmediate()) {
309     offset_ = offset.immediate();
310   } else if (offset.IsShiftedRegister()) {
311     VIXL_ASSERT(addrmode == Offset);
312 
313     regoffset_ = offset.reg();
314     shift_= offset.shift();
315     shift_amount_ = offset.shift_amount();
316 
317     extend_ = NO_EXTEND;
318     offset_ = 0;
319 
320     // These assertions match those in the shifted-register constructor.
321     VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
322     VIXL_ASSERT(shift_ == LSL);
323   } else {
324     VIXL_ASSERT(offset.IsExtendedRegister());
325     VIXL_ASSERT(addrmode == Offset);
326 
327     regoffset_ = offset.reg();
328     extend_ = offset.extend();
329     shift_amount_ = offset.shift_amount();
330 
331     shift_= NO_SHIFT;
332     offset_ = 0;
333 
334     // These assertions match those in the extended-register constructor.
335     VIXL_ASSERT(!regoffset_.IsSP());
336     VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
337     VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
338   }
339 }
340 
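// For illustration, the constructors above cover the three offset forms
// (a sketch only, not part of the original source, and assuming the default
// Offset addressing mode declared in the header):
//
//   MemOperand(x0, 16);             // [x0, #16]         immediate offset
//   MemOperand(x0, x1, LSL, 3);     // [x0, x1, lsl #3]  shifted register offset
//   MemOperand(x0, w1, UXTW, 2);    // [x0, w1, uxtw #2] extended register offset
//
// subject to the assertions above (64-bit base, LSL-only shifts, and UXTW,
// SXTW or SXTX extends).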
341 
342 bool MemOperand::IsImmediateOffset() const {
343   return (addrmode_ == Offset) && regoffset_.Is(NoReg);
344 }
345 
346 
347 bool MemOperand::IsRegisterOffset() const {
348   return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
349 }
350 
351 
352 bool MemOperand::IsPreIndex() const {
353   return addrmode_ == PreIndex;
354 }
355 
356 
357 bool MemOperand::IsPostIndex() const {
358   return addrmode_ == PostIndex;
359 }
360 
361 
362 // Assembler
363 Assembler::Assembler(byte* buffer, unsigned buffer_size)
364     : buffer_size_(buffer_size), literal_pool_monitor_(0) {
365 
366   buffer_ = reinterpret_cast<Instruction*>(buffer);
367   pc_ = buffer_;
368   Reset();
369 }
370 
371 
372 Assembler::~Assembler() {
373   VIXL_ASSERT(finalized_ || (pc_ == buffer_));
374   VIXL_ASSERT(literals_.empty());
375 }
376 
377 
378 void Assembler::Reset() {
379 #ifdef DEBUG
380   VIXL_ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
381   VIXL_ASSERT(literal_pool_monitor_ == 0);
382   memset(buffer_, 0, pc_ - buffer_);
383   finalized_ = false;
384 #endif
385   pc_ = buffer_;
386   literals_.clear();
387   next_literal_pool_check_ = pc_ + kLiteralPoolCheckInterval;
388 }
389 
390 
391 void Assembler::FinalizeCode() {
392   EmitLiteralPool();
393 #ifdef DEBUG
394   finalized_ = true;
395 #endif
396 }
397 
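// A minimal usage sketch of the lifecycle above (illustrative only; buffer
// sizing and error handling omitted):
//
//   byte buffer[1024];
//   Assembler assm(buffer, sizeof(buffer));
//   assm.add(x0, x0, Operand(x1));
//   assm.ret(lr);
//   assm.FinalizeCode();   // Emits any pending literal pool.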
398 
399 void Assembler::bind(Label* label) {
400   label->is_bound_ = true;
401   label->target_ = pc_;
402   while (label->IsLinked()) {
403     // Get the address of the following instruction in the chain.
404     Instruction* next_link = label->link_->ImmPCOffsetTarget();
405     // Update the instruction target.
406     label->link_->SetImmPCOffsetTarget(label->target_);
407     // Update the label's link.
408     // If the offset of the branch we just updated was 0 (kEndOfChain) we are
409     // done.
410     label->link_ = (label->link_ != next_link) ? next_link : NULL;
411   }
412 }
413 
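// Unresolved branches to a label are kept in a chain threaded through the
// branch instructions themselves; bind() above walks that chain and patches
// each branch once the target is known. A minimal forward-branch sketch
// (illustrative only):
//
//   Label done;
//   assm.cbz(x0, &done);            // Linked: the offset field threads the chain.
//   assm.add(x1, x1, Operand(1));
//   assm.bind(&done);               // Patches the cbz to branch here.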
414 
415 int Assembler::UpdateAndGetByteOffsetTo(Label* label) {
416   int offset;
417   VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
418   if (label->IsBound()) {
419     offset = label->target() - pc_;
420   } else if (label->IsLinked()) {
421     offset = label->link() - pc_;
422   } else {
423     offset = Label::kEndOfChain;
424   }
425   label->set_link(pc_);
426   return offset;
427 }
428 
429 
430 // Code generation.
431 void Assembler::br(const Register& xn) {
432   VIXL_ASSERT(xn.Is64Bits());
433   Emit(BR | Rn(xn));
434 }
435 
436 
437 void Assembler::blr(const Register& xn) {
438   VIXL_ASSERT(xn.Is64Bits());
439   Emit(BLR | Rn(xn));
440 }
441 
442 
443 void Assembler::ret(const Register& xn) {
444   VIXL_ASSERT(xn.Is64Bits());
445   Emit(RET | Rn(xn));
446 }
447 
448 
449 void Assembler::b(int imm26) {
450   Emit(B | ImmUncondBranch(imm26));
451 }
452 
453 
454 void Assembler::b(int imm19, Condition cond) {
455   Emit(B_cond | ImmCondBranch(imm19) | cond);
456 }
457 
458 
459 void Assembler::b(Label* label) {
460   b(UpdateAndGetInstructionOffsetTo(label));
461 }
462 
463 
464 void Assembler::b(Label* label, Condition cond) {
465   b(UpdateAndGetInstructionOffsetTo(label), cond);
466 }
467 
468 
469 void Assembler::bl(int imm26) {
470   Emit(BL | ImmUncondBranch(imm26));
471 }
472 
473 
474 void Assembler::bl(Label* label) {
475   bl(UpdateAndGetInstructionOffsetTo(label));
476 }
477 
478 
479 void Assembler::cbz(const Register& rt,
480                     int imm19) {
481   Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
482 }
483 
484 
485 void Assembler::cbz(const Register& rt,
486                     Label* label) {
487   cbz(rt, UpdateAndGetInstructionOffsetTo(label));
488 }
489 
490 
491 void Assembler::cbnz(const Register& rt,
492                      int imm19) {
493   Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
494 }
495 
496 
497 void Assembler::cbnz(const Register& rt,
498                      Label* label) {
499   cbnz(rt, UpdateAndGetInstructionOffsetTo(label));
500 }
501 
502 
503 void Assembler::tbz(const Register& rt,
504                     unsigned bit_pos,
505                     int imm14) {
506   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
507   Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
508 }
509 
510 
511 void Assembler::tbz(const Register& rt,
512                     unsigned bit_pos,
513                     Label* label) {
514   tbz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
515 }
516 
517 
518 void Assembler::tbnz(const Register& rt,
519                      unsigned bit_pos,
520                      int imm14) {
521   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
522   Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
523 }
524 
525 
526 void Assembler::tbnz(const Register& rt,
527                      unsigned bit_pos,
528                      Label* label) {
529   tbnz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
530 }
531 
532 
533 void Assembler::adr(const Register& rd, int imm21) {
534   VIXL_ASSERT(rd.Is64Bits());
535   Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
536 }
537 
538 
539 void Assembler::adr(const Register& rd, Label* label) {
540   adr(rd, UpdateAndGetByteOffsetTo(label));
541 }
542 
543 
544 void Assembler::add(const Register& rd,
545                     const Register& rn,
546                     const Operand& operand) {
547   AddSub(rd, rn, operand, LeaveFlags, ADD);
548 }
549 
550 
551 void Assembler::adds(const Register& rd,
552                      const Register& rn,
553                      const Operand& operand) {
554   AddSub(rd, rn, operand, SetFlags, ADD);
555 }
556 
557 
558 void Assembler::cmn(const Register& rn,
559                     const Operand& operand) {
560   Register zr = AppropriateZeroRegFor(rn);
561   adds(zr, rn, operand);
562 }
563 
564 
565 void Assembler::sub(const Register& rd,
566                     const Register& rn,
567                     const Operand& operand) {
568   AddSub(rd, rn, operand, LeaveFlags, SUB);
569 }
570 
571 
572 void Assembler::subs(const Register& rd,
573                      const Register& rn,
574                      const Operand& operand) {
575   AddSub(rd, rn, operand, SetFlags, SUB);
576 }
577 
578 
579 void Assembler::cmp(const Register& rn, const Operand& operand) {
580   Register zr = AppropriateZeroRegFor(rn);
581   subs(zr, rn, operand);
582 }
583 
584 
585 void Assembler::neg(const Register& rd, const Operand& operand) {
586   Register zr = AppropriateZeroRegFor(rd);
587   sub(rd, zr, operand);
588 }
589 
590 
591 void Assembler::negs(const Register& rd, const Operand& operand) {
592   Register zr = AppropriateZeroRegFor(rd);
593   subs(rd, zr, operand);
594 }
595 
596 
597 void Assembler::adc(const Register& rd,
598                     const Register& rn,
599                     const Operand& operand) {
600   AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
601 }
602 
603 
604 void Assembler::adcs(const Register& rd,
605                      const Register& rn,
606                      const Operand& operand) {
607   AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
608 }
609 
610 
611 void Assembler::sbc(const Register& rd,
612                     const Register& rn,
613                     const Operand& operand) {
614   AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
615 }
616 
617 
618 void Assembler::sbcs(const Register& rd,
619                      const Register& rn,
620                      const Operand& operand) {
621   AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
622 }
623 
624 
625 void Assembler::ngc(const Register& rd, const Operand& operand) {
626   Register zr = AppropriateZeroRegFor(rd);
627   sbc(rd, zr, operand);
628 }
629 
630 
631 void Assembler::ngcs(const Register& rd, const Operand& operand) {
632   Register zr = AppropriateZeroRegFor(rd);
633   sbcs(rd, zr, operand);
634 }
635 
636 
637 // Logical instructions.
638 void Assembler::and_(const Register& rd,
639                      const Register& rn,
640                      const Operand& operand) {
641   Logical(rd, rn, operand, AND);
642 }
643 
644 
645 void Assembler::ands(const Register& rd,
646                      const Register& rn,
647                      const Operand& operand) {
648   Logical(rd, rn, operand, ANDS);
649 }
650 
651 
652 void Assembler::tst(const Register& rn,
653                     const Operand& operand) {
654   ands(AppropriateZeroRegFor(rn), rn, operand);
655 }
656 
657 
658 void Assembler::bic(const Register& rd,
659                     const Register& rn,
660                     const Operand& operand) {
661   Logical(rd, rn, operand, BIC);
662 }
663 
664 
665 void Assembler::bics(const Register& rd,
666                      const Register& rn,
667                      const Operand& operand) {
668   Logical(rd, rn, operand, BICS);
669 }
670 
671 
672 void Assembler::orr(const Register& rd,
673                     const Register& rn,
674                     const Operand& operand) {
675   Logical(rd, rn, operand, ORR);
676 }
677 
678 
679 void Assembler::orn(const Register& rd,
680                     const Register& rn,
681                     const Operand& operand) {
682   Logical(rd, rn, operand, ORN);
683 }
684 
685 
686 void Assembler::eor(const Register& rd,
687                     const Register& rn,
688                     const Operand& operand) {
689   Logical(rd, rn, operand, EOR);
690 }
691 
692 
693 void Assembler::eon(const Register& rd,
694                     const Register& rn,
695                     const Operand& operand) {
696   Logical(rd, rn, operand, EON);
697 }
698 
699 
700 void Assembler::lslv(const Register& rd,
701                      const Register& rn,
702                      const Register& rm) {
703   VIXL_ASSERT(rd.size() == rn.size());
704   VIXL_ASSERT(rd.size() == rm.size());
705   Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
706 }
707 
708 
709 void Assembler::lsrv(const Register& rd,
710                      const Register& rn,
711                      const Register& rm) {
712   VIXL_ASSERT(rd.size() == rn.size());
713   VIXL_ASSERT(rd.size() == rm.size());
714   Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
715 }
716 
717 
718 void Assembler::asrv(const Register& rd,
719                      const Register& rn,
720                      const Register& rm) {
721   VIXL_ASSERT(rd.size() == rn.size());
722   VIXL_ASSERT(rd.size() == rm.size());
723   Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
724 }
725 
726 
727 void Assembler::rorv(const Register& rd,
728                      const Register& rn,
729                      const Register& rm) {
730   VIXL_ASSERT(rd.size() == rn.size());
731   VIXL_ASSERT(rd.size() == rm.size());
732   Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
733 }
734 
735 
736 // Bitfield operations.
737 void Assembler::bfm(const Register& rd,
738                      const Register& rn,
739                      unsigned immr,
740                      unsigned imms) {
741   VIXL_ASSERT(rd.size() == rn.size());
742   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
743   Emit(SF(rd) | BFM | N |
744        ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
745 }
746 
747 
748 void Assembler::sbfm(const Register& rd,
749                      const Register& rn,
750                      unsigned immr,
751                      unsigned imms) {
752   VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
753   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
754   Emit(SF(rd) | SBFM | N |
755        ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
756 }
757 
758 
759 void Assembler::ubfm(const Register& rd,
760                      const Register& rn,
761                      unsigned immr,
762                      unsigned imms) {
763   VIXL_ASSERT(rd.size() == rn.size());
764   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
765   Emit(SF(rd) | UBFM | N |
766        ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
767 }
768 
769 
770 void Assembler::extr(const Register& rd,
771                      const Register& rn,
772                      const Register& rm,
773                      unsigned lsb) {
774   VIXL_ASSERT(rd.size() == rn.size());
775   VIXL_ASSERT(rd.size() == rm.size());
776   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
777   Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
778 }
779 
780 
781 void Assembler::csel(const Register& rd,
782                      const Register& rn,
783                      const Register& rm,
784                      Condition cond) {
785   ConditionalSelect(rd, rn, rm, cond, CSEL);
786 }
787 
788 
789 void Assembler::csinc(const Register& rd,
790                       const Register& rn,
791                       const Register& rm,
792                       Condition cond) {
793   ConditionalSelect(rd, rn, rm, cond, CSINC);
794 }
795 
796 
797 void Assembler::csinv(const Register& rd,
798                       const Register& rn,
799                       const Register& rm,
800                       Condition cond) {
801   ConditionalSelect(rd, rn, rm, cond, CSINV);
802 }
803 
804 
805 void Assembler::csneg(const Register& rd,
806                       const Register& rn,
807                       const Register& rm,
808                       Condition cond) {
809   ConditionalSelect(rd, rn, rm, cond, CSNEG);
810 }
811 
812 
813 void Assembler::cset(const Register &rd, Condition cond) {
814   VIXL_ASSERT((cond != al) && (cond != nv));
815   Register zr = AppropriateZeroRegFor(rd);
816   csinc(rd, zr, zr, InvertCondition(cond));
817 }
818 
819 
820 void Assembler::csetm(const Register &rd, Condition cond) {
821   VIXL_ASSERT((cond != al) && (cond != nv));
822   Register zr = AppropriateZeroRegFor(rd);
823   csinv(rd, zr, zr, InvertCondition(cond));
824 }
825 
826 
827 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
828   VIXL_ASSERT((cond != al) && (cond != nv));
829   csinc(rd, rn, rn, InvertCondition(cond));
830 }
831 
832 
833 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
834   VIXL_ASSERT((cond != al) && (cond != nv));
835   csinv(rd, rn, rn, InvertCondition(cond));
836 }
837 
838 
839 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
840   VIXL_ASSERT((cond != al) && (cond != nv));
841   csneg(rd, rn, rn, InvertCondition(cond));
842 }
843 
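// To make the aliases above concrete (an illustrative note): cset, csetm,
// cinc, cinv and cneg are all encoded through the conditional-select forms
// with the condition inverted, so for example
//
//   assm.cset(x0, eq);
//
// emits the same instruction as csinc x0, xzr, xzr, ne.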
844 
845 void Assembler::ConditionalSelect(const Register& rd,
846                                   const Register& rn,
847                                   const Register& rm,
848                                   Condition cond,
849                                   ConditionalSelectOp op) {
850   VIXL_ASSERT(rd.size() == rn.size());
851   VIXL_ASSERT(rd.size() == rm.size());
852   Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
853 }
854 
855 
856 void Assembler::ccmn(const Register& rn,
857                      const Operand& operand,
858                      StatusFlags nzcv,
859                      Condition cond) {
860   ConditionalCompare(rn, operand, nzcv, cond, CCMN);
861 }
862 
863 
864 void Assembler::ccmp(const Register& rn,
865                      const Operand& operand,
866                      StatusFlags nzcv,
867                      Condition cond) {
868   ConditionalCompare(rn, operand, nzcv, cond, CCMP);
869 }
870 
871 
872 void Assembler::DataProcessing3Source(const Register& rd,
873                      const Register& rn,
874                      const Register& rm,
875                      const Register& ra,
876                      DataProcessing3SourceOp op) {
877   Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
878 }
879 
880 
881 void Assembler::mul(const Register& rd,
882                     const Register& rn,
883                     const Register& rm) {
884   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
885   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
886 }
887 
888 
889 void Assembler::madd(const Register& rd,
890                      const Register& rn,
891                      const Register& rm,
892                      const Register& ra) {
893   DataProcessing3Source(rd, rn, rm, ra, MADD);
894 }
895 
896 
897 void Assembler::mneg(const Register& rd,
898                      const Register& rn,
899                      const Register& rm) {
900   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
901   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
902 }
903 
904 
905 void Assembler::msub(const Register& rd,
906                      const Register& rn,
907                      const Register& rm,
908                      const Register& ra) {
909   DataProcessing3Source(rd, rn, rm, ra, MSUB);
910 }
911 
912 
913 void Assembler::umaddl(const Register& rd,
914                        const Register& rn,
915                        const Register& rm,
916                        const Register& ra) {
917   VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
918   VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
919   DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
920 }
921 
922 
923 void Assembler::smaddl(const Register& rd,
924                        const Register& rn,
925                        const Register& rm,
926                        const Register& ra) {
927   VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
928   VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
929   DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
930 }
931 
932 
933 void Assembler::umsubl(const Register& rd,
934                        const Register& rn,
935                        const Register& rm,
936                        const Register& ra) {
937   VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
938   VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
939   DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
940 }
941 
942 
943 void Assembler::smsubl(const Register& rd,
944                        const Register& rn,
945                        const Register& rm,
946                        const Register& ra) {
947   VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
948   VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
949   DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
950 }
951 
952 
953 void Assembler::smull(const Register& rd,
954                       const Register& rn,
955                       const Register& rm) {
956   VIXL_ASSERT(rd.Is64Bits());
957   VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
958   DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
959 }
960 
961 
962 void Assembler::sdiv(const Register& rd,
963                      const Register& rn,
964                      const Register& rm) {
965   VIXL_ASSERT(rd.size() == rn.size());
966   VIXL_ASSERT(rd.size() == rm.size());
967   Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
968 }
969 
970 
971 void Assembler::smulh(const Register& xd,
972                       const Register& xn,
973                       const Register& xm) {
974   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
975   DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
976 }
977 
978 void Assembler::udiv(const Register& rd,
979                      const Register& rn,
980                      const Register& rm) {
981   VIXL_ASSERT(rd.size() == rn.size());
982   VIXL_ASSERT(rd.size() == rm.size());
983   Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
984 }
985 
986 
987 void Assembler::rbit(const Register& rd,
988                      const Register& rn) {
989   DataProcessing1Source(rd, rn, RBIT);
990 }
991 
992 
993 void Assembler::rev16(const Register& rd,
994                       const Register& rn) {
995   DataProcessing1Source(rd, rn, REV16);
996 }
997 
998 
999 void Assembler::rev32(const Register& rd,
1000                       const Register& rn) {
1001   VIXL_ASSERT(rd.Is64Bits());
1002   DataProcessing1Source(rd, rn, REV);
1003 }
1004 
1005 
1006 void Assembler::rev(const Register& rd,
1007                     const Register& rn) {
1008   DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
1009 }
1010 
1011 
1012 void Assembler::clz(const Register& rd,
1013                     const Register& rn) {
1014   DataProcessing1Source(rd, rn, CLZ);
1015 }
1016 
1017 
1018 void Assembler::cls(const Register& rd,
1019                     const Register& rn) {
1020   DataProcessing1Source(rd, rn, CLS);
1021 }
1022 
1023 
1024 void Assembler::ldp(const CPURegister& rt,
1025                     const CPURegister& rt2,
1026                     const MemOperand& src) {
1027   LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
1028 }
1029 
1030 
1031 void Assembler::stp(const CPURegister& rt,
1032                     const CPURegister& rt2,
1033                     const MemOperand& dst) {
1034   LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
1035 }
1036 
1037 
1038 void Assembler::ldpsw(const Register& rt,
1039                       const Register& rt2,
1040                       const MemOperand& src) {
1041   VIXL_ASSERT(rt.Is64Bits());
1042   LoadStorePair(rt, rt2, src, LDPSW_x);
1043 }
1044 
1045 
1046 void Assembler::LoadStorePair(const CPURegister& rt,
1047                               const CPURegister& rt2,
1048                               const MemOperand& addr,
1049                               LoadStorePairOp op) {
1050   // 'rt' and 'rt2' can only be aliased for stores.
1051   VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
1052   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
1053 
1054   Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1055                 ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
1056 
1057   Instr addrmodeop;
1058   if (addr.IsImmediateOffset()) {
1059     addrmodeop = LoadStorePairOffsetFixed;
1060   } else {
1061     VIXL_ASSERT(addr.offset() != 0);
1062     if (addr.IsPreIndex()) {
1063       addrmodeop = LoadStorePairPreIndexFixed;
1064     } else {
1065       VIXL_ASSERT(addr.IsPostIndex());
1066       addrmodeop = LoadStorePairPostIndexFixed;
1067     }
1068   }
1069   Emit(addrmodeop | memop);
1070 }
1071 
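// For illustration, the pair accessors above support the usual prologue and
// epilogue idioms (a sketch only):
//
//   assm.stp(x29, x30, MemOperand(sp, -16, PreIndex));   // Push fp and lr.
//   assm.ldp(x29, x30, MemOperand(sp, 16, PostIndex));   // Pop fp and lr.
//
// The offset is scaled by ImmLSPair() above, so it must be a multiple of the
// access size.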
1072 
1073 void Assembler::ldnp(const CPURegister& rt,
1074                      const CPURegister& rt2,
1075                      const MemOperand& src) {
1076   LoadStorePairNonTemporal(rt, rt2, src,
1077                            LoadPairNonTemporalOpFor(rt, rt2));
1078 }
1079 
1080 
1081 void Assembler::stnp(const CPURegister& rt,
1082                      const CPURegister& rt2,
1083                      const MemOperand& dst) {
1084   LoadStorePairNonTemporal(rt, rt2, dst,
1085                            StorePairNonTemporalOpFor(rt, rt2));
1086 }
1087 
1088 
1089 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
1090                                          const CPURegister& rt2,
1091                                          const MemOperand& addr,
1092                                          LoadStorePairNonTemporalOp op) {
1093   VIXL_ASSERT(!rt.Is(rt2));
1094   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
1095   VIXL_ASSERT(addr.IsImmediateOffset());
1096 
1097   LSDataSize size = CalcLSPairDataSize(
1098     static_cast<LoadStorePairOp>(op & LoadStorePairMask));
1099   Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1100        ImmLSPair(addr.offset(), size));
1101 }
1102 
1103 
1104 // Memory instructions.
1105 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
1106   LoadStore(rt, src, LDRB_w);
1107 }
1108 
1109 
1110 void Assembler::strb(const Register& rt, const MemOperand& dst) {
1111   LoadStore(rt, dst, STRB_w);
1112 }
1113 
1114 
1115 void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
1116   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
1117 }
1118 
1119 
1120 void Assembler::ldrh(const Register& rt, const MemOperand& src) {
1121   LoadStore(rt, src, LDRH_w);
1122 }
1123 
1124 
1125 void Assembler::strh(const Register& rt, const MemOperand& dst) {
1126   LoadStore(rt, dst, STRH_w);
1127 }
1128 
1129 
1130 void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
1131   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
1132 }
1133 
1134 
1135 void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
1136   LoadStore(rt, src, LoadOpFor(rt));
1137 }
1138 
1139 
1140 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
1141   LoadStore(rt, src, StoreOpFor(rt));
1142 }
1143 
1144 
1145 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
1146   VIXL_ASSERT(rt.Is64Bits());
1147   LoadStore(rt, src, LDRSW_x);
1148 }
1149 
1150 
1151 void Assembler::ldr(const Register& rt, uint64_t imm) {
1152   LoadLiteral(rt, imm, rt.Is64Bits() ? LDR_x_lit : LDR_w_lit);
1153 }
1154 
1155 
1156 void Assembler::ldr(const FPRegister& ft, double imm) {
1157   VIXL_ASSERT(ft.Is64Bits());
1158   LoadLiteral(ft, double_to_rawbits(imm), LDR_d_lit);
1159 }
1160 
1161 
1162 void Assembler::ldr(const FPRegister& ft, float imm) {
1163   VIXL_ASSERT(ft.Is32Bits());
1164   LoadLiteral(ft, float_to_rawbits(imm), LDR_s_lit);
1165 }
1166 
1167 
1168 void Assembler::mov(const Register& rd, const Register& rm) {
1169   // Moves involving the stack pointer are encoded as add immediate with
1170   // second operand of zero. Otherwise, orr with first operand zr is
1171   // used.
1172   if (rd.IsSP() || rm.IsSP()) {
1173     add(rd, rm, 0);
1174   } else {
1175     orr(rd, AppropriateZeroRegFor(rd), rm);
1176   }
1177 }
1178 
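// For example (following the choice of encoding above):
//
//   assm.mov(x0, x1);   // Emitted as orr x0, xzr, x1.
//   assm.mov(sp, x0);   // Emitted as add sp, x0, #0.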
1179 
1180 void Assembler::mvn(const Register& rd, const Operand& operand) {
1181   orn(rd, AppropriateZeroRegFor(rd), operand);
1182 }
1183 
1184 
1185 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
1186   VIXL_ASSERT(rt.Is64Bits());
1187   Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
1188 }
1189 
1190 
1191 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
1192   VIXL_ASSERT(rt.Is64Bits());
1193   Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
1194 }
1195 
1196 
1197 void Assembler::hint(SystemHint code) {
1198   Emit(HINT | ImmHint(code) | Rt(xzr));
1199 }
1200 
1201 
1202 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
1203   Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1204 }
1205 
1206 
1207 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
1208   Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1209 }
1210 
1211 
1212 void Assembler::isb() {
1213   Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
1214 }
1215 
1216 
1217 void Assembler::fmov(const FPRegister& fd, double imm) {
1218   VIXL_ASSERT(fd.Is64Bits());
1219   VIXL_ASSERT(IsImmFP64(imm));
1220   Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
1221 }
1222 
1223 
1224 void Assembler::fmov(const FPRegister& fd, float imm) {
1225   VIXL_ASSERT(fd.Is32Bits());
1226   VIXL_ASSERT(IsImmFP32(imm));
1227   Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
1228 }
1229 
1230 
1231 void Assembler::fmov(const Register& rd, const FPRegister& fn) {
1232   VIXL_ASSERT(rd.size() == fn.size());
1233   FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
1234   Emit(op | Rd(rd) | Rn(fn));
1235 }
1236 
1237 
1238 void Assembler::fmov(const FPRegister& fd, const Register& rn) {
1239   VIXL_ASSERT(fd.size() == rn.size());
1240   FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
1241   Emit(op | Rd(fd) | Rn(rn));
1242 }
1243 
1244 
1245 void Assembler::fmov(const FPRegister& fd, const FPRegister& fn) {
1246   VIXL_ASSERT(fd.size() == fn.size());
1247   Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
1248 }
1249 
1250 
1251 void Assembler::fadd(const FPRegister& fd,
1252                      const FPRegister& fn,
1253                      const FPRegister& fm) {
1254   FPDataProcessing2Source(fd, fn, fm, FADD);
1255 }
1256 
1257 
1258 void Assembler::fsub(const FPRegister& fd,
1259                      const FPRegister& fn,
1260                      const FPRegister& fm) {
1261   FPDataProcessing2Source(fd, fn, fm, FSUB);
1262 }
1263 
1264 
1265 void Assembler::fmul(const FPRegister& fd,
1266                      const FPRegister& fn,
1267                      const FPRegister& fm) {
1268   FPDataProcessing2Source(fd, fn, fm, FMUL);
1269 }
1270 
1271 
1272 void Assembler::fmadd(const FPRegister& fd,
1273                       const FPRegister& fn,
1274                       const FPRegister& fm,
1275                       const FPRegister& fa) {
1276   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
1277 }
1278 
1279 
1280 void Assembler::fmsub(const FPRegister& fd,
1281                       const FPRegister& fn,
1282                       const FPRegister& fm,
1283                       const FPRegister& fa) {
1284   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
1285 }
1286 
1287 
1288 void Assembler::fnmadd(const FPRegister& fd,
1289                        const FPRegister& fn,
1290                        const FPRegister& fm,
1291                        const FPRegister& fa) {
1292   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
1293 }
1294 
1295 
1296 void Assembler::fnmsub(const FPRegister& fd,
1297                        const FPRegister& fn,
1298                        const FPRegister& fm,
1299                        const FPRegister& fa) {
1300   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
1301 }
1302 
1303 
1304 void Assembler::fdiv(const FPRegister& fd,
1305                      const FPRegister& fn,
1306                      const FPRegister& fm) {
1307   FPDataProcessing2Source(fd, fn, fm, FDIV);
1308 }
1309 
1310 
1311 void Assembler::fmax(const FPRegister& fd,
1312                      const FPRegister& fn,
1313                      const FPRegister& fm) {
1314   FPDataProcessing2Source(fd, fn, fm, FMAX);
1315 }
1316 
1317 
1318 void Assembler::fmaxnm(const FPRegister& fd,
1319                        const FPRegister& fn,
1320                        const FPRegister& fm) {
1321   FPDataProcessing2Source(fd, fn, fm, FMAXNM);
1322 }
1323 
1324 
1325 void Assembler::fmin(const FPRegister& fd,
1326                      const FPRegister& fn,
1327                      const FPRegister& fm) {
1328   FPDataProcessing2Source(fd, fn, fm, FMIN);
1329 }
1330 
1331 
1332 void Assembler::fminnm(const FPRegister& fd,
1333                        const FPRegister& fn,
1334                        const FPRegister& fm) {
1335   FPDataProcessing2Source(fd, fn, fm, FMINNM);
1336 }
1337 
1338 
1339 void Assembler::fabs(const FPRegister& fd,
1340                      const FPRegister& fn) {
1341   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1342   FPDataProcessing1Source(fd, fn, FABS);
1343 }
1344 
1345 
1346 void Assembler::fneg(const FPRegister& fd,
1347                      const FPRegister& fn) {
1348   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1349   FPDataProcessing1Source(fd, fn, FNEG);
1350 }
1351 
1352 
1353 void Assembler::fsqrt(const FPRegister& fd,
1354                       const FPRegister& fn) {
1355   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1356   FPDataProcessing1Source(fd, fn, FSQRT);
1357 }
1358 
1359 
1360 void Assembler::frinta(const FPRegister& fd,
1361                        const FPRegister& fn) {
1362   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1363   FPDataProcessing1Source(fd, fn, FRINTA);
1364 }
1365 
1366 
1367 void Assembler::frintm(const FPRegister& fd,
1368                        const FPRegister& fn) {
1369   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1370   FPDataProcessing1Source(fd, fn, FRINTM);
1371 }
1372 
1373 
1374 void Assembler::frintn(const FPRegister& fd,
1375                        const FPRegister& fn) {
1376   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1377   FPDataProcessing1Source(fd, fn, FRINTN);
1378 }
1379 
1380 
1381 void Assembler::frintz(const FPRegister& fd,
1382                        const FPRegister& fn) {
1383   VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
1384   FPDataProcessing1Source(fd, fn, FRINTZ);
1385 }
1386 
1387 
1388 void Assembler::fcmp(const FPRegister& fn,
1389                      const FPRegister& fm) {
1390   VIXL_ASSERT(fn.size() == fm.size());
1391   Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
1392 }
1393 
1394 
1395 void Assembler::fcmp(const FPRegister& fn,
1396                      double value) {
1397   USE(value);
1398   // Although the fcmp instruction can strictly only take an immediate value of
1399   // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
1400   // affect the result of the comparison.
1401   VIXL_ASSERT(value == 0.0);
1402   Emit(FPType(fn) | FCMP_zero | Rn(fn));
1403 }
1404 
1405 
1406 void Assembler::fccmp(const FPRegister& fn,
1407                       const FPRegister& fm,
1408                       StatusFlags nzcv,
1409                       Condition cond) {
1410   VIXL_ASSERT(fn.size() == fm.size());
1411   Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
1412 }
1413 
1414 
1415 void Assembler::fcsel(const FPRegister& fd,
1416                       const FPRegister& fn,
1417                       const FPRegister& fm,
1418                       Condition cond) {
1419   VIXL_ASSERT(fd.size() == fn.size());
1420   VIXL_ASSERT(fd.size() == fm.size());
1421   Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
1422 }
1423 
1424 
1425 void Assembler::FPConvertToInt(const Register& rd,
1426                                const FPRegister& fn,
1427                                FPIntegerConvertOp op) {
1428   Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
1429 }
1430 
1431 
1432 void Assembler::fcvt(const FPRegister& fd,
1433                      const FPRegister& fn) {
1434   if (fd.Is64Bits()) {
1435     // Convert float to double.
1436     VIXL_ASSERT(fn.Is32Bits());
1437     FPDataProcessing1Source(fd, fn, FCVT_ds);
1438   } else {
1439     // Convert double to float.
1440     VIXL_ASSERT(fn.Is64Bits());
1441     FPDataProcessing1Source(fd, fn, FCVT_sd);
1442   }
1443 }
1444 
1445 
1446 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
1447   FPConvertToInt(rd, fn, FCVTAU);
1448 }
1449 
1450 
1451 void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
1452   FPConvertToInt(rd, fn, FCVTAS);
1453 }
1454 
1455 
1456 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
1457   FPConvertToInt(rd, fn, FCVTMU);
1458 }
1459 
1460 
1461 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
1462   FPConvertToInt(rd, fn, FCVTMS);
1463 }
1464 
1465 
1466 void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
1467   FPConvertToInt(rd, fn, FCVTNU);
1468 }
1469 
1470 
1471 void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
1472   FPConvertToInt(rd, fn, FCVTNS);
1473 }
1474 
1475 
1476 void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
1477   FPConvertToInt(rd, fn, FCVTZU);
1478 }
1479 
1480 
1481 void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
1482   FPConvertToInt(rd, fn, FCVTZS);
1483 }
1484 
1485 
1486 void Assembler::scvtf(const FPRegister& fd,
1487                       const Register& rn,
1488                       unsigned fbits) {
1489   if (fbits == 0) {
1490     Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
1491   } else {
1492     Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1493          Rd(fd));
1494   }
1495 }
1496 
1497 
1498 void Assembler::ucvtf(const FPRegister& fd,
1499                       const Register& rn,
1500                       unsigned fbits) {
1501   if (fbits == 0) {
1502     Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
1503   } else {
1504     Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1505          Rd(fd));
1506   }
1507 }
1508 
1509 
1510 // Note:
1511 // Below, a difference in case for the same letter indicates a
1512 // negated bit.
1513 // If b is 1, then B is 0.
1514 Instr Assembler::ImmFP32(float imm) {
1515   VIXL_ASSERT(IsImmFP32(imm));
1516   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


Instr Assembler::ImmFP64(double imm) {
  VIXL_ASSERT(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
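  // For example, 1.0 (raw bits 0x3ff0000000000000) packs to an imm8 of 0x70,
  // and -1.0 packs to 0xf0.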
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  if (shift >= 0) {
    // Explicit shift specified.
    VIXL_ASSERT((shift == 0) || (shift == 16) ||
                (shift == 32) || (shift == 48));
    VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
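    // For example, 0x0000123400000000 is encoded as imm16 == 0x1234 with
    // shift == 2 (LSL #32).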
    shift = 0;
    if ((imm & UINT64_C(0xffffffffffff0000)) == 0) {
      // Nothing to do.
    } else if ((imm & UINT64_C(0xffffffff0000ffff)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & UINT64_C(0xffff0000ffffffff)) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & UINT64_C(0x0000ffffffffffff)) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  VIXL_ASSERT(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    VIXL_ASSERT(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted to
    // extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    VIXL_ASSERT(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == operand.reg().size());
  VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  VIXL_ASSERT(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  VIXL_ASSERT(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    VIXL_ASSERT(immediate != 0);
    VIXL_ASSERT(immediate != -1);
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
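    // For example, a BIC of a W register with immediate 0xff is emitted as an
    // AND with immediate 0xffffff00.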
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister());
    VIXL_ASSERT(operand.reg().size() == rd.size());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.size();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    VIXL_ASSERT(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  VIXL_ASSERT(fd.size() == fn.size());
  VIXL_ASSERT(fd.size() == fm.size());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  VIXL_ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  VIXL_ASSERT(rd.size() >= rn.size());
  unsigned reg_size = rd.size();
  // Use the correct size of register.
  Register rn_ = Register(rn.code(), rd.size());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
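  // For example, UXTB with a left shift of 28 on a W register keeps only the
  // low four bits of rn in the result, so no extension is required and a
  // plain shift is sufficient.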

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        VIXL_ASSERT(rn.size() == kXRegSize);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: VIXL_UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}


void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  VIXL_ASSERT(operand.IsShiftedRegister());
  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() &&
              is_uint5(operand.shift_amount())));
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}


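// Arithmetic (add/sub) immediates are unsigned 12-bit values, optionally left
// shifted by 12 bits. For example, 0xfff and 0xfff000 are encodable, but
// 0x1001 is not.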
bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}


void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  ptrdiff_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    VIXL_ASSERT((shift_amount == 0) ||
                (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        VIXL_ASSERT(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  }
}


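// Unscaled load/store offsets are signed 9-bit byte offsets, i.e. in the
// range [-256, 255].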
bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
  return is_int9(offset);
}


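// Scaled load/store offsets are unsigned 12-bit multiples of the access size.
// For example, an 8-byte access (size == 3) can use scaled offsets from 0 to
// 32760.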
bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}


void Assembler::LoadLiteral(const CPURegister& rt,
                            uint64_t imm,
                            LoadLiteralOp op) {
  VIXL_ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));

  BlockLiteralPoolScope scope(this);
  RecordLiteral(imm, rt.SizeInBytes());
  Emit(op | ImmLLiteral(0) | Rt(rt));
}


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  VIXL_ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));

  // Logical immediates are encoded with the parameters n, imm_s and imm_r,
  // according to the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
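  //
  // For example, 0x0000000000000ff0 (eight contiguous set bits, rotated left
  // by four across a 64-bit value) is encoded as N = 1, imm_s = 7 and
  // imm_r = 60 (a rotate right by 60).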
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //
  // TODO: This code does not consider using X/W register overlap to support
  // 64-bit immediates where the top 32-bits are zero, and the bottom 32-bits
  // are an encodable logical immediate.

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if ((value == 0) || (value == kXRegMask) ||
      ((width == kWRegSize) && (value == kWRegMask))) {
    return false;
  }

  unsigned lead_zero = CountLeadingZeros(value, width);
  unsigned lead_one = CountLeadingZeros(~value, width);
  unsigned trail_zero = CountTrailingZeros(value, width);
  unsigned trail_one = CountTrailingZeros(~value, width);
  unsigned set_bits = CountSetBits(value, width);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
  int imm_s_mask = 0x3F;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      *n = 0;
      *imm_s = 0x3C;
      *imm_r = (value & 3) - 1;
      return true;
    }

    *n = (width == 64) ? 1 : 0;
    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      *imm_r = 0;
    } else {
      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
    //    the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      return true;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      return true;
    }

    // 5. If the most-significant half of the bitwise value is equal to the
    //    least-significant half, return to step 2 using the least-significant
    //    half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return false;
  }
}


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
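  // For example, 1.0f and 0.25f are encodable, but 0.0f is not (bits 30 and
  // 29 must differ).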
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  // 0000.0000.0000.0000.0000.0000.0000.0000
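  // For example, 1.0 and -0.5 are encodable, but 0.0 is not (bits 62 and 61
  // must differ).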
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0x0000ffffffffffff)) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
    return false;
  }

  return true;
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
    const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
    const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


void Assembler::RecordLiteral(int64_t imm, unsigned size) {
  literals_.push_front(new Literal(pc_, imm, size));
}


// Check if a literal pool should be emitted. Currently a literal is emitted
// when:
//  * the distance to the first literal load handled by this pool is greater
//    than the recommended distance and the literal pool can be emitted without
//    generating a jump over it.
//  * the distance to the first literal load handled by this pool is greater
//    than twice the recommended distance.
// TODO: refine this heuristic using real world data.
void Assembler::CheckLiteralPool(LiteralPoolEmitOption option) {
  if (IsLiteralPoolBlocked()) {
    // Literal pool emission is forbidden, no point in doing further checks.
    return;
  }

  if (literals_.empty()) {
    // No literal pool to emit.
    next_literal_pool_check_ += kLiteralPoolCheckInterval;
    return;
  }

  intptr_t distance = pc_ - literals_.back()->pc_;
  if ((distance < kRecommendedLiteralPoolRange) ||
      ((option == JumpRequired) &&
       (distance < (2 * kRecommendedLiteralPoolRange)))) {
    // We prefer not to have to jump over the literal pool.
    next_literal_pool_check_ += kLiteralPoolCheckInterval;
    return;
  }

  EmitLiteralPool(option);
}


void Assembler::EmitLiteralPool(LiteralPoolEmitOption option) {
  // Prevent recursive calls while emitting the literal pool.
  BlockLiteralPoolScope scope(this);

  Label marker;
  Label start_of_pool;
  Label end_of_pool;

  if (option == JumpRequired) {
    b(&end_of_pool);
  }

  // Leave space for a literal pool marker. This is populated later, once the
  // size of the pool is known.
  bind(&marker);
  nop();

  // Now populate the literal pool.
  bind(&start_of_pool);
  std::list<Literal*>::iterator it;
  for (it = literals_.begin(); it != literals_.end(); it++) {
    // Update the load-literal instruction to point to this pool entry.
    Instruction* load_literal = (*it)->pc_;
    load_literal->SetImmLLiteral(pc_);
    // Copy the data into the pool.
    uint64_t value = (*it)->value_;
    unsigned size = (*it)->size_;
    VIXL_ASSERT((size == kXRegSizeInBytes) || (size == kWRegSizeInBytes));
    VIXL_ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
    memcpy(pc_, &value, size);
    pc_ += size;
    delete *it;
  }
  literals_.clear();
  bind(&end_of_pool);

  // The pool size should always be a multiple of four bytes because that is the
  // scaling applied by the LDR(literal) instruction, even for X-register loads.
  VIXL_ASSERT((SizeOfCodeGeneratedSince(&start_of_pool) % 4) == 0);
  uint64_t pool_size = SizeOfCodeGeneratedSince(&start_of_pool) / 4;

  // Literal pool marker indicating the size in words of the literal pool.
  // We use a literal load to the zero register, the offset indicating the
  // size in words. This instruction can encode a large enough offset to span
  // the entire pool at its maximum size.
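  // For example, a pool holding a single X-register literal is two words, so
  // the marker encodes an offset of two words.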
  Instr marker_instruction = LDR_x_lit | ImmLLiteral(pool_size) | Rt(xzr);
  memcpy(marker.target(), &marker_instruction, kInstructionSize);

  next_literal_pool_check_ = pc_ + kLiteralPoolCheckInterval;
}


// Return the size in bytes required by the literal pool entries. This does
// not include any marker or branch over the literal pool itself.
size_t Assembler::LiteralPoolSize() {
  size_t size = 0;

  std::list<Literal*>::iterator it;
  for (it = literals_.begin(); it != literals_.end(); it++) {
    size += (*it)->size_;
  }

  return size;
}


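// Returns true if any two of the supplied (valid) registers alias each other.
// For example, AreAliased(x0, x1, x0) is true, but AreAliased(x0, w1, d0) is
// false.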
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      VIXL_ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
    CountSetBits(unique_regs, sizeof(unique_regs) * 8);
  int number_of_unique_fpregs =
    CountSetBits(unique_fpregs, sizeof(unique_fpregs) * 8);

  VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
  VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  VIXL_ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


}  // namespace vixl