1 // Copyright 2017, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright
10 //     notice, this list of conditions and the following disclaimer in the
11 //     documentation and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may
13 //     be used to endorse or promote products derived from this software
14 //     without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 // POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "aarch32/macro-assembler-aarch32.h"
29 
30 #define STRINGIFY(x) #x
31 #define TOSTRING(x) STRINGIFY(x)
32 
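// CONTEXT_SCOPE opens a ContextScope tagged with the current file and line;
// the Delegate() methods below open one so the expansion in progress can be
// identified in diagnostics.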
33 #define CONTEXT_SCOPE \
34   ContextScope context(this, __FILE__ ":" TOSTRING(__LINE__))
35 
36 namespace vixl {
37 namespace aarch32 {
38 
39 ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck(
40     MacroAssembler* masm, size_t size, SizePolicy size_policy)
41     : ExactAssemblyScope(masm,
42                          size,
43                          size_policy,
44                          ExactAssemblyScope::kIgnorePools) {}
45 
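// Open() snapshots the MacroAssembler's core and VFP scratch register lists so
// that Close() can restore them, and links this scope into the chain of
// currently open scratch register scopes.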
46 void UseScratchRegisterScope::Open(MacroAssembler* masm) {
47   VIXL_ASSERT(masm_ == NULL);
48   VIXL_ASSERT(masm != NULL);
49   masm_ = masm;
50 
51   old_available_ = masm_->GetScratchRegisterList()->GetList();
52   old_available_vfp_ = masm_->GetScratchVRegisterList()->GetList();
53 
54   parent_ = masm->GetCurrentScratchRegisterScope();
55   masm->SetCurrentScratchRegisterScope(this);
56 }
57 
58 
59 void UseScratchRegisterScope::Close() {
60   if (masm_ != NULL) {
61     // Ensure that scopes nest perfectly, and do not outlive their parents.
62     // This is a run-time check because the order of destruction of objects in
63     // the _same_ scope is implementation-defined, and is likely to change in
64     // optimised builds.
65     VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this);
66     masm_->SetCurrentScratchRegisterScope(parent_);
67 
68     masm_->GetScratchRegisterList()->SetList(old_available_);
69     masm_->GetScratchVRegisterList()->SetList(old_available_vfp_);
70 
71     masm_ = NULL;
72   }
73 }
74 
75 
76 bool UseScratchRegisterScope::IsAvailable(const Register& reg) const {
77   VIXL_ASSERT(masm_ != NULL);
78   VIXL_ASSERT(reg.IsValid());
79   return masm_->GetScratchRegisterList()->Includes(reg);
80 }
81 
82 
83 bool UseScratchRegisterScope::IsAvailable(const VRegister& reg) const {
84   VIXL_ASSERT(masm_ != NULL);
85   VIXL_ASSERT(reg.IsValid());
86   return masm_->GetScratchVRegisterList()->IncludesAllOf(reg);
87 }
88 
89 
90 Register UseScratchRegisterScope::Acquire() {
91   VIXL_ASSERT(masm_ != NULL);
92   Register reg = masm_->GetScratchRegisterList()->GetFirstAvailableRegister();
93   VIXL_CHECK(reg.IsValid());
94   masm_->GetScratchRegisterList()->Remove(reg);
95   return reg;
96 }
97 
98 
99 VRegister UseScratchRegisterScope::AcquireV(unsigned size_in_bits) {
100   switch (size_in_bits) {
101     case kSRegSizeInBits:
102       return AcquireS();
103     case kDRegSizeInBits:
104       return AcquireD();
105     case kQRegSizeInBits:
106       return AcquireQ();
107     default:
108       VIXL_UNREACHABLE();
109       return NoVReg;
110   }
111 }
112 
113 
114 QRegister UseScratchRegisterScope::AcquireQ() {
115   VIXL_ASSERT(masm_ != NULL);
116   QRegister reg =
117       masm_->GetScratchVRegisterList()->GetFirstAvailableQRegister();
118   VIXL_CHECK(reg.IsValid());
119   masm_->GetScratchVRegisterList()->Remove(reg);
120   return reg;
121 }
122 
123 
124 DRegister UseScratchRegisterScope::AcquireD() {
125   VIXL_ASSERT(masm_ != NULL);
126   DRegister reg =
127       masm_->GetScratchVRegisterList()->GetFirstAvailableDRegister();
128   VIXL_CHECK(reg.IsValid());
129   masm_->GetScratchVRegisterList()->Remove(reg);
130   return reg;
131 }
132 
133 
134 SRegister UseScratchRegisterScope::AcquireS() {
135   VIXL_ASSERT(masm_ != NULL);
136   SRegister reg =
137       masm_->GetScratchVRegisterList()->GetFirstAvailableSRegister();
138   VIXL_CHECK(reg.IsValid());
139   masm_->GetScratchVRegisterList()->Remove(reg);
140   return reg;
141 }
142 
143 
144 void UseScratchRegisterScope::Release(const Register& reg) {
145   VIXL_ASSERT(masm_ != NULL);
146   VIXL_ASSERT(reg.IsValid());
147   VIXL_ASSERT(!masm_->GetScratchRegisterList()->Includes(reg));
148   masm_->GetScratchRegisterList()->Combine(reg);
149 }
150 
151 
152 void UseScratchRegisterScope::Release(const VRegister& reg) {
153   VIXL_ASSERT(masm_ != NULL);
154   VIXL_ASSERT(reg.IsValid());
155   VIXL_ASSERT(!masm_->GetScratchVRegisterList()->IncludesAliasOf(reg));
156   masm_->GetScratchVRegisterList()->Combine(reg);
157 }
158 
159 
160 void UseScratchRegisterScope::Include(const RegisterList& list) {
161   VIXL_ASSERT(masm_ != NULL);
162   RegisterList excluded_registers(sp, lr, pc);
163   uint32_t mask = list.GetList() & ~excluded_registers.GetList();
164   RegisterList* available = masm_->GetScratchRegisterList();
165   available->SetList(available->GetList() | mask);
166 }
167 
168 
169 void UseScratchRegisterScope::Include(const VRegisterList& list) {
170   VIXL_ASSERT(masm_ != NULL);
171   VRegisterList* available = masm_->GetScratchVRegisterList();
172   available->SetList(available->GetList() | list.GetList());
173 }
174 
175 
176 void UseScratchRegisterScope::Exclude(const RegisterList& list) {
177   VIXL_ASSERT(masm_ != NULL);
178   RegisterList* available = masm_->GetScratchRegisterList();
179   available->SetList(available->GetList() & ~list.GetList());
180 }
181 
182 
183 void UseScratchRegisterScope::Exclude(const VRegisterList& list) {
184   VIXL_ASSERT(masm_ != NULL);
185   VRegisterList* available = masm_->GetScratchVRegisterList();
186   available->SetList(available->GetList() & ~list.GetList());
187 }
188 
189 
190 void UseScratchRegisterScope::Exclude(const Operand& operand) {
191   if (operand.IsImmediateShiftedRegister()) {
192     Exclude(operand.GetBaseRegister());
193   } else if (operand.IsRegisterShiftedRegister()) {
194     Exclude(operand.GetBaseRegister(), operand.GetShiftRegister());
195   } else {
196     VIXL_ASSERT(operand.IsImmediate());
197   }
198 }
199 
200 
201 void UseScratchRegisterScope::ExcludeAll() {
202   VIXL_ASSERT(masm_ != NULL);
203   masm_->GetScratchRegisterList()->SetList(0);
204   masm_->GetScratchVRegisterList()->SetList(0);
205 }
206 
207 
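// Emits any pending literal/veneer pools now if placing `size_arg` more bytes
// at the current cursor would otherwise leave a pool entry out of range.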
208 void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) {
209   // We skip the check when the pools are blocked.
210   if (ArePoolsBlocked()) return;
211 
212   VIXL_ASSERT(IsUint32(size_arg));
213   uint32_t size = static_cast<uint32_t>(size_arg);
214 
215   if (pool_manager_.MustEmit(GetCursorOffset(), size)) {
216     int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size);
217     VIXL_ASSERT(new_pc == GetCursorOffset());
218     USE(new_pc);
219   }
220 }
221 
222 
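// Materialises an arbitrary 32-bit immediate into `tmp`: a single MOV for
// 16-bit values, a MVN when the bitwise inverse is encodable, and otherwise a
// MOV/MOVT pair (low halfword, then high halfword).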
223 void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond,
224                                                 Register tmp,
225                                                 uint32_t imm) {
226   if (IsUintN(16, imm)) {
227     CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
228     mov(cond, tmp, imm & 0xffff);
229     return;
230   }
231   if (IsUsingT32()) {
232     if (ImmediateT32::IsImmediateT32(~imm)) {
233       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
234       mvn(cond, tmp, ~imm);
235       return;
236     }
237   } else {
238     if (ImmediateA32::IsImmediateA32(~imm)) {
239       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
240       mvn(cond, tmp, ~imm);
241       return;
242     }
243   }
244   CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
245   mov(cond, tmp, imm & 0xffff);
246   movt(cond, tmp, imm >> 16);
247 }
248 
249 
250 MemOperand MacroAssembler::MemOperandComputationHelper(
251     Condition cond,
252     Register scratch,
253     Register base,
254     uint32_t offset,
255     uint32_t extra_offset_mask) {
256   VIXL_ASSERT(!AliasesAvailableScratchRegister(scratch));
257   VIXL_ASSERT(!AliasesAvailableScratchRegister(base));
258   VIXL_ASSERT(allow_macro_instructions_);
259   VIXL_ASSERT(OutsideITBlock());
260 
261   // Check for the simple pass-through case.
262   if ((offset & extra_offset_mask) == offset) return MemOperand(base, offset);
263 
264   MacroEmissionCheckScope guard(this);
265   ITScope it_scope(this, &cond, guard);
266 
267   uint32_t load_store_offset = offset & extra_offset_mask;
268   uint32_t add_offset = offset & ~extra_offset_mask;
269   if ((add_offset != 0) && (IsModifiedImmediate(offset) ||
270                             IsModifiedImmediate(UnsignedNegate(offset)))) {
271     load_store_offset = 0;
272     add_offset = offset;
273   }
274 
275   if (base.IsPC()) {
276     // Special handling for PC bases. We must read the PC in the first
277     // instruction (and only in that instruction), and we must also take care to
278     // keep the same address calculation as loads and stores. For T32, that
279     // means using something like ADR, which uses AlignDown(PC, 4).
280 
281     // We don't handle positive offsets from PC because the intention is not
282     // clear; does the user expect the offset from the current
283     // GetCursorOffset(), or to allow a certain amount of space after the
284     // instruction?
285     VIXL_ASSERT((offset & 0x80000000) != 0);
286     if (IsUsingT32()) {
287       // T32: make the first instruction "SUB (immediate, from PC)" -- an alias
288       // of ADR -- to get behaviour like loads and stores. This ADR can handle
289       // at least as much offset as the load_store_offset so it can replace it.
290 
291       uint32_t sub_pc_offset = UnsignedNegate(offset) & 0xfff;
292       load_store_offset = (offset + sub_pc_offset) & extra_offset_mask;
293       add_offset = (offset + sub_pc_offset) & ~extra_offset_mask;
294 
295       ExactAssemblyScope scope(this, k32BitT32InstructionSizeInBytes);
296       sub(cond, scratch, base, sub_pc_offset);
297 
298       if (add_offset == 0) return MemOperand(scratch, load_store_offset);
299 
300       // The rest of the offset can be generated in the usual way.
301       base = scratch;
302     }
303     // A32 can use any SUB instruction, so we don't have to do anything special
304     // here except to ensure that we read the PC first.
305   }
306 
307   add(cond, scratch, base, add_offset);
308   return MemOperand(scratch, load_store_offset);
309 }
310 
311 
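// Returns the mask of offset bits that the given load/store instruction can
// encode directly; MemOperandComputationHelper uses it to split an
// out-of-range offset into an ADD plus an in-range addressing-mode offset.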
312 uint32_t MacroAssembler::GetOffsetMask(InstructionType type,
313                                        AddrMode addrmode) {
314   switch (type) {
315     case kLdr:
316     case kLdrb:
317     case kStr:
318     case kStrb:
319       if (IsUsingA32() || (addrmode == Offset)) {
320         return 0xfff;
321       } else {
322         return 0xff;
323       }
324     case kLdrsb:
325     case kLdrh:
326     case kLdrsh:
327     case kStrh:
328       if (IsUsingT32() && (addrmode == Offset)) {
329         return 0xfff;
330       } else {
331         return 0xff;
332       }
333     case kVldr:
334     case kVstr:
335       return 0x3fc;
336     case kLdrd:
337     case kStrd:
338       if (IsUsingA32()) {
339         return 0xff;
340       } else {
341         return 0x3fc;
342       }
343     default:
344       VIXL_UNREACHABLE();
345       return 0;
346   }
347 }
348 
349 
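// Sixteen printf trampolines, one per combination of core (R) and double (D)
// arguments; Printf() selects between them using the printf_type bitmask built
// by PreparePrintfArgument().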
350 HARDFLOAT void PrintfTrampolineRRRR(
351     const char* format, uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
352   printf(format, a, b, c, d);
353 }
354 
355 
356 HARDFLOAT void PrintfTrampolineRRRD(
357     const char* format, uint32_t a, uint32_t b, uint32_t c, double d) {
358   printf(format, a, b, c, d);
359 }
360 
361 
362 HARDFLOAT void PrintfTrampolineRRDR(
363     const char* format, uint32_t a, uint32_t b, double c, uint32_t d) {
364   printf(format, a, b, c, d);
365 }
366 
367 
368 HARDFLOAT void PrintfTrampolineRRDD(
369     const char* format, uint32_t a, uint32_t b, double c, double d) {
370   printf(format, a, b, c, d);
371 }
372 
373 
374 HARDFLOAT void PrintfTrampolineRDRR(
375     const char* format, uint32_t a, double b, uint32_t c, uint32_t d) {
376   printf(format, a, b, c, d);
377 }
378 
379 
380 HARDFLOAT void PrintfTrampolineRDRD(
381     const char* format, uint32_t a, double b, uint32_t c, double d) {
382   printf(format, a, b, c, d);
383 }
384 
385 
386 HARDFLOAT void PrintfTrampolineRDDR(
387     const char* format, uint32_t a, double b, double c, uint32_t d) {
388   printf(format, a, b, c, d);
389 }
390 
391 
392 HARDFLOAT void PrintfTrampolineRDDD(
393     const char* format, uint32_t a, double b, double c, double d) {
394   printf(format, a, b, c, d);
395 }
396 
397 
398 HARDFLOAT void PrintfTrampolineDRRR(
399     const char* format, double a, uint32_t b, uint32_t c, uint32_t d) {
400   printf(format, a, b, c, d);
401 }
402 
403 
404 HARDFLOAT void PrintfTrampolineDRRD(
405     const char* format, double a, uint32_t b, uint32_t c, double d) {
406   printf(format, a, b, c, d);
407 }
408 
409 
410 HARDFLOAT void PrintfTrampolineDRDR(
411     const char* format, double a, uint32_t b, double c, uint32_t d) {
412   printf(format, a, b, c, d);
413 }
414 
415 
416 HARDFLOAT void PrintfTrampolineDRDD(
417     const char* format, double a, uint32_t b, double c, double d) {
418   printf(format, a, b, c, d);
419 }
420 
421 
422 HARDFLOAT void PrintfTrampolineDDRR(
423     const char* format, double a, double b, uint32_t c, uint32_t d) {
424   printf(format, a, b, c, d);
425 }
426 
427 
428 HARDFLOAT void PrintfTrampolineDDRD(
429     const char* format, double a, double b, uint32_t c, double d) {
430   printf(format, a, b, c, d);
431 }
432 
433 
434 HARDFLOAT void PrintfTrampolineDDDR(
435     const char* format, double a, double b, double c, uint32_t d) {
436   printf(format, a, b, c, d);
437 }
438 
439 
440 HARDFLOAT void PrintfTrampolineDDDD(
441     const char* format, double a, double b, double c, double d) {
442   printf(format, a, b, c, d);
443 }
444 
445 
446 void MacroAssembler::Printf(const char* format,
447                             CPURegister reg1,
448                             CPURegister reg2,
449                             CPURegister reg3,
450                             CPURegister reg4) {
451   // Exclude all registers from the available scratch registers, so
452   // that we are able to use ip below.
453   // TODO: Refactor this function to use UseScratchRegisterScope
454   // for temporary registers below.
455   UseScratchRegisterScope scratch(this);
456   scratch.ExcludeAll();
457   if (generate_simulator_code_) {
458     PushRegister(reg4);
459     PushRegister(reg3);
460     PushRegister(reg2);
461     PushRegister(reg1);
462     Push(RegisterList(r0, r1));
463     StringLiteral* format_literal =
464         new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
465     Adr(r0, format_literal);
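    // Pack the type of each argument register into one nibble of r1, so that
    // whatever services the kPrintfCode HVC can decode the values pushed above.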
466     uint32_t args = (reg4.GetType() << 12) | (reg3.GetType() << 8) |
467                     (reg2.GetType() << 4) | reg1.GetType();
468     Mov(r1, args);
469     Hvc(kPrintfCode);
470     Pop(RegisterList(r0, r1));
471     int size = reg4.GetRegSizeInBytes() + reg3.GetRegSizeInBytes() +
472                reg2.GetRegSizeInBytes() + reg1.GetRegSizeInBytes();
473     Drop(size);
474   } else {
475     // Generate on a native platform => 32 bit environment.
476     // Preserve core registers r0-r3, r12, r14
477     const uint32_t saved_registers_mask =
478         kCallerSavedRegistersMask | (1 << r5.GetCode());
479     Push(RegisterList(saved_registers_mask));
480     // Push VFP registers.
481     Vpush(Untyped64, DRegisterList(d0, 8));
482     if (Has32DRegs()) Vpush(Untyped64, DRegisterList(d16, 16));
483     // Find a register which has been saved and which doesn't need to be
484     // printed.
485     RegisterList available_registers(kCallerSavedRegistersMask);
486     if (reg1.GetType() == CPURegister::kRRegister) {
487       available_registers.Remove(Register(reg1.GetCode()));
488     }
489     if (reg2.GetType() == CPURegister::kRRegister) {
490       available_registers.Remove(Register(reg2.GetCode()));
491     }
492     if (reg3.GetType() == CPURegister::kRRegister) {
493       available_registers.Remove(Register(reg3.GetCode()));
494     }
495     if (reg4.GetType() == CPURegister::kRRegister) {
496       available_registers.Remove(Register(reg4.GetCode()));
497     }
498     Register tmp = available_registers.GetFirstAvailableRegister();
499     VIXL_ASSERT(tmp.GetType() == CPURegister::kRRegister);
500     // Push the flags.
501     Mrs(tmp, APSR);
502     Push(tmp);
503     Vmrs(RegisterOrAPSR_nzcv(tmp.GetCode()), FPSCR);
504     Push(tmp);
505     // Push the registers to print on the stack.
506     PushRegister(reg4);
507     PushRegister(reg3);
508     PushRegister(reg2);
509     PushRegister(reg1);
510     int core_count = 1;
511     int vfp_count = 0;
512     uint32_t printf_type = 0;
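    // Each bit of printf_type records whether the corresponding argument is a
    // VFP (double) value; it selects one of the sixteen PrintfTrampoline*
    // variants in the switch below.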
513     // Pop the registers to print and store them into r1-r3 and/or d0-d3.
514     // Reg4 may stay on the stack if all the registers to print are core
515     // registers.
516     PreparePrintfArgument(reg1, &core_count, &vfp_count, &printf_type);
517     PreparePrintfArgument(reg2, &core_count, &vfp_count, &printf_type);
518     PreparePrintfArgument(reg3, &core_count, &vfp_count, &printf_type);
519     PreparePrintfArgument(reg4, &core_count, &vfp_count, &printf_type);
520     // Ensure that the stack is aligned on 8 bytes.
521     And(r5, sp, 0x7);
522     if (core_count == 5) {
523       // One 32 bit argument (reg4) has been left on the stack =>  align the
524       // stack
525       // before the argument.
526       Pop(r0);
527       Sub(sp, sp, r5);
528       Push(r0);
529     } else {
530       Sub(sp, sp, r5);
531     }
532     // Select the right trampoline depending on the arguments.
533     uintptr_t address;
534     switch (printf_type) {
535       case 0:
536         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
537         break;
538       case 1:
539         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRR);
540         break;
541       case 2:
542         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRR);
543         break;
544       case 3:
545         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRR);
546         break;
547       case 4:
548         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDR);
549         break;
550       case 5:
551         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDR);
552         break;
553       case 6:
554         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDR);
555         break;
556       case 7:
557         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDR);
558         break;
559       case 8:
560         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRD);
561         break;
562       case 9:
563         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRD);
564         break;
565       case 10:
566         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRD);
567         break;
568       case 11:
569         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRD);
570         break;
571       case 12:
572         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDD);
573         break;
574       case 13:
575         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDD);
576         break;
577       case 14:
578         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDD);
579         break;
580       case 15:
581         address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDD);
582         break;
583       default:
584         VIXL_UNREACHABLE();
585         address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
586         break;
587     }
588     StringLiteral* format_literal =
589         new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
590     Adr(r0, format_literal);
591     Mov(ip, Operand::From(address));
592     Blx(ip);
593     // If register reg4 was left on the stack => skip it.
594     if (core_count == 5) Drop(kRegSizeInBytes);
595     // Restore the stack as it was before alignment.
596     Add(sp, sp, r5);
597     // Restore the flags.
598     Pop(tmp);
599     Vmsr(FPSCR, tmp);
600     Pop(tmp);
601     Msr(APSR_nzcvqg, tmp);
602     // Restore the registers.
603     if (Has32DRegs()) Vpop(Untyped64, DRegisterList(d16, 16));
604     Vpop(Untyped64, DRegisterList(d0, 8));
605     Pop(RegisterList(saved_registers_mask));
606   }
607 }
608 
609 
610 void MacroAssembler::PushRegister(CPURegister reg) {
611   switch (reg.GetType()) {
612     case CPURegister::kNoRegister:
613       break;
614     case CPURegister::kRRegister:
615       Push(Register(reg.GetCode()));
616       break;
617     case CPURegister::kSRegister:
618       Vpush(Untyped32, SRegisterList(SRegister(reg.GetCode())));
619       break;
620     case CPURegister::kDRegister:
621       Vpush(Untyped64, DRegisterList(DRegister(reg.GetCode())));
622       break;
623     case CPURegister::kQRegister:
624       VIXL_UNIMPLEMENTED();
625       break;
626   }
627 }
628 
629 
630 void MacroAssembler::PreparePrintfArgument(CPURegister reg,
631                                            int* core_count,
632                                            int* vfp_count,
633                                            uint32_t* printf_type) {
634   switch (reg.GetType()) {
635     case CPURegister::kNoRegister:
636       break;
637     case CPURegister::kRRegister:
638       VIXL_ASSERT(*core_count <= 4);
639       if (*core_count < 4) Pop(Register(*core_count));
640       *core_count += 1;
641       break;
642     case CPURegister::kSRegister:
643       VIXL_ASSERT(*vfp_count < 4);
644       *printf_type |= 1 << (*core_count + *vfp_count - 1);
645       Vpop(Untyped32, SRegisterList(SRegister(*vfp_count * 2)));
646       Vcvt(F64, F32, DRegister(*vfp_count), SRegister(*vfp_count * 2));
647       *vfp_count += 1;
648       break;
649     case CPURegister::kDRegister:
650       VIXL_ASSERT(*vfp_count < 4);
651       *printf_type |= 1 << (*core_count + *vfp_count - 1);
652       Vpop(Untyped64, DRegisterList(DRegister(*vfp_count)));
653       *vfp_count += 1;
654       break;
655     case CPURegister::kQRegister:
656       VIXL_UNIMPLEMENTED();
657       break;
658   }
659 }
660 
661 
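// The Delegate() overloads below are called when an operand or immediate
// cannot be encoded directly by the Assembler; each one rewrites the operation
// into an encodable sequence where possible, otherwise it falls back to
// Assembler::Delegate().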
662 void MacroAssembler::Delegate(InstructionType type,
663                               InstructionCondROp instruction,
664                               Condition cond,
665                               Register rn,
666                               const Operand& operand) {
667   VIXL_ASSERT((type == kMovt) || (type == kSxtb16) || (type == kTeq) ||
668               (type == kUxtb16));
669 
670   if (type == kMovt) {
671     VIXL_ABORT_WITH_MSG("`Movt` expects a 16-bit immediate.\n");
672   }
673 
674   // This delegate only supports teq with immediates.
675   CONTEXT_SCOPE;
676   if ((type == kTeq) && operand.IsImmediate()) {
677     UseScratchRegisterScope temps(this);
678     Register scratch = temps.Acquire();
679     HandleOutOfBoundsImmediate(cond, scratch, operand.GetImmediate());
680     CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
681     teq(cond, rn, scratch);
682     return;
683   }
684   Assembler::Delegate(type, instruction, cond, rn, operand);
685 }
686 
687 
688 void MacroAssembler::Delegate(InstructionType type,
689                               InstructionCondSizeROp instruction,
690                               Condition cond,
691                               EncodingSize size,
692                               Register rn,
693                               const Operand& operand) {
694   CONTEXT_SCOPE;
695   VIXL_ASSERT(size.IsBest());
696   VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) ||
697               (type == kMovs) || (type == kMvn) || (type == kMvns) ||
698               (type == kSxtb) || (type == kSxth) || (type == kTst) ||
699               (type == kUxtb) || (type == kUxth));
700   if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
701     VIXL_ASSERT((type != kMov) && (type != kMovs));
702     InstructionCondRROp shiftop = NULL;
703     switch (operand.GetShift().GetType()) {
704       case LSL:
705         shiftop = &Assembler::lsl;
706         break;
707       case LSR:
708         shiftop = &Assembler::lsr;
709         break;
710       case ASR:
711         shiftop = &Assembler::asr;
712         break;
713       case RRX:
714         // A RegisterShiftedRegister operand cannot have a shift of type RRX.
715         VIXL_UNREACHABLE();
716         break;
717       case ROR:
718         shiftop = &Assembler::ror;
719         break;
720       default:
721         VIXL_UNREACHABLE();
722     }
723     if (shiftop != NULL) {
724       UseScratchRegisterScope temps(this);
725       Register scratch = temps.Acquire();
726       CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
727       (this->*shiftop)(cond,
728                        scratch,
729                        operand.GetBaseRegister(),
730                        operand.GetShiftRegister());
731       (this->*instruction)(cond, size, rn, scratch);
732       return;
733     }
734   }
735   if (operand.IsImmediate()) {
736     uint32_t imm = operand.GetImmediate();
737     switch (type) {
738       case kMov:
739       case kMovs:
740         if (!rn.IsPC()) {
741           // Immediate is too large, but not using PC, so handle with mov{t}.
742           HandleOutOfBoundsImmediate(cond, rn, imm);
743           if (type == kMovs) {
744             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
745             tst(cond, rn, rn);
746           }
747           return;
748         } else if (type == kMov) {
749           VIXL_ASSERT(IsUsingA32() || cond.Is(al));
750           // Immediate is too large and using PC, so handle using a temporary
751           // register.
752           UseScratchRegisterScope temps(this);
753           Register scratch = temps.Acquire();
754           HandleOutOfBoundsImmediate(al, scratch, imm);
755           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
756           bx(cond, scratch);
757           return;
758         }
759         break;
760       case kCmn:
761       case kCmp:
762         if (IsUsingA32() || !rn.IsPC()) {
763           UseScratchRegisterScope temps(this);
764           Register scratch = temps.Acquire();
765           HandleOutOfBoundsImmediate(cond, scratch, imm);
766           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
767           (this->*instruction)(cond, size, rn, scratch);
768           return;
769         }
770         break;
771       case kMvn:
772       case kMvns:
773         if (!rn.IsPC()) {
774           UseScratchRegisterScope temps(this);
775           Register scratch = temps.Acquire();
776           HandleOutOfBoundsImmediate(cond, scratch, imm);
777           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
778           (this->*instruction)(cond, size, rn, scratch);
779           return;
780         }
781         break;
782       case kTst:
783         if (IsUsingA32() || !rn.IsPC()) {
784           UseScratchRegisterScope temps(this);
785           Register scratch = temps.Acquire();
786           HandleOutOfBoundsImmediate(cond, scratch, imm);
787           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
788           (this->*instruction)(cond, size, rn, scratch);
789           return;
790         }
791         break;
792       default:  // kSxtb, kSxth, kUxtb, kUxth
793         break;
794     }
795   }
796   Assembler::Delegate(type, instruction, cond, size, rn, operand);
797 }
798 
799 
800 void MacroAssembler::Delegate(InstructionType type,
801                               InstructionCondRROp instruction,
802                               Condition cond,
803                               Register rd,
804                               Register rn,
805                               const Operand& operand) {
806   if ((type == kSxtab) || (type == kSxtab16) || (type == kSxtah) ||
807       (type == kUxtab) || (type == kUxtab16) || (type == kUxtah) ||
808       (type == kPkhbt) || (type == kPkhtb)) {
809     UnimplementedDelegate(type);
810     return;
811   }
812 
813   // This delegate only handles the following instructions.
814   VIXL_ASSERT((type == kOrn) || (type == kOrns) || (type == kRsc) ||
815               (type == kRscs));
816   CONTEXT_SCOPE;
817 
818   // T32 does not support register-shifted-register operands, so emulate them.
819   if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
820     InstructionCondRROp shiftop = NULL;
821     switch (operand.GetShift().GetType()) {
822       case LSL:
823         shiftop = &Assembler::lsl;
824         break;
825       case LSR:
826         shiftop = &Assembler::lsr;
827         break;
828       case ASR:
829         shiftop = &Assembler::asr;
830         break;
831       case RRX:
832         // A RegisterShiftedRegister operand cannot have a shift of type RRX.
833         VIXL_UNREACHABLE();
834         break;
835       case ROR:
836         shiftop = &Assembler::ror;
837         break;
838       default:
839         VIXL_UNREACHABLE();
840     }
841     if (shiftop != NULL) {
842       UseScratchRegisterScope temps(this);
843       Register rm = operand.GetBaseRegister();
844       Register rs = operand.GetShiftRegister();
845       // Try to use rd as a scratch register. We can do this if it aliases rs or
846       // rm (because we read them in the first instruction), but not rn.
847       if (!rd.Is(rn)) temps.Include(rd);
848       Register scratch = temps.Acquire();
849       // TODO: The scope length was measured empirically. We should analyse the
850       // worst-case size and add targeted tests.
851       CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
852       (this->*shiftop)(cond, scratch, rm, rs);
853       (this->*instruction)(cond, rd, rn, scratch);
854       return;
855     }
856   }
857 
858   // T32 does not have an Rsc instruction, so negate the lhs input and turn it
859   // into an Adc. Adc and Rsc are equivalent using a bitwise NOT:
860   //   adc rd, rn, operand <-> rsc rd, NOT(rn), operand
861   if (IsUsingT32() && ((type == kRsc) || (type == kRscs))) {
862     // The RegisterShiftedRegister case should have been handled above.
863     VIXL_ASSERT(!operand.IsRegisterShiftedRegister());
864     UseScratchRegisterScope temps(this);
865     // Try to use rd as a scratch register. We can do this if it aliases rn
866     // (because we read it in the first instruction), but not rm.
867     temps.Include(rd);
868     temps.Exclude(operand);
869     Register negated_rn = temps.Acquire();
870     {
871       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
872       mvn(cond, negated_rn, rn);
873     }
874     if (type == kRsc) {
875       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
876       adc(cond, rd, negated_rn, operand);
877       return;
878     }
879     // TODO: We shouldn't have to specify how much space the next instruction
880     // needs.
881     CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
882     adcs(cond, rd, negated_rn, operand);
883     return;
884   }
885 
886   if (operand.IsImmediate()) {
887     // If the immediate can be encoded when inverted, turn Orn into Orr.
888     // Otherwise rely on HandleOutOfBoundsImmediate to generate a series of
889     // mov.
890     int32_t imm = operand.GetSignedImmediate();
891     if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) {
892       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
893       switch (type) {
894         case kOrn:
895           orr(cond, rd, rn, ~imm);
896           return;
897         case kOrns:
898           orrs(cond, rd, rn, ~imm);
899           return;
900         default:
901           VIXL_UNREACHABLE();
902           break;
903       }
904     }
905   }
906 
907   // A32 does not have an Orn instruction, so negate the rhs input and turn it
908   // into an Orr.
909   if (IsUsingA32() && ((type == kOrn) || (type == kOrns))) {
910     // TODO: orn r0, r1, imm -> orr r0, r1, neg(imm) if doable
911     //  mvn r0, r2
912     //  orr r0, r1, r0
913     Register scratch;
914     UseScratchRegisterScope temps(this);
915     // Try to use rd as a scratch register. We can do this if it aliases rs or
916     // rm (because we read them in the first instruction), but not rn.
917     if (!rd.Is(rn)) temps.Include(rd);
918     scratch = temps.Acquire();
919     {
920       // TODO: We shouldn't have to specify how much space the next instruction
921       // needs.
922       CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
923       mvn(cond, scratch, operand);
924     }
925     if (type == kOrns) {
926       CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
927       orrs(cond, rd, rn, scratch);
928       return;
929     }
930     CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
931     orr(cond, rd, rn, scratch);
932     return;
933   }
934 
935   if (operand.IsImmediate()) {
936     UseScratchRegisterScope temps(this);
937     // Allow using the destination as a scratch register if possible.
938     if (!rd.Is(rn)) temps.Include(rd);
939     Register scratch = temps.Acquire();
940     int32_t imm = operand.GetSignedImmediate();
941     HandleOutOfBoundsImmediate(cond, scratch, imm);
942     CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
943     (this->*instruction)(cond, rd, rn, scratch);
944     return;
945   }
946   Assembler::Delegate(type, instruction, cond, rd, rn, operand);
947 }
948 
949 
950 void MacroAssembler::Delegate(InstructionType type,
951                               InstructionCondSizeRL instruction,
952                               Condition cond,
953                               EncodingSize size,
954                               Register rd,
955                               Location* location) {
956   VIXL_ASSERT((type == kLdr) || (type == kAdr));
957 
958   CONTEXT_SCOPE;
959   VIXL_ASSERT(size.IsBest());
960 
961   if ((type == kLdr) && location->IsBound()) {
962     CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
963     UseScratchRegisterScope temps(this);
964     temps.Include(rd);
965     uint32_t mask = GetOffsetMask(type, Offset);
966     ldr(rd, MemOperandComputationHelper(cond, temps.Acquire(), location, mask));
967     return;
968   }
969 
970   Assembler::Delegate(type, instruction, cond, size, rd, location);
971 }
972 
973 
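// Attempts to emit `op rd, rn, #imm` as two instructions by applying first the
// low bits of imm that fit in `mask`, then the remaining high bits. Returns
// false if the high part is not a modified immediate, unless rn is PC, in
// which case the split is used anyway to avoid moving PC into a temporary.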
974 bool MacroAssembler::GenerateSplitInstruction(
975     InstructionCondSizeRROp instruction,
976     Condition cond,
977     Register rd,
978     Register rn,
979     uint32_t imm,
980     uint32_t mask) {
981   uint32_t high = imm & ~mask;
982   if (!IsModifiedImmediate(high) && !rn.IsPC()) return false;
983   // If high is a modified immediate, we can perform the operation with
984   // only 2 instructions.
985   // Else, if rn is PC, we want to avoid moving PC into a temporary.
986   // Therefore, we also use the pattern even if the second call may
987   // generate 3 instructions.
988   uint32_t low = imm & mask;
989   CodeBufferCheckScope scope(this,
990                              (rn.IsPC() ? 4 : 2) * kMaxInstructionSizeInBytes);
991   (this->*instruction)(cond, Best, rd, rn, low);
992   (this->*instruction)(cond, Best, rd, rd, high);
993   return true;
994 }
995 
996 
997 void MacroAssembler::Delegate(InstructionType type,
998                               InstructionCondSizeRROp instruction,
999                               Condition cond,
1000                               EncodingSize size,
1001                               Register rd,
1002                               Register rn,
1003                               const Operand& operand) {
1004   VIXL_ASSERT(
1005       (type == kAdc) || (type == kAdcs) || (type == kAdd) || (type == kAdds) ||
1006       (type == kAnd) || (type == kAnds) || (type == kAsr) || (type == kAsrs) ||
1007       (type == kBic) || (type == kBics) || (type == kEor) || (type == kEors) ||
1008       (type == kLsl) || (type == kLsls) || (type == kLsr) || (type == kLsrs) ||
1009       (type == kOrr) || (type == kOrrs) || (type == kRor) || (type == kRors) ||
1010       (type == kRsb) || (type == kRsbs) || (type == kSbc) || (type == kSbcs) ||
1011       (type == kSub) || (type == kSubs));
1012 
1013   CONTEXT_SCOPE;
1014   VIXL_ASSERT(size.IsBest());
1015   if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
1016     InstructionCondRROp shiftop = NULL;
1017     switch (operand.GetShift().GetType()) {
1018       case LSL:
1019         shiftop = &Assembler::lsl;
1020         break;
1021       case LSR:
1022         shiftop = &Assembler::lsr;
1023         break;
1024       case ASR:
1025         shiftop = &Assembler::asr;
1026         break;
1027       case RRX:
1028         // A RegisterShiftedRegister operand cannot have a shift of type RRX.
1029         VIXL_UNREACHABLE();
1030         break;
1031       case ROR:
1032         shiftop = &Assembler::ror;
1033         break;
1034       default:
1035         VIXL_UNREACHABLE();
1036     }
1037     if (shiftop != NULL) {
1038       UseScratchRegisterScope temps(this);
1039       Register rm = operand.GetBaseRegister();
1040       Register rs = operand.GetShiftRegister();
1041       // Try to use rd as a scratch register. We can do this if it aliases rs or
1042       // rm (because we read them in the first instruction), but not rn.
1043       if (!rd.Is(rn)) temps.Include(rd);
1044       Register scratch = temps.Acquire();
1045       CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
1046       (this->*shiftop)(cond, scratch, rm, rs);
1047       (this->*instruction)(cond, size, rd, rn, scratch);
1048       return;
1049     }
1050   }
1051   if (operand.IsImmediate()) {
1052     int32_t imm = operand.GetSignedImmediate();
1053     if (ImmediateT32::IsImmediateT32(~imm)) {
1054       if (IsUsingT32()) {
1055         switch (type) {
1056           case kOrr:
1057             orn(cond, rd, rn, ~imm);
1058             return;
1059           case kOrrs:
1060             orns(cond, rd, rn, ~imm);
1061             return;
1062           default:
1063             break;
1064         }
1065       }
1066     }
1067     if (imm < 0) {
1068       InstructionCondSizeRROp asmcb = NULL;
1069       // Add and sub are equivalent using an arithmetic negation:
1070       //   add rd, rn, #imm <-> sub rd, rn, - #imm
1071       // Add and sub with carry are equivalent using a bitwise NOT:
1072       //   adc rd, rn, #imm <-> sbc rd, rn, NOT #imm
1073       switch (type) {
1074         case kAdd:
1075           asmcb = &Assembler::sub;
1076           imm = -imm;
1077           break;
1078         case kAdds:
1079           asmcb = &Assembler::subs;
1080           imm = -imm;
1081           break;
1082         case kSub:
1083           asmcb = &Assembler::add;
1084           imm = -imm;
1085           break;
1086         case kSubs:
1087           asmcb = &Assembler::adds;
1088           imm = -imm;
1089           break;
1090         case kAdc:
1091           asmcb = &Assembler::sbc;
1092           imm = ~imm;
1093           break;
1094         case kAdcs:
1095           asmcb = &Assembler::sbcs;
1096           imm = ~imm;
1097           break;
1098         case kSbc:
1099           asmcb = &Assembler::adc;
1100           imm = ~imm;
1101           break;
1102         case kSbcs:
1103           asmcb = &Assembler::adcs;
1104           imm = ~imm;
1105           break;
1106         default:
1107           break;
1108       }
1109       if (asmcb != NULL) {
1110         CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes);
1111         (this->*asmcb)(cond, size, rd, rn, Operand(imm));
1112         return;
1113       }
1114     }
1115 
1116     // When rn is PC, only handle negative offsets. The correct way to handle
1117     // positive offsets isn't clear; does the user want the offset from the
1118     // start of the macro, or from the end (to allow a certain amount of space)?
1119     // When type is Add or Sub, imm is always positive (imm < 0 has just been
1120     // handled and imm == 0 would have been generated without the need of a
1121     // delegate). Therefore, only add to PC is forbidden here.
1122     if ((((type == kAdd) && !rn.IsPC()) || (type == kSub)) &&
1123         (IsUsingA32() || (!rd.IsPC() && !rn.IsPC()))) {
1124       VIXL_ASSERT(imm > 0);
1125       // Try to break the constant into two modified immediates.
1126       // For T32 also try to break the constant into one imm12 and one modified
1127       // immediate. Count the trailing zeroes and get the biggest even value.
1128       int trailing_zeroes = CountTrailingZeros(imm) & ~1u;
1129       uint32_t mask = ((trailing_zeroes < 4) && IsUsingT32())
1130                           ? 0xfff
1131                           : (0xff << trailing_zeroes);
1132       if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) {
1133         return;
1134       }
1135       InstructionCondSizeRROp asmcb = NULL;
1136       switch (type) {
1137         case kAdd:
1138           asmcb = &Assembler::sub;
1139           break;
1140         case kSub:
1141           asmcb = &Assembler::add;
1142           break;
1143         default:
1144           VIXL_UNREACHABLE();
1145       }
1146       if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) {
1147         return;
1148       }
1149     }
1150 
1151     UseScratchRegisterScope temps(this);
1152     // Allow using the destination as a scratch register if possible.
1153     if (!rd.Is(rn)) temps.Include(rd);
1154     if (rn.IsPC()) {
1155       // If we're reading the PC, we need to do it in the first instruction,
1156       // otherwise we'll read the wrong value. We rely on this to handle the
1157       // long-range PC-relative MemOperands which can result from user-managed
1158       // literals.
1159 
1160       // Only handle negative offsets. The correct way to handle positive
1161       // offsets isn't clear; does the user want the offset from the start of
1162       // the macro, or from the end (to allow a certain amount of space)?
1163       bool offset_is_negative_or_zero = (imm <= 0);
1164       switch (type) {
1165         case kAdd:
1166         case kAdds:
1167           offset_is_negative_or_zero = (imm <= 0);
1168           break;
1169         case kSub:
1170         case kSubs:
1171           offset_is_negative_or_zero = (imm >= 0);
1172           break;
1173         case kAdc:
1174         case kAdcs:
1175           offset_is_negative_or_zero = (imm < 0);
1176           break;
1177         case kSbc:
1178         case kSbcs:
1179           offset_is_negative_or_zero = (imm > 0);
1180           break;
1181         default:
1182           break;
1183       }
1184       if (offset_is_negative_or_zero) {
1185         {
1186           rn = temps.Acquire();
1187           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1188           mov(cond, rn, pc);
1189         }
1190         // Recurse rather than falling through, to try to get the immediate into
1191         // a single instruction.
1192         CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1193         (this->*instruction)(cond, size, rd, rn, operand);
1194         return;
1195       }
1196     } else {
1197       Register scratch = temps.Acquire();
1198       // TODO: The scope length was measured empirically. We should analyse the
1199       // worst-case size and add targeted tests.
1200       CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1201       mov(cond, scratch, operand.GetImmediate());
1202       (this->*instruction)(cond, size, rd, rn, scratch);
1203       return;
1204     }
1205   }
1206   Assembler::Delegate(type, instruction, cond, size, rd, rn, operand);
1207 }
1208 
1209 
1210 void MacroAssembler::Delegate(InstructionType type,
1211                               InstructionRL instruction,
1212                               Register rn,
1213                               Location* location) {
1214   VIXL_ASSERT((type == kCbz) || (type == kCbnz));
1215 
1216   CONTEXT_SCOPE;
1217   CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
1218   if (IsUsingA32()) {
1219     if (type == kCbz) {
1220       VIXL_ABORT_WITH_MSG("Cbz is only available for T32.\n");
1221     } else {
1222       VIXL_ABORT_WITH_MSG("Cbnz is only available for T32.\n");
1223     }
1224   } else if (rn.IsLow()) {
1225     switch (type) {
1226       case kCbnz: {
1227         Label done;
1228         cbz(rn, &done);
1229         b(location);
1230         Bind(&done);
1231         return;
1232       }
1233       case kCbz: {
1234         Label done;
1235         cbnz(rn, &done);
1236         b(location);
1237         Bind(&done);
1238         return;
1239       }
1240       default:
1241         break;
1242     }
1243   }
1244   Assembler::Delegate(type, instruction, rn, location);
1245 }
1246 
1247 
1248 void MacroAssembler::Delegate(InstructionType type,
1249                               InstructionCondSizeL instruction,
1250                               Condition cond,
1251                               EncodingSize size,
1252                               Location* location) {
1253   VIXL_ASSERT(type == kB);
1254 
1255   CONTEXT_SCOPE;
1256 
1257   // Apply veneer to increase range of backwards conditional branches.
1258   // This replaces:
1259   //   label:
1260   //    <instructions>
1261   //    bcond label   ; T3
1262   // With:
1263   //   label:
1264   //    <instructions>
1265   //    binvcond skip ; T1
1266   //    b label       ; T4
1267   //   skip:
1268   Location::Offset offset = location->GetLocation() -
1269     (GetCursorOffset() + GetArchitectureStatePCOffset());
1270   if (IsUsingT32() && location->IsBound() && ((offset & 0x1) == 0) &&
1271       !cond.Is(al) && cond.IsNotNever()) {
1272     // Bound locations must be earlier in the code.
1273     VIXL_ASSERT(offset < 0);
1274 
1275     // The offset must be within range of a T4 branch, accounting for the
1276     // conditional branch (T1) we emit first, in order to jump over it.
1277     offset -= k16BitT32InstructionSizeInBytes;
1278     if (offset >= -16777216) {
1279       CodeBufferCheckScope scope(this, k16BitT32InstructionSizeInBytes +
1280                                        k32BitT32InstructionSizeInBytes);
1281       Label skip;
1282       b(cond.Negate(), Narrow, &skip);
1283       b(location);
1284       Bind(&skip);
1285       return;
1286     } else {
1287       VIXL_ABORT_WITH_MSG("Conditional branch too far for veneer.\n");
1288     }
1289   }
1290 
1291   Assembler::Delegate(type, instruction, cond, size, location);
1292 }
1293 
1294 
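// Returns true if every byte of imm is either 0x00 or 0xff, so the value can
// be encoded with the byte-mask form of vmov.i64.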
1295 template <typename T>
1296 static inline bool IsI64BitPattern(T imm) {
1297   for (T mask = 0xff << ((sizeof(T) - 1) * 8); mask != 0; mask >>= 8) {
1298     if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
1299   }
1300   return true;
1301 }
1302 
1303 
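// Returns true if all bytes of imm are identical, so the value can be encoded
// by replicating a single vmov.i8 immediate.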
1304 template <typename T>
1305 static inline bool IsI8BitPattern(T imm) {
1306   uint8_t imm8 = imm & 0xff;
1307   for (unsigned rep = sizeof(T) - 1; rep > 0; rep--) {
1308     imm >>= 8;
1309     if ((imm & 0xff) != imm8) return false;
1310   }
1311   return true;
1312 }
1313 
1314 
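// Returns true if ~imm32 fits one of the vmov/vmvn.i32 immediate patterns, so
// the value can be materialised with a single vmvn.i32 (see the bit patterns
// annotated below).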
1315 static inline bool CanBeInverted(uint32_t imm32) {
1316   uint32_t fill8 = 0;
1317 
1318   if ((imm32 & 0xffffff00) == 0xffffff00) {
1319     //    11111111 11111111 11111111 abcdefgh
1320     return true;
1321   }
1322   if (((imm32 & 0xff) == 0) || ((imm32 & 0xff) == 0xff)) {
1323     fill8 = imm32 & 0xff;
1324     imm32 >>= 8;
1325     if ((imm32 >> 8) == 0xffff) {
1326       //    11111111 11111111 abcdefgh 00000000
1327       // or 11111111 11111111 abcdefgh 11111111
1328       return true;
1329     }
1330     if ((imm32 & 0xff) == fill8) {
1331       imm32 >>= 8;
1332       if ((imm32 >> 8) == 0xff) {
1333         //    11111111 abcdefgh 00000000 00000000
1334         // or 11111111 abcdefgh 11111111 11111111
1335         return true;
1336       }
1337       if ((fill8 == 0xff) && ((imm32 & 0xff) == 0xff)) {
1338         //    abcdefgh 11111111 11111111 11111111
1339         return true;
1340       }
1341     }
1342   }
1343   return false;
1344 }
1345 
1346 
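// Replicates imm across all lanes of the wider type RES, e.g.
// replicate<uint64_t>(uint32_t(0xff0000ff)) == UINT64_C(0xff0000ffff0000ff).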
1347 template <typename RES, typename T>
1348 static inline RES replicate(T imm) {
1349   VIXL_ASSERT((sizeof(RES) > sizeof(T)) &&
1350               (((sizeof(RES) / sizeof(T)) * sizeof(T)) == sizeof(RES)));
1351   RES res = imm;
1352   for (unsigned i = sizeof(RES) / sizeof(T) - 1; i > 0; i--) {
1353     res = (res << (sizeof(T) * 8)) | imm;
1354   }
1355   return res;
1356 }
1357 
1358 
1359 void MacroAssembler::Delegate(InstructionType type,
1360                               InstructionCondDtSSop instruction,
1361                               Condition cond,
1362                               DataType dt,
1363                               SRegister rd,
1364                               const SOperand& operand) {
1365   CONTEXT_SCOPE;
1366   if (type == kVmov) {
1367     if (operand.IsImmediate() && dt.Is(F32)) {
1368       const NeonImmediate& neon_imm = operand.GetNeonImmediate();
1369       if (neon_imm.CanConvert<float>()) {
1370         // movw ip, imm16
1371         // movt ip, imm16
1372         // vmov s0, ip
1373         UseScratchRegisterScope temps(this);
1374         Register scratch = temps.Acquire();
1375         float f = neon_imm.GetImmediate<float>();
1376         // TODO: The scope length was measured empirically. We should analyse
1377         // the
1378         // worst-case size and add targeted tests.
1379         CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1380         mov(cond, scratch, FloatToRawbits(f));
1381         vmov(cond, rd, scratch);
1382         return;
1383       }
1384     }
1385   }
1386   Assembler::Delegate(type, instruction, cond, dt, rd, operand);
1387 }
1388 
1389 
1390 void MacroAssembler::Delegate(InstructionType type,
1391                               InstructionCondDtDDop instruction,
1392                               Condition cond,
1393                               DataType dt,
1394                               DRegister rd,
1395                               const DOperand& operand) {
1396   CONTEXT_SCOPE;
1397   if (type == kVmov) {
1398     if (operand.IsImmediate()) {
1399       const NeonImmediate& neon_imm = operand.GetNeonImmediate();
1400       switch (dt.GetValue()) {
1401         case I32:
1402           if (neon_imm.CanConvert<uint32_t>()) {
1403             uint32_t imm = neon_imm.GetImmediate<uint32_t>();
1404             // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
1405             if (IsI8BitPattern(imm)) {
1406               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1407               vmov(cond, I8, rd, imm & 0xff);
1408               return;
1409             }
1410             // vmov.i32 d0, 0xff0000ff will translate into
1411             // vmov.i64 d0, 0xff0000ffff0000ff
1412             if (IsI64BitPattern(imm)) {
1413               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1414               vmov(cond, I64, rd, replicate<uint64_t>(imm));
1415               return;
1416             }
1417             // vmov.i32 d0, 0xffab0000 will translate into
1418             // vmvn.i32 d0, 0x0054ffff
1419             if (cond.Is(al) && CanBeInverted(imm)) {
1420               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1421               vmvn(I32, rd, ~imm);
1422               return;
1423             }
1424           }
1425           break;
1426         case I16:
1427           if (neon_imm.CanConvert<uint16_t>()) {
1428             uint16_t imm = neon_imm.GetImmediate<uint16_t>();
1429             // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
1430             if (IsI8BitPattern(imm)) {
1431               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1432               vmov(cond, I8, rd, imm & 0xff);
1433               return;
1434             }
1435           }
1436           break;
1437         case I64:
1438           if (neon_imm.CanConvert<uint64_t>()) {
1439             uint64_t imm = neon_imm.GetImmediate<uint64_t>();
1440             // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
1441             if (IsI8BitPattern(imm)) {
1442               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1443               vmov(cond, I8, rd, imm & 0xff);
1444               return;
1445             }
1446             // mov ip, lo(imm64)
1447             // vdup d0, ip
1448             // vdup is preferred to 'vmov d0[0]' as d0[1] does not need to
1449             // be preserved.
1450             {
1451               UseScratchRegisterScope temps(this);
1452               Register scratch = temps.Acquire();
1453               {
1454                 // TODO: The scope length was measured empirically. We
1455                 // should analyse the worst-case size and add targeted
1456                 // tests.
1457                 CodeBufferCheckScope scope(this,
1458                                            2 * kMaxInstructionSizeInBytes);
1459                 mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
1460               }
1461               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1462               vdup(cond, Untyped32, rd, scratch);
1463             }
1464             // mov ip, hi(imm64)
1465             // vmov d0[1], ip
1466             {
1467               UseScratchRegisterScope temps(this);
1468               Register scratch = temps.Acquire();
1469               {
1470                 // TODO: The scope length was measured empirically. We
1471                 // should analyse the worst-case size and add targeted
1472                 // tests.
1473                 CodeBufferCheckScope scope(this,
1474                                            2 * kMaxInstructionSizeInBytes);
1475                 mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
1476               }
1477               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1478               vmov(cond, Untyped32, DRegisterLane(rd, 1), scratch);
1479             }
1480             return;
1481           }
1482           break;
1483         default:
1484           break;
1485       }
1486       VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
1487       if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
1488         // mov ip, imm32
1489         // vdup.16/vdup.32 d0, ip (depending on dt)
1490         UseScratchRegisterScope temps(this);
1491         Register scratch = temps.Acquire();
1492         {
1493           CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
1494           mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
1495         }
1496         DataTypeValue vdup_dt = Untyped32;
1497         switch (dt.GetValue()) {
1498           case I16:
1499             vdup_dt = Untyped16;
1500             break;
1501           case I32:
1502             vdup_dt = Untyped32;
1503             break;
1504           default:
1505             VIXL_UNREACHABLE();
1506         }
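        // vdup.16 duplicates only the least-significant half-word of the
        // scratch register (vdup.32 duplicates the whole word), so the low
        // bits of the immediate end up in every lane.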
1507         CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1508         vdup(cond, vdup_dt, rd, scratch);
1509         return;
1510       }
1511       if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
1512         float f = neon_imm.GetImmediate<float>();
1513         // Punt to vmov.i32
1514         // TODO: The scope length was guessed based on the double case below.
1515         // We should analyse the worst-case size and add targeted tests.
1516         CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1517         vmov(cond, I32, rd, FloatToRawbits(f));
1518         return;
1519       }
1520       if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
1521         // Punt to vmov.i64
1522         double d = neon_imm.GetImmediate<double>();
1523         // TODO: The scope length was measured empirically. We should
1524         // analyse the worst-case size and add targeted tests.
1526         CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
1527         vmov(cond, I64, rd, DoubleToRawbits(d));
1528         return;
1529       }
1530     }
1531   }
1532   Assembler::Delegate(type, instruction, cond, dt, rd, operand);
1533 }
1534 
1535 
1536 void MacroAssembler::Delegate(InstructionType type,
1537                               InstructionCondDtQQop instruction,
1538                               Condition cond,
1539                               DataType dt,
1540                               QRegister rd,
1541                               const QOperand& operand) {
1542   CONTEXT_SCOPE;
1543   if (type == kVmov) {
1544     if (operand.IsImmediate()) {
1545       const NeonImmediate& neon_imm = operand.GetNeonImmediate();
1546       switch (dt.GetValue()) {
1547         case I32:
1548           if (neon_imm.CanConvert<uint32_t>()) {
1549             uint32_t imm = neon_imm.GetImmediate<uint32_t>();
1550             // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
1551             if (IsI8BitPattern(imm)) {
1552               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1553               vmov(cond, I8, rd, imm & 0xff);
1554               return;
1555             }
1556             // vmov.i32 d0, 0xff0000ff will translate into
1557             // vmov.i64 d0, 0xff0000ffff0000ff
1558             if (IsI64BitPattern(imm)) {
1559               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1560               vmov(cond, I64, rd, replicate<uint64_t>(imm));
1561               return;
1562             }
1563             // vmov.i32 d0, 0xffab0000 will translate into
1564             // vmvn.i32 d0, 0x0054ffff
1565             if (CanBeInverted(imm)) {
1566               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1567               vmvn(cond, I32, rd, ~imm);
1568               return;
1569             }
1570           }
1571           break;
1572         case I16:
1573           if (neon_imm.CanConvert<uint16_t>()) {
1574             uint16_t imm = neon_imm.GetImmediate<uint16_t>();
1575             // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
1576             if (IsI8BitPattern(imm)) {
1577               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1578               vmov(cond, I8, rd, imm & 0xff);
1579               return;
1580             }
1581           }
1582           break;
1583         case I64:
1584           if (neon_imm.CanConvert<uint64_t>()) {
1585             uint64_t imm = neon_imm.GetImmediate<uint64_t>();
1586             // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
1587             if (IsI8BitPattern(imm)) {
1588               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1589               vmov(cond, I8, rd, imm & 0xff);
1590               return;
1591             }
1592             // mov ip, lo(imm64)
1593             // vdup q0, ip
1594             // vdup is preferred to 'vmov d0[0]' as d0[1-3] don't need to
1595             // be preserved.
1596             {
1597               UseScratchRegisterScope temps(this);
1598               Register scratch = temps.Acquire();
1599               {
1600                 CodeBufferCheckScope scope(this,
1601                                            2 * kMaxInstructionSizeInBytes);
1602                 mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
1603               }
1604               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1605               vdup(cond, Untyped32, rd, scratch);
1606             }
1607             // mov ip, hi(imm64)
1608             // vmov.32 d0[1], ip
1609             // vmov d1, d0
1610             {
1611               UseScratchRegisterScope temps(this);
1612               Register scratch = temps.Acquire();
1613               {
1614                 CodeBufferCheckScope scope(this,
1615                                            2 * kMaxInstructionSizeInBytes);
1616                 mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
1617               }
1618               {
1619                 CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1620                 vmov(cond,
1621                      Untyped32,
1622                      DRegisterLane(rd.GetLowDRegister(), 1),
1623                      scratch);
1624               }
1625               CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1626               vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
1627             }
1628             return;
1629           }
1630           break;
1631         default:
1632           break;
1633       }
1634       VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
1635       if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
1636         // mov ip, imm32
1637         // vdup.16/vdup.32 q0, ip (depending on dt)
1638         UseScratchRegisterScope temps(this);
1639         Register scratch = temps.Acquire();
1640         {
1641           CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
1642           mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
1643         }
1644         DataTypeValue vdup_dt = Untyped32;
1645         switch (dt.GetValue()) {
1646           case I16:
1647             vdup_dt = Untyped16;
1648             break;
1649           case I32:
1650             vdup_dt = Untyped32;
1651             break;
1652           default:
1653             VIXL_UNREACHABLE();
1654         }
1655         CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1656         vdup(cond, vdup_dt, rd, scratch);
1657         return;
1658       }
1659       if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
1660         // Punt to vmov.i32
1661         float f = neon_imm.GetImmediate<float>();
1662         CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1663         vmov(cond, I32, rd, FloatToRawbits(f));
1664         return;
1665       }
1666       if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
1667         // Use vmov to create the double in the low D register, then duplicate
1668         // it into the high D register.
1669         double d = neon_imm.GetImmediate<double>();
1670         CodeBufferCheckScope scope(this, 7 * kMaxInstructionSizeInBytes);
1671         vmov(cond, F64, rd.GetLowDRegister(), d);
1672         vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
1673         return;
1674       }
1675     }
1676   }
1677   Assembler::Delegate(type, instruction, cond, dt, rd, operand);
1678 }
1679 
1680 
1681 void MacroAssembler::Delegate(InstructionType type,
1682                               InstructionCondRL instruction,
1683                               Condition cond,
1684                               Register rt,
1685                               Location* location) {
1686   VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) ||
1687               (type == kLdrsh));
1688 
1689   CONTEXT_SCOPE;
1690 
1691   if (location->IsBound()) {
1692     CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
1693     UseScratchRegisterScope temps(this);
1694     temps.Include(rt);
1695     Register scratch = temps.Acquire();
1696     uint32_t mask = GetOffsetMask(type, Offset);
1697     switch (type) {
1698       case kLdrb:
1699         ldrb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
1700         return;
1701       case kLdrh:
1702         ldrh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
1703         return;
1704       case kLdrsb:
1705         ldrsb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
1706         return;
1707       case kLdrsh:
1708         ldrsh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
1709         return;
1710       default:
1711         VIXL_UNREACHABLE();
1712     }
1713     return;
1714   }
1715 
1716   Assembler::Delegate(type, instruction, cond, rt, location);
1717 }
1718 
1719 
1720 void MacroAssembler::Delegate(InstructionType type,
1721                               InstructionCondRRL instruction,
1722                               Condition cond,
1723                               Register rt,
1724                               Register rt2,
1725                               Location* location) {
1726   VIXL_ASSERT(type == kLdrd);
1727 
1728   CONTEXT_SCOPE;
1729 
1730   if (location->IsBound()) {
1731     CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
1732     UseScratchRegisterScope temps(this);
1733     temps.Include(rt, rt2);
1734     Register scratch = temps.Acquire();
1735     uint32_t mask = GetOffsetMask(type, Offset);
1736     ldrd(rt, rt2, MemOperandComputationHelper(cond, scratch, location, mask));
1737     return;
1738   }
1739 
1740   Assembler::Delegate(type, instruction, cond, rt, rt2, location);
1741 }
1742 
1743 
1744 void MacroAssembler::Delegate(InstructionType type,
1745                               InstructionCondSizeRMop instruction,
1746                               Condition cond,
1747                               EncodingSize size,
1748                               Register rd,
1749                               const MemOperand& operand) {
1750   CONTEXT_SCOPE;
1751   VIXL_ASSERT(size.IsBest());
1752   VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) ||
1753               (type == kLdrsb) || (type == kLdrsh) || (type == kStr) ||
1754               (type == kStrb) || (type == kStrh));
1755   if (operand.IsImmediate()) {
1756     const Register& rn = operand.GetBaseRegister();
1757     AddrMode addrmode = operand.GetAddrMode();
1758     int32_t offset = operand.GetOffsetImmediate();
1759     uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
1760     // Try to maximize the offset used by the MemOperand (load_store_offset).
1761     // Add the part which can't be used by the MemOperand (add_offset).
1762     uint32_t load_store_offset = offset & extra_offset_mask;
1763     uint32_t add_offset = offset & ~extra_offset_mask;
1764     if ((add_offset != 0) &&
1765         (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
1766       load_store_offset = 0;
1767       add_offset = offset;
1768     }
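    // Illustrative example (hypothetical values): with a 12-bit offset field,
    // extra_offset_mask would be 0xfff, so an offset of 0x12345 splits into
    // add_offset = 0x12000 and load_store_offset = 0x345, i.e. an
    // 'add <scratch>, rn, #0x12000' followed by a load/store using a #0x345
    // immediate offset. If the whole offset is itself a modified immediate
    // (e.g. 0x1100), it is folded entirely into the add instead.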
1769     switch (addrmode) {
1770       case PreIndex:
1771         // Avoid the unpredictable case 'str r0, [r0, imm]!'
1772         if (!rn.Is(rd)) {
1773           // Pre-Indexed case:
1774           // ldr r0, [r1, 12345]! will translate into
1775           //   add r1, r1, 12345
1776           //   ldr r0, [r1]
1777           {
1778             CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1779             add(cond, rn, rn, add_offset);
1780           }
1781           {
1782             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1783             (this->*instruction)(cond,
1784                                  size,
1785                                  rd,
1786                                  MemOperand(rn, load_store_offset, PreIndex));
1787           }
1788           return;
1789         }
1790         break;
1791       case Offset: {
1792         UseScratchRegisterScope temps(this);
1793         // Allow using the destination as a scratch register if possible.
1794         if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
1795             !rd.Is(rn)) {
1796           temps.Include(rd);
1797         }
1798         Register scratch = temps.Acquire();
1799         // Offset case:
1800         // ldr r0, [r1, 12345] will translate into
1801         //   add r0, r1, 12345
1802         //   ldr r0, [r0]
1803         {
1804           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1805           add(cond, scratch, rn, add_offset);
1806         }
1807         {
1808           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1809           (this->*instruction)(cond,
1810                                size,
1811                                rd,
1812                                MemOperand(scratch, load_store_offset));
1813         }
1814         return;
1815       }
1816       case PostIndex:
1817         // Avoid the unpredictable case 'ldr r0, [r0], imm'
1818         if (!rn.Is(rd)) {
1819           // Post-indexed case:
1820           // ldr r0, [r1], imm32 will translate into
1821           //   ldr r0, [r1]
1822           //   movw ip, imm32 & 0xffff
1823           //   movt ip, imm32 >> 16
1824           //   add r1, r1, ip
1825           {
1826             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1827             (this->*instruction)(cond,
1828                                  size,
1829                                  rd,
1830                                  MemOperand(rn, load_store_offset, PostIndex));
1831           }
1832           {
1833             CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
1834             add(cond, rn, rn, add_offset);
1835           }
1836           return;
1837         }
1838         break;
1839     }
1840   } else if (operand.IsPlainRegister()) {
1841     const Register& rn = operand.GetBaseRegister();
1842     AddrMode addrmode = operand.GetAddrMode();
1843     const Register& rm = operand.GetOffsetRegister();
1844     if (rm.IsPC()) {
1845       VIXL_ABORT_WITH_MSG(
1846           "The MacroAssembler does not convert loads and stores with a PC "
1847           "offset register.\n");
1848     }
1849     if (rn.IsPC()) {
1850       if (addrmode == Offset) {
1851         if (IsUsingT32()) {
1852           VIXL_ABORT_WITH_MSG(
1853               "The MacroAssembler does not convert loads and stores with a PC "
1854               "base register for T32.\n");
1855         }
1856       } else {
1857         VIXL_ABORT_WITH_MSG(
1858             "The MacroAssembler does not convert loads and stores with a PC "
1859             "base register in pre-index or post-index mode.\n");
1860       }
1861     }
1862     switch (addrmode) {
1863       case PreIndex:
1864         // Avoid the unpredictable case 'str r0, [r0, imm]!'
1865         if (!rn.Is(rd)) {
1866           // Pre-Indexed case:
1867           // ldr r0, [r1, r2]! will translate into
1868           //   add r1, r1, r2
1869           //   ldr r0, [r1]
1870           {
1871             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1872             if (operand.GetSign().IsPlus()) {
1873               add(cond, rn, rn, rm);
1874             } else {
1875               sub(cond, rn, rn, rm);
1876             }
1877           }
1878           {
1879             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1880             (this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
1881           }
1882           return;
1883         }
1884         break;
1885       case Offset: {
1886         UseScratchRegisterScope temps(this);
1887         // Allow using the destination as a scratch register if this is not a
1888         // store.
1889         // Avoid using PC as a temporary as this has side-effects.
1890         if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
1891             !rd.IsPC()) {
1892           temps.Include(rd);
1893         }
1894         Register scratch = temps.Acquire();
1895         // Offset case:
1896         // ldr r0, [r1, r2] will translate into
1897         //   add r0, r1, r2
1898         //   ldr r0, [r0]
1899         {
1900           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1901           if (operand.GetSign().IsPlus()) {
1902             add(cond, scratch, rn, rm);
1903           } else {
1904             sub(cond, scratch, rn, rm);
1905           }
1906         }
1907         {
1908           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1909           (this->*instruction)(cond, size, rd, MemOperand(scratch, Offset));
1910         }
1911         return;
1912       }
1913       case PostIndex:
1914         // Avoid the unpredictable case 'ldr r0, [r0], imm'
1915         if (!rn.Is(rd)) {
1916           // Post-indexed case:
1917           // ldr r0, [r1], r2 will translate into
1918           //   ldr r0, [r1]
1919           //   add r1, r1, r2
1920           {
1921             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1922             (this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
1923           }
1924           {
1925             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
1926             if (operand.GetSign().IsPlus()) {
1927               add(cond, rn, rn, rm);
1928             } else {
1929               sub(cond, rn, rn, rm);
1930             }
1931           }
1932           return;
1933         }
1934         break;
1935     }
1936   }
1937   Assembler::Delegate(type, instruction, cond, size, rd, operand);
1938 }
1939 
1940 
1941 void MacroAssembler::Delegate(InstructionType type,
1942                               InstructionCondRRMop instruction,
1943                               Condition cond,
1944                               Register rt,
1945                               Register rt2,
1946                               const MemOperand& operand) {
1947   if ((type == kLdaexd) || (type == kLdrexd) || (type == kStlex) ||
1948       (type == kStlexb) || (type == kStlexh) || (type == kStrex) ||
1949       (type == kStrexb) || (type == kStrexh)) {
1950     UnimplementedDelegate(type);
1951     return;
1952   }
1953 
1954   VIXL_ASSERT((type == kLdrd) || (type == kStrd));
1955 
1956   CONTEXT_SCOPE;
1957 
1958   // TODO: Should we allow these cases?
1959   if (IsUsingA32()) {
1960     // The first register needs to be even.
1961     if ((rt.GetCode() & 1) != 0) {
1962       UnimplementedDelegate(type);
1963       return;
1964     }
1965     // Registers need to be adjacent.
1966     if (((rt.GetCode() + 1) % kNumberOfRegisters) != rt2.GetCode()) {
1967       UnimplementedDelegate(type);
1968       return;
1969     }
1970     // LDRD lr, pc, [...] is not allowed.
1971     if (rt.Is(lr)) {
1972       UnimplementedDelegate(type);
1973       return;
1974     }
1975   }
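  // In effect, on A32 only adjacent even/odd register pairs such as (r0, r1)
  // or (r2, r3) get past these checks, and (lr, pc) is explicitly excluded;
  // anything else is reported through UnimplementedDelegate().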
1976 
1977   if (operand.IsImmediate()) {
1978     const Register& rn = operand.GetBaseRegister();
1979     AddrMode addrmode = operand.GetAddrMode();
1980     int32_t offset = operand.GetOffsetImmediate();
1981     uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
1982     // Try to maximize the offset used by the MemOperand (load_store_offset).
1983     // Add the part which can't be used by the MemOperand (add_offset).
1984     uint32_t load_store_offset = offset & extra_offset_mask;
1985     uint32_t add_offset = offset & ~extra_offset_mask;
1986     if ((add_offset != 0) &&
1987         (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
1988       load_store_offset = 0;
1989       add_offset = offset;
1990     }
1991     switch (addrmode) {
1992       case PreIndex: {
1993         // Allow using the destinations as scratch registers if possible.
1994         UseScratchRegisterScope temps(this);
1995         if (type == kLdrd) {
1996           if (!rt.Is(rn)) temps.Include(rt);
1997           if (!rt2.Is(rn)) temps.Include(rt2);
1998         }
1999 
2000         // Pre-Indexed case:
2001         // ldrd r0, r1, [r2, 12345]! will translate into
2002         //   add r2, 12345
2003         //   ldrd r0, r1, [r2]
2004         {
2005           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2006           add(cond, rn, rn, add_offset);
2007         }
2008         {
2009           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2010           (this->*instruction)(cond,
2011                                rt,
2012                                rt2,
2013                                MemOperand(rn, load_store_offset, PreIndex));
2014         }
2015         return;
2016       }
2017       case Offset: {
2018         UseScratchRegisterScope temps(this);
2019         // Allow using the destinations as scratch registers if possible.
2020         if (type == kLdrd) {
2021           if (!rt.Is(rn)) temps.Include(rt);
2022           if (!rt2.Is(rn)) temps.Include(rt2);
2023         }
2024         Register scratch = temps.Acquire();
2025         // Offset case:
2026         // ldrd r0, r1, [r2, 12345] will translate into
2027         //   add r0, r2, 12345
2028         //   ldrd r0, r1, [r0]
2029         {
2030           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2031           add(cond, scratch, rn, add_offset);
2032         }
2033         {
2034           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2035           (this->*instruction)(cond,
2036                                rt,
2037                                rt2,
2038                                MemOperand(scratch, load_store_offset));
2039         }
2040         return;
2041       }
2042       case PostIndex:
2043         // Avoid the unpredictable case 'ldrd r0, r1, [r0], imm'
2044         if (!rn.Is(rt) && !rn.Is(rt2)) {
2045           // Post-indexed case:
2046           // ldrd r0, r1, [r2], imm32 will translate into
2047           //   ldrd r0, r1, [r2]
2048           //   movw ip, imm32 & 0xffff
2049           //   movt ip, imm32 >> 16
2050           //   add r2, ip
2051           {
2052             CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2053             (this->*instruction)(cond,
2054                                  rt,
2055                                  rt2,
2056                                  MemOperand(rn, load_store_offset, PostIndex));
2057           }
2058           {
2059             CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2060             add(cond, rn, rn, add_offset);
2061           }
2062           return;
2063         }
2064         break;
2065     }
2066   }
2067   if (operand.IsPlainRegister()) {
2068     const Register& rn = operand.GetBaseRegister();
2069     const Register& rm = operand.GetOffsetRegister();
2070     AddrMode addrmode = operand.GetAddrMode();
2071     switch (addrmode) {
2072       case PreIndex:
2073         // ldrd r0, r1, [r2, r3]! will translate into
2074         //   add r2, r3
2075         //   ldrd r0, r1, [r2]
2076         {
2077           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2078           if (operand.GetSign().IsPlus()) {
2079             add(cond, rn, rn, rm);
2080           } else {
2081             sub(cond, rn, rn, rm);
2082           }
2083         }
2084         {
2085           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2086           (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
2087         }
2088         return;
2089       case PostIndex:
2090         // ldrd r0, r1, [r2], r3 will translate into
2091         //   ldrd r0, r1, [r2]
2092         //   add r2, r3
2093         {
2094           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2095           (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
2096         }
2097         {
2098           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2099           if (operand.GetSign().IsPlus()) {
2100             add(cond, rn, rn, rm);
2101           } else {
2102             sub(cond, rn, rn, rm);
2103           }
2104         }
2105         return;
2106       case Offset: {
2107         UseScratchRegisterScope temps(this);
2108         // Allow using the destinations as scratch registers if possible.
2109         if (type == kLdrd) {
2110           if (!rt.Is(rn)) temps.Include(rt);
2111           if (!rt2.Is(rn)) temps.Include(rt2);
2112         }
2113         Register scratch = temps.Acquire();
2114         // Offset case:
2115         // ldrd r0, r1, [r2, r3] will translate into
2116         //   add r0, r2, r3
2117         //   ldrd r0, r1, [r0]
2118         {
2119           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2120           if (operand.GetSign().IsPlus()) {
2121             add(cond, scratch, rn, rm);
2122           } else {
2123             sub(cond, scratch, rn, rm);
2124           }
2125         }
2126         {
2127           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2128           (this->*instruction)(cond, rt, rt2, MemOperand(scratch, Offset));
2129         }
2130         return;
2131       }
2132     }
2133   }
2134   Assembler::Delegate(type, instruction, cond, rt, rt2, operand);
2135 }
2136 
2137 
2138 void MacroAssembler::Delegate(InstructionType type,
2139                               InstructionCondDtSMop instruction,
2140                               Condition cond,
2141                               DataType dt,
2142                               SRegister rd,
2143                               const MemOperand& operand) {
2144   CONTEXT_SCOPE;
2145   if (operand.IsImmediate()) {
2146     const Register& rn = operand.GetBaseRegister();
2147     AddrMode addrmode = operand.GetAddrMode();
2148     int32_t offset = operand.GetOffsetImmediate();
2149     VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
2150                 ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0));
2151     if (rn.IsPC()) {
2152       VIXL_ABORT_WITH_MSG(
2153           "The MacroAssembler does not convert vldr or vstr with a PC base "
2154           "register.\n");
2155     }
2156     switch (addrmode) {
2157       case PreIndex:
2158         // Pre-Indexed case:
2159         // vldr.32 s0, [r1, 12345]! will translate into
2160         //   add r1, 12345
2161         //   vldr.32 s0, [r1]
2162         if (offset != 0) {
2163           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2164           add(cond, rn, rn, offset);
2165         }
2166         {
2167           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2168           (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
2169         }
2170         return;
2171       case Offset: {
2172         UseScratchRegisterScope temps(this);
2173         Register scratch = temps.Acquire();
2174         // Offset case:
2175         // vldr.32 s0, [r1, 12345] will translate into
2176         //   add ip, r1, 12345
2177         //   vldr.32 s0, [ip]
2178         {
2179           VIXL_ASSERT(offset != 0);
2180           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2181           add(cond, scratch, rn, offset);
2182         }
2183         {
2184           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2185           (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
2186         }
2187         return;
2188       }
2189       case PostIndex:
2190         // Post-indexed case:
2191         // vldr.32 s0, [r1], imm32 will translate into
2192         //   vldr.32 s0, [r1]
2193           //   movw ip, imm32 & 0xffff
2194         //   movt ip, imm32 >> 16
2195         //   add r1, ip
2196         {
2197           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2198           (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
2199         }
2200         if (offset != 0) {
2201           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2202           add(cond, rn, rn, offset);
2203         }
2204         return;
2205     }
2206   }
2207   Assembler::Delegate(type, instruction, cond, dt, rd, operand);
2208 }
2209 
2210 
2211 void MacroAssembler::Delegate(InstructionType type,
2212                               InstructionCondDtDMop instruction,
2213                               Condition cond,
2214                               DataType dt,
2215                               DRegister rd,
2216                               const MemOperand& operand) {
2217   CONTEXT_SCOPE;
2218   if (operand.IsImmediate()) {
2219     const Register& rn = operand.GetBaseRegister();
2220     AddrMode addrmode = operand.GetAddrMode();
2221     int32_t offset = operand.GetOffsetImmediate();
2222     VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
2223                 ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0));
2224     if (rn.IsPC()) {
2225       VIXL_ABORT_WITH_MSG(
2226           "The MacroAssembler does not convert vldr or vstr with a PC base "
2227           "register.\n");
2228     }
2229     switch (addrmode) {
2230       case PreIndex:
2231         // Pre-Indexed case:
2232         // vldr.64 d0, [r1, 12345]! will translate into
2233         //   add r1, 12345
2234         //   vldr.64 d0, [r1]
2235         if (offset != 0) {
2236           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2237           add(cond, rn, rn, offset);
2238         }
2239         {
2240           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2241           (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
2242         }
2243         return;
2244       case Offset: {
2245         UseScratchRegisterScope temps(this);
2246         Register scratch = temps.Acquire();
2247         // Offset case:
2248         // vldr.64 d0, [r1, 12345] will translate into
2249         //   add ip, r1, 12345
2250         //   vldr.64 d0, [ip]
2251         {
2252           VIXL_ASSERT(offset != 0);
2253           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2254           add(cond, scratch, rn, offset);
2255         }
2256         {
2257           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2258           (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
2259         }
2260         return;
2261       }
2262       case PostIndex:
2263         // Post-indexed case:
2264         // vldr.64 d0, [r1], imm32 will translate into
2265         //   vldr.64 d0, [r1]
2266         //   movw ip, imm32 & 0xffff
2267         //   movt ip, imm32 >> 16
2268         //   add r1, ip
2269         {
2270           CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2271           (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
2272         }
2273         if (offset != 0) {
2274           CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
2275           add(cond, rn, rn, offset);
2276         }
2277         return;
2278     }
2279   }
2280   Assembler::Delegate(type, instruction, cond, dt, rd, operand);
2281 }
2282 
2283 
2284 void MacroAssembler::Delegate(InstructionType type,
2285                               InstructionCondMsrOp instruction,
2286                               Condition cond,
2287                               MaskedSpecialRegister spec_reg,
2288                               const Operand& operand) {
2289   USE(type);
2290   VIXL_ASSERT(type == kMsr);
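  // An immediate operand is first materialised in a scratch register and then
  // written with the register form of msr; the two-instruction scope below
  // leaves room for the mov to expand (e.g. to a movw/movt pair).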
2291   if (operand.IsImmediate()) {
2292     UseScratchRegisterScope temps(this);
2293     Register scratch = temps.Acquire();
2294     {
2295       CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
2296       mov(cond, scratch, operand);
2297     }
2298     CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
2299     msr(cond, spec_reg, scratch);
2300     return;
2301   }
2302   Assembler::Delegate(type, instruction, cond, spec_reg, operand);
2303 }
2304 
2305 
2306 void MacroAssembler::Delegate(InstructionType type,
2307                               InstructionCondDtDL instruction,
2308                               Condition cond,
2309                               DataType dt,
2310                               DRegister rd,
2311                               Location* location) {
2312   VIXL_ASSERT(type == kVldr);
2313 
2314   CONTEXT_SCOPE;
2315 
2316   if (location->IsBound()) {
2317     CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
2318     UseScratchRegisterScope temps(this);
2319     Register scratch = temps.Acquire();
2320     uint32_t mask = GetOffsetMask(type, Offset);
2321     vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
2322     return;
2323   }
2324 
2325   Assembler::Delegate(type, instruction, cond, dt, rd, location);
2326 }
2327 
2328 
2329 void MacroAssembler::Delegate(InstructionType type,
2330                               InstructionCondDtSL instruction,
2331                               Condition cond,
2332                               DataType dt,
2333                               SRegister rd,
2334                               Location* location) {
2335   VIXL_ASSERT(type == kVldr);
2336 
2337   CONTEXT_SCOPE;
2338 
2339   if (location->IsBound()) {
2340     CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
2341     UseScratchRegisterScope temps(this);
2342     Register scratch = temps.Acquire();
2343     uint32_t mask = GetOffsetMask(type, Offset);
2344     vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
2345     return;
2346   }
2347 
2348   Assembler::Delegate(type, instruction, cond, dt, rd, location);
2349 }
2350 
2351 
2352 #undef CONTEXT_SCOPE
2353 #undef TOSTRING
2354 #undef STRINGIFY
2355 
2356 // Start of generated code.
2357 // End of generated code.
2358 }  // namespace aarch32
2359 }  // namespace vixl
2360