• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 #include "src/v8.h"
38 
39 #if V8_TARGET_ARCH_ARM
40 
41 #include "src/arm/assembler-arm-inl.h"
42 #include "src/macro-assembler.h"
43 #include "src/serialize.h"
44 
45 namespace v8 {
46 namespace internal {
47 
48 // Get the CPU features enabled by the build. For cross compilation the
49 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
50 // can be defined to enable ARMv7 and VFPv3 instructions when building the
51 // snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
#ifdef CAN_USE_NEON
  if (FLAG_enable_neon) answer |= 1u << NEON;
#endif  // CAN_USE_NEON
  // Unaligned accesses are only enabled together with ARMv7.
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}
72 
73 
void CpuFeatures::ProbeImpl(bool cross_compile) {
  // Start from whatever the build configuration guarantees.
  supported_ |= CpuFeaturesImpliedByCompiler();
  cache_line_size_ = 64;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
    if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
    if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
  }
  if (FLAG_enable_mls) supported_ |= 1u << MLS;
  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;

#else  // __arm__
  // Probe for additional features at runtime.
  CPU cpu;
  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;

  if (cpu.architecture() >= 7) {
    if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
    if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
    // Use movw/movt for QUALCOMM ARMv7 cores.
    if (FLAG_enable_movw_movt && cpu.implementer() == CPU::QUALCOMM) {
      supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    }
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == CPU::ARM && (cpu.part() == CPU::ARM_CORTEX_A5 ||
                                        cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
#endif

  // VFP3 support must always come together with ARMv7 support.
  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}
128 
129 
// Prints a one-line summary of the compile-time target configuration
// (architecture, fpu, thumb mode and float ABI).
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#ifdef __arm__

# ifdef ARM_TEST
  arm_test = " test";
# endif
# if defined __ARM_NEON__
  arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
# endif
  // On hardware the float ABI is determined at runtime.
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";

#else  // __arm__

  // Simulator build: everything is determined by preprocessor symbols.
  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#  else
  arm_fpu = " vfp3-d16";
#  endif
# else
  arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
# else
  arm_float_abi = "softfp";
# endif

#endif  // __arm__

  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}
183 
184 
// Prints the set of CPU features actually enabled (after probing), plus the
// float ABI in use.
void CpuFeatures::PrintFeatures() {
  printf(
    "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
    "MOVW_MOVT_IMMEDIATE_LOADS=%d",
    CpuFeatures::IsSupported(ARMv7),
    CpuFeatures::IsSupported(VFP3),
    CpuFeatures::IsSupported(VFP32DREGS),
    CpuFeatures::IsSupported(NEON),
    CpuFeatures::IsSupported(SUDIV),
    CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
  // On hardware, query the runtime ABI; otherwise fall back to the build flag.
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
205 
206 
207 // -----------------------------------------------------------------------------
208 // Implementation of DwVfpRegister
209 
// Maps a register-allocator index to the printable name of the corresponding
// VFP double register, skipping over the reserved registers.
const char* DwVfpRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < NumAllocatableRegisters());
  // The reserved registers (kDoubleRegZero..kScratchDoubleReg) must form a
  // contiguous range of register codes for the skip below to be valid.
  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
  return VFPRegisters::Name(index, true);
}
217 
218 
219 // -----------------------------------------------------------------------------
220 // Implementation of RelocInfo
221 
// Empty mask: no RelocInfo modes require a fix-up pass on ARM.
const int RelocInfo::kApplyMask = 0;
223 
224 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction, or is an
  // out of line constant pool entry.  These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
}
232 
233 
IsInConstantPool()234 bool RelocInfo::IsInConstantPool() {
235   if (FLAG_enable_ool_constant_pool) {
236     return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
237   } else {
238     return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
239   }
240 }
241 
242 
PatchCode(byte * instructions,int instruction_count)243 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
244   // Patch the code at the current address with the supplied instructions.
245   Instr* pc = reinterpret_cast<Instr*>(pc_);
246   Instr* instr = reinterpret_cast<Instr*>(instructions);
247   for (int i = 0; i < instruction_count; i++) {
248     *(pc + i) = *(instr + i);
249   }
250 
251   // Indicate that code has changed.
252   CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
253 }
254 
255 
256 // Patch the code at the current PC with a call to the target address.
257 // Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not implemented on ARM; aborts if ever reached.
  UNIMPLEMENTED();
}
262 
263 
264 // -----------------------------------------------------------------------------
265 // Implementation of Operand and MemOperand
266 // See assembler-arm-inl.h for inlined constructors
267 
// Builds an immediate operand from a handle. Heap objects are embedded via
// the handle's location with EMBEDDED_OBJECT relocation; other values (smis)
// are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}
283 
284 
Operand(Register rm,ShiftOp shift_op,int shift_imm)285 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
286   ASSERT(is_uint5(shift_imm));
287 
288   rm_ = rm;
289   rs_ = no_reg;
290   shift_op_ = shift_op;
291   shift_imm_ = shift_imm & 31;
292 
293   if ((shift_op == ROR) && (shift_imm == 0)) {
294     // ROR #0 is functionally equivalent to LSL #0 and this allow us to encode
295     // RRX as ROR #0 (See below).
296     shift_op = LSL;
297   } else if (shift_op == RRX) {
298     // encoded as ROR with shift_imm == 0
299     ASSERT(shift_imm == 0);
300     shift_op_ = ROR;
301     shift_imm_ = 0;
302   }
303 }
304 
305 
Operand(Register rm,ShiftOp shift_op,Register rs)306 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
307   ASSERT(shift_op != RRX);
308   rm_ = rm;
309   rs_ = no_reg;
310   shift_op_ = shift_op;
311   rs_ = rs;
312 }
313 
314 
MemOperand(Register rn,int32_t offset,AddrMode am)315 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
316   rn_ = rn;
317   rm_ = no_reg;
318   offset_ = offset;
319   am_ = am;
320 }
321 
322 
MemOperand(Register rn,Register rm,AddrMode am)323 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
324   rn_ = rn;
325   rm_ = rm;
326   shift_op_ = LSL;
327   shift_imm_ = 0;
328   am_ = am;
329 }
330 
331 
MemOperand(Register rn,Register rm,ShiftOp shift_op,int shift_imm,AddrMode am)332 MemOperand::MemOperand(Register rn, Register rm,
333                        ShiftOp shift_op, int shift_imm, AddrMode am) {
334   ASSERT(is_uint5(shift_imm));
335   rn_ = rn;
336   rm_ = rm;
337   shift_op_ = shift_op;
338   shift_imm_ = shift_imm & 31;
339   am_ = am;
340 }
341 
342 
NeonMemOperand(Register rn,AddrMode am,int align)343 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
344   ASSERT((am == Offset) || (am == PostIndex));
345   rn_ = rn;
346   rm_ = (am == Offset) ? pc : sp;
347   SetAlignment(align);
348 }
349 
350 
NeonMemOperand(Register rn,Register rm,int align)351 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
352   rn_ = rn;
353   rm_ = rm;
354   SetAlignment(align);
355 }
356 
357 
SetAlignment(int align)358 void NeonMemOperand::SetAlignment(int align) {
359   switch (align) {
360     case 0:
361       align_ = 0;
362       break;
363     case 64:
364       align_ = 1;
365       break;
366     case 128:
367       align_ = 2;
368       break;
369     case 256:
370       align_ = 3;
371       break;
372     default:
373       UNREACHABLE();
374       align_ = 0;
375       break;
376   }
377 }
378 
379 
NeonListOperand(DoubleRegister base,int registers_count)380 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
381   base_ = base;
382   switch (registers_count) {
383     case 1:
384       type_ = nlt_1;
385       break;
386     case 2:
387       type_ = nlt_2;
388       break;
389     case 3:
390       type_ = nlt_3;
391       break;
392     case 4:
393       type_ = nlt_4;
394       break;
395     default:
396       UNREACHABLE();
397       type_ = nlt_1;
398       break;
399   }
400 }
401 
402 
403 // -----------------------------------------------------------------------------
404 // Specific instructions, constants, and masks.
405 
// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// ldr rd, [pp, #offset]  (pp, the out-of-line constant pool pointer, is r8)
const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
// Masks, patterns and flip bits for mov/mvn, movw, cmp/cmn, add/sub and
// and/bic instruction pairs.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// Patterns for ldr/str with the frame pointer (fp) as base register, and
// masks for decomposing ldr/str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
464 
465 
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  // Relocation info is written backwards from the end of the buffer
  // (see GetCode, which computes reloc_size from this position).
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  // No constant-pool entries are pending yet.
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  // -1 marks "no use recorded yet" for the first constant-pool access.
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  // The in-line constant pool is unavailable when the out-of-line pool is on.
  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
  constant_pool_full_ = false;
  ClearRecordedAstId();
}
484 
485 
Assembler::~Assembler() {
  // Every constant-pool-blocking scope must have been exited by now.
  ASSERT(const_pool_blocked_nesting_ == 0);
}
489 
490 
// Finalizes the generated code and fills in |desc| with the buffer layout
// (instructions grow up from the start, reloc info grows down from the end).
void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}
505 
506 
Align(int m)507 void Assembler::Align(int m) {
508   ASSERT(m >= 4 && IsPowerOf2(m));
509   while ((pc_offset() & (m - 1)) != 0) {
510     nop();
511   }
512 }
513 
514 
void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}
519 
520 
// Extracts the condition field (top 4 bits) of an instruction.
Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}
524 
525 
IsBranch(Instr instr)526 bool Assembler::IsBranch(Instr instr) {
527   return (instr & (B27 | B25)) == (B27 | B25);
528 }
529 
530 
GetBranchOffset(Instr instr)531 int Assembler::GetBranchOffset(Instr instr) {
532   ASSERT(IsBranch(instr));
533   // Take the jump offset in the lower 24 bits, sign extend it and multiply it
534   // with 4 to get the offset in bytes.
535   return ((instr & kImm24Mask) << 8) >> 6;
536 }
537 
538 
IsLdrRegisterImmediate(Instr instr)539 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
540   return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
541 }
542 
543 
IsVldrDRegisterImmediate(Instr instr)544 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
545   return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
546 }
547 
548 
GetLdrRegisterImmediateOffset(Instr instr)549 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
550   ASSERT(IsLdrRegisterImmediate(instr));
551   bool positive = (instr & B23) == B23;
552   int offset = instr & kOff12Mask;  // Zero extended offset.
553   return positive ? offset : -offset;
554 }
555 
556 
GetVldrDRegisterImmediateOffset(Instr instr)557 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
558   ASSERT(IsVldrDRegisterImmediate(instr));
559   bool positive = (instr & B23) == B23;
560   int offset = instr & kOff8Mask;  // Zero extended offset.
561   offset <<= 2;
562   return positive ? offset : -offset;
563 }
564 
565 
SetLdrRegisterImmediateOffset(Instr instr,int offset)566 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
567   ASSERT(IsLdrRegisterImmediate(instr));
568   bool positive = offset >= 0;
569   if (!positive) offset = -offset;
570   ASSERT(is_uint12(offset));
571   // Set bit indicating whether the offset should be added.
572   instr = (instr & ~B23) | (positive ? B23 : 0);
573   // Set the actual offset.
574   return (instr & ~kOff12Mask) | offset;
575 }
576 
577 
SetVldrDRegisterImmediateOffset(Instr instr,int offset)578 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
579   ASSERT(IsVldrDRegisterImmediate(instr));
580   ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
581   bool positive = offset >= 0;
582   if (!positive) offset = -offset;
583   ASSERT(is_uint10(offset));
584   // Set bit indicating whether the offset should be added.
585   instr = (instr & ~B23) | (positive ? B23 : 0);
586   // Set the actual offset. Its bottom 2 bits are zero.
587   return (instr & ~kOff8Mask) | (offset >> 2);
588 }
589 
590 
IsStrRegisterImmediate(Instr instr)591 bool Assembler::IsStrRegisterImmediate(Instr instr) {
592   return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
593 }
594 
595 
SetStrRegisterImmediateOffset(Instr instr,int offset)596 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
597   ASSERT(IsStrRegisterImmediate(instr));
598   bool positive = offset >= 0;
599   if (!positive) offset = -offset;
600   ASSERT(is_uint12(offset));
601   // Set bit indicating whether the offset should be added.
602   instr = (instr & ~B23) | (positive ? B23 : 0);
603   // Set the actual offset.
604   return (instr & ~kOff12Mask) | offset;
605 }
606 
607 
IsAddRegisterImmediate(Instr instr)608 bool Assembler::IsAddRegisterImmediate(Instr instr) {
609   return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
610 }
611 
612 
SetAddRegisterImmediateOffset(Instr instr,int offset)613 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
614   ASSERT(IsAddRegisterImmediate(instr));
615   ASSERT(offset >= 0);
616   ASSERT(is_uint12(offset));
617   // Set the offset.
618   return (instr & ~kOff12Mask) | offset;
619 }
620 
621 
GetRd(Instr instr)622 Register Assembler::GetRd(Instr instr) {
623   Register reg;
624   reg.code_ = Instruction::RdValue(instr);
625   return reg;
626 }
627 
628 
GetRn(Instr instr)629 Register Assembler::GetRn(Instr instr) {
630   Register reg;
631   reg.code_ = Instruction::RnValue(instr);
632   return reg;
633 }
634 
635 
GetRm(Instr instr)636 Register Assembler::GetRm(Instr instr) {
637   Register reg;
638   reg.code_ = Instruction::RmValue(instr);
639   return reg;
640 }
641 
642 
IsPush(Instr instr)643 bool Assembler::IsPush(Instr instr) {
644   return ((instr & ~kRdMask) == kPushRegPattern);
645 }
646 
647 
IsPop(Instr instr)648 bool Assembler::IsPop(Instr instr) {
649   return ((instr & ~kRdMask) == kPopRegPattern);
650 }
651 
652 
IsStrRegFpOffset(Instr instr)653 bool Assembler::IsStrRegFpOffset(Instr instr) {
654   return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
655 }
656 
657 
IsLdrRegFpOffset(Instr instr)658 bool Assembler::IsLdrRegFpOffset(Instr instr) {
659   return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
660 }
661 
662 
IsStrRegFpNegOffset(Instr instr)663 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
664   return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
665 }
666 
667 
IsLdrRegFpNegOffset(Instr instr)668 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
669   return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
670 }
671 
672 
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCMask) == kLdrPCPattern;
}
678 
679 
bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12], where pp is the constant pool pointer.
  return (instr & kLdrPpMask) == kLdrPpPattern;
}
685 
686 
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}
692 
693 
bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}
699 
700 
IsTstImmediate(Instr instr)701 bool Assembler::IsTstImmediate(Instr instr) {
702   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
703       (I | TST | S);
704 }
705 
706 
IsCmpRegister(Instr instr)707 bool Assembler::IsCmpRegister(Instr instr) {
708   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
709       (CMP | S);
710 }
711 
712 
IsCmpImmediate(Instr instr)713 bool Assembler::IsCmpImmediate(Instr instr) {
714   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
715       (I | CMP | S);
716 }
717 
718 
// Returns the register being compared (the Rn field) of a cmp-immediate.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}
723 
724 
// Returns the raw 12-bit immediate field of a cmp-immediate instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}
729 
730 
731 // Labels refer to positions in the (to be) generated code.
732 // There are bound, linked, and unused labels.
733 //
734 // Bound labels refer to known positions in the already
735 // generated code. pos() is the position the label refers to.
736 //
737 // Linked labels refer to unknown positions in the code
738 // to be generated; pos() is the position of the last
739 // instruction using the label.
740 //
741 // The linked labels form a link chain by making the branch offset
// in the instruction stream to point to the previous branch
743 // instruction using the same label.
744 //
745 // The link chain is terminated by a branch offset pointing to the
746 // same position.
747 
748 
// For the slot at |pos|, returns either the raw link value (if the slot holds
// an emitted label link, recognized by fitting in 24 bits) or the absolute
// target position of the branch stored there.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and convert words to bytes (x4).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  // Branch offsets are relative to pc, which reads 8 bytes (kPcLoadDelta)
  // ahead of the branch instruction itself.
  return pos + kPcLoadDelta + imm26;
}
764 
765 
// Resolves the slot at |pos| to point at |target_pos|: either rewrites an
// emitted label-link slot into a mov/movw+movt/mov+orr sequence that
// materializes the target position, or patches the branch's 24-bit offset.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    // Bias by the header size so the value is an offset from the code object.
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          // The high half is zero; a single movw suffices.
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          // The top byte is zero; two instructions suffice.
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  // Otherwise the slot holds a real branch: recompute its pc-relative offset.
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
850 
851 
// Debug helper: prints the state of label L; for a linked label it walks the
// link chain and disassembles the mnemonic of each branch that uses it.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    // Walk a copy so the label itself is not consumed.
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
908 
909 
bind_to(Label * L,int pos)910 void Assembler::bind_to(Label* L, int pos) {
911   ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
912   while (L->is_linked()) {
913     int fixup_pos = L->pos();
914     next(L);  // call next before overwriting link with target at fixup_pos
915     target_at_put(fixup_pos, pos);
916   }
917   L->bind_to(pos);
918 
919   // Keep track of the last bound label so we don't eliminate any instructions
920   // before a bound label.
921   if (pos > last_bound_pos_)
922     last_bound_pos_ = pos;
923 }
924 
925 
bind(Label * L)926 void Assembler::bind(Label* L) {
927   ASSERT(!L->is_bound());  // label can only be bound once
928   bind_to(L, pc_offset());
929 }
930 
931 
next(Label * L)932 void Assembler::next(Label* L) {
933   ASSERT(L->is_linked());
934   int link = target_at(L->pos());
935   if (link == L->pos()) {
936     // Branch target points to the same instuction. This is the end of the link
937     // chain.
938     L->Unuse();
939   } else {
940     ASSERT(link >= 0);
941     L->link_to(link);
942   }
943 }
944 
945 
946 // Low-level code emission routines depending on the addressing mode.
947 // If this returns true then you have to use the rotate_imm and immed_8
948 // that it returns, because it may have already changed the instruction
949 // to match them!
fits_shifter(uint32_t imm32,uint32_t * rotate_imm,uint32_t * immed_8,Instr * instr)950 static bool fits_shifter(uint32_t imm32,
951                          uint32_t* rotate_imm,
952                          uint32_t* immed_8,
953                          Instr* instr) {
954   // imm32 must be unsigned.
955   for (int rot = 0; rot < 16; rot++) {
956     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
957     if ((imm8 <= 0xff)) {
958       *rotate_imm = rot;
959       *immed_8 = imm8;
960       return true;
961     }
962   }
963   // If the opcode is one with a complementary version and the complementary
964   // immediate fits, change the opcode.
965   if (instr != NULL) {
966     if ((*instr & kMovMvnMask) == kMovMvnPattern) {
967       if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
968         *instr ^= kMovMvnFlip;
969         return true;
970       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
971         if (CpuFeatures::IsSupported(ARMv7)) {
972           if (imm32 < 0x10000) {
973             *instr ^= kMovwLeaveCCFlip;
974             *instr |= EncodeMovwImmediate(imm32);
975             *rotate_imm = *immed_8 = 0;  // Not used for movw.
976             return true;
977           }
978         }
979       }
980     } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
981       if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
982         *instr ^= kCmpCmnFlip;
983         return true;
984       }
985     } else {
986       Instr alu_insn = (*instr & kALUMask);
987       if (alu_insn == ADD ||
988           alu_insn == SUB) {
989         if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
990           *instr ^= kAddSubFlip;
991           return true;
992         }
993       } else if (alu_insn == AND ||
994                  alu_insn == BIC) {
995         if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
996           *instr ^= kAndBicFlip;
997           return true;
998         }
999       }
1000     }
1001   }
1002   return false;
1003 }
1004 
1005 
1006 // We have to use the temporary register for things that can be relocated even
1007 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
1008 // space.  There is no guarantee that the relocated location can be similarly
1009 // encoded.
must_output_reloc_info(const Assembler * assembler) const1010 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1011   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1012     if (assembler != NULL && assembler->predictable_code_size()) return true;
1013     return assembler->serializer_enabled();
1014   } else if (RelocInfo::IsNone(rmode_)) {
1015     return false;
1016   }
1017   return true;
1018 }
1019 
1020 
use_mov_immediate_load(const Operand & x,const Assembler * assembler)1021 static bool use_mov_immediate_load(const Operand& x,
1022                                    const Assembler* assembler) {
1023   if (assembler != NULL && !assembler->can_use_constant_pool()) {
1024     // If there is no constant pool available, we must use an mov immediate.
1025     // TODO(rmcilroy): enable ARMv6 support.
1026     ASSERT(CpuFeatures::IsSupported(ARMv7));
1027     return true;
1028   } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
1029              (assembler == NULL || !assembler->predictable_code_size())) {
1030     // Prefer movw / movt to constant pool if it is more efficient on the CPU.
1031     return true;
1032   } else if (x.must_output_reloc_info(assembler)) {
1033     // Prefer constant pool if data is likely to be patched.
1034     return false;
1035   } else {
1036     // Otherwise, use immediate load if movw / movt is available.
1037     return CpuFeatures::IsSupported(ARMv7);
1038   }
1039 }
1040 
1041 
is_single_instruction(const Assembler * assembler,Instr instr) const1042 bool Operand::is_single_instruction(const Assembler* assembler,
1043                                     Instr instr) const {
1044   if (rm_.is_valid()) return true;
1045   uint32_t dummy1, dummy2;
1046   if (must_output_reloc_info(assembler) ||
1047       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
1048     // The immediate operand cannot be encoded as a shifter operand, or use of
1049     // constant pool is required. For a mov instruction not setting the
1050     // condition code additional instruction conventions can be used.
1051     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
1052       return !use_mov_immediate_load(*this, assembler);
1053     } else {
1054       // If this is not a mov or mvn instruction there will always an additional
1055       // instructions - either mov or ldr. The mov might actually be two
1056       // instructions mov or movw followed by movt so including the actual
1057       // instruction two or three instructions will be generated.
1058       return false;
1059     }
1060   } else {
1061     // No use of constant pool and the immediate operand can be encoded as a
1062     // shifter operand.
1063     return true;
1064   }
1065 }
1066 
1067 
// Loads the 32-bit immediate operand |x| into |rd|, either with a movw/movt
// pair or via a constant pool load (pc- or pp-relative), recording
// relocation info when the operand requires it.
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    // Loads targeting pc are staged through ip and transferred to pc with a
    // final single mov below.
    Register target = rd.code() == pc.code() ? ip : rd;
    // TODO(rmcilroy): add ARMv6 support for immediate loads.
    ASSERT(CpuFeatures::IsSupported(ARMv7));
    if (!FLAG_enable_ool_constant_pool &&
        x.must_output_reloc_info(this)) {
      // Make sure the movw/movt doesn't get separated.
      BlockConstPoolFor(2);
    }
    // movw: low halfword of the immediate; movt: high halfword.
    emit(cond | 0x30*B20 | target.code()*B12 |
         EncodeMovwImmediate(x.imm32_ & 0xffff));
    movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    ASSERT(can_use_constant_pool());
    ConstantPoolAddEntry(rinfo);
    // The 0 offset is fixed up when the constant pool is emitted.
    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
  }
}
1097 
1098 
// Addressing mode 1 (data-processing operands): encodes |x| into |instr| as
// an immediate, an immediate-shifted register, or a register-shifted
// register, then emits the completed instruction. Immediates that cannot be
// encoded as a shifter operand are materialized in ip first.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        // Materialize the immediate in ip, then retry with a register operand.
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
1140 
1141 
// Addressing mode 2 (word / unsigned byte load-store): encodes the memory
// operand |x| into |instr| and emits it. Negative immediate offsets are
// expressed by flipping the U (up/down) bit; offsets that do not fit in 12
// bits are materialized in ip first.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;  // the sign lives in the U bit, not in the offset field
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1172 
1173 
// Addressing mode 3 (halfword / signed byte / doubleword load-store):
// encodes |x| into |instr| and emits it. The immediate offset is split into
// two 4-bit fields; offsets that do not fit in 8 bits, and scaled register
// offsets (unsupported by this encoding), are materialized in ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;  // the sign lives in the U bit, not in the offset field
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // High nibble goes in bits 8-11, low nibble in bits 0-3.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1211 
1212 
addrmod4(Instr instr,Register rn,RegList rl)1213 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1214   ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
1215   ASSERT(rl != 0);
1216   ASSERT(!rn.is(pc));
1217   emit(instr | rn.code()*B16 | rl);
1218 }
1219 
1220 
// Addressing mode 5 (coprocessor load/store): encodes |x| into |instr| with
// an 8-bit word-count offset and emits it.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;  // the instruction encodes a word count, not a byte offset
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;  // the sign lives in the U bit, not in the offset field
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
1244 
1245 
branch_offset(Label * L,bool jump_elimination_allowed)1246 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1247   int target_pos;
1248   if (L->is_bound()) {
1249     target_pos = L->pos();
1250   } else {
1251     if (L->is_linked()) {
1252       // Point to previous instruction that uses the link.
1253       target_pos = L->pos();
1254     } else {
1255       // First entry of the link chain points to itself.
1256       target_pos = pc_offset();
1257     }
1258     L->link_to(pc_offset());
1259   }
1260 
1261   // Block the emission of the constant pool, since the branch instruction must
1262   // be emitted at the pc offset recorded by the label.
1263   BlockConstPoolFor(1);
1264   return target_pos - (pc_offset() + kPcLoadDelta);
1265 }
1266 
1267 
1268 // Branch instructions.
b(int branch_offset,Condition cond)1269 void Assembler::b(int branch_offset, Condition cond) {
1270   ASSERT((branch_offset & 3) == 0);
1271   int imm24 = branch_offset >> 2;
1272   ASSERT(is_int24(imm24));
1273   emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1274 
1275   if (cond == al) {
1276     // Dead code is a good location to emit the constant pool.
1277     CheckConstPool(false, false);
1278   }
1279 }
1280 
1281 
bl(int branch_offset,Condition cond)1282 void Assembler::bl(int branch_offset, Condition cond) {
1283   positions_recorder()->WriteRecordedPositions();
1284   ASSERT((branch_offset & 3) == 0);
1285   int imm24 = branch_offset >> 2;
1286   ASSERT(is_int24(imm24));
1287   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1288 }
1289 
1290 
blx(int branch_offset)1291 void Assembler::blx(int branch_offset) {  // v5 and above
1292   positions_recorder()->WriteRecordedPositions();
1293   ASSERT((branch_offset & 1) == 0);
1294   int h = ((branch_offset & 2) >> 1)*B24;
1295   int imm24 = branch_offset >> 2;
1296   ASSERT(is_int24(imm24));
1297   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1298 }
1299 
1300 
blx(Register target,Condition cond)1301 void Assembler::blx(Register target, Condition cond) {  // v5 and above
1302   positions_recorder()->WriteRecordedPositions();
1303   ASSERT(!target.is(pc));
1304   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1305 }
1306 
1307 
bx(Register target,Condition cond)1308 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
1309   positions_recorder()->WriteRecordedPositions();
1310   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
1311   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1312 }
1313 
1314 
1315 // Data-processing instructions.
1316 
and_(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1317 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1318                      SBit s, Condition cond) {
1319   addrmod1(cond | AND | s, src1, dst, src2);
1320 }
1321 
1322 
eor(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1323 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1324                     SBit s, Condition cond) {
1325   addrmod1(cond | EOR | s, src1, dst, src2);
1326 }
1327 
1328 
sub(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1329 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1330                     SBit s, Condition cond) {
1331   addrmod1(cond | SUB | s, src1, dst, src2);
1332 }
1333 
1334 
rsb(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1335 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1336                     SBit s, Condition cond) {
1337   addrmod1(cond | RSB | s, src1, dst, src2);
1338 }
1339 
1340 
add(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1341 void Assembler::add(Register dst, Register src1, const Operand& src2,
1342                     SBit s, Condition cond) {
1343   addrmod1(cond | ADD | s, src1, dst, src2);
1344 }
1345 
1346 
adc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1347 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1348                     SBit s, Condition cond) {
1349   addrmod1(cond | ADC | s, src1, dst, src2);
1350 }
1351 
1352 
sbc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1353 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1354                     SBit s, Condition cond) {
1355   addrmod1(cond | SBC | s, src1, dst, src2);
1356 }
1357 
1358 
rsc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1359 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1360                     SBit s, Condition cond) {
1361   addrmod1(cond | RSC | s, src1, dst, src2);
1362 }
1363 
1364 
tst(Register src1,const Operand & src2,Condition cond)1365 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1366   addrmod1(cond | TST | S, src1, r0, src2);
1367 }
1368 
1369 
teq(Register src1,const Operand & src2,Condition cond)1370 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1371   addrmod1(cond | TEQ | S, src1, r0, src2);
1372 }
1373 
1374 
cmp(Register src1,const Operand & src2,Condition cond)1375 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1376   addrmod1(cond | CMP | S, src1, r0, src2);
1377 }
1378 
1379 
cmp_raw_immediate(Register src,int raw_immediate,Condition cond)1380 void Assembler::cmp_raw_immediate(
1381     Register src, int raw_immediate, Condition cond) {
1382   ASSERT(is_uint12(raw_immediate));
1383   emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1384 }
1385 
1386 
cmn(Register src1,const Operand & src2,Condition cond)1387 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1388   addrmod1(cond | CMN | S, src1, r0, src2);
1389 }
1390 
1391 
orr(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1392 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1393                     SBit s, Condition cond) {
1394   addrmod1(cond | ORR | s, src1, dst, src2);
1395 }
1396 
1397 
mov(Register dst,const Operand & src,SBit s,Condition cond)1398 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1399   if (dst.is(pc)) {
1400     positions_recorder()->WriteRecordedPositions();
1401   }
1402   // Don't allow nop instructions in the form mov rn, rn to be generated using
1403   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1404   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1405   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1406   addrmod1(cond | MOV | s, r0, dst, src);
1407 }
1408 
1409 
// Loads |dst| with the offset of |label| from the start of the generated
// code, adjusted by the Code header. For an unbound label, a raw link word
// plus nop(s) are emitted and later patched by target_at_put.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code.
    //
    // The link will be extracted from the first instruction and the destination
    // register from the second.
    //   For ARMv7:
    //      link
    //      mov dst, dst
    //   For ARMv6:
    //      link
    //      mov dst, dst
    //      mov dst, dst
    //
    // When the label gets bound: target_at extracts the link and target_at_put
    // patches the instructions.
    ASSERT(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      // ARMv6 needs a three-instruction patch sequence (mov/orr/orr).
      nop(dst.code());
    }
  }
}
1447 
1448 
movw(Register reg,uint32_t immediate,Condition cond)1449 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1450   ASSERT(immediate < 0x10000);
1451   // May use movw if supported, but on unsupported platforms will try to use
1452   // equivalent rotated immed_8 value and other tricks before falling back to a
1453   // constant pool load.
1454   mov(reg, Operand(immediate), LeaveCC, cond);
1455 }
1456 
1457 
movt(Register reg,uint32_t immediate,Condition cond)1458 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1459   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1460 }
1461 
1462 
bic(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1463 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1464                     SBit s, Condition cond) {
1465   addrmod1(cond | BIC | s, src1, dst, src2);
1466 }
1467 
1468 
mvn(Register dst,const Operand & src,SBit s,Condition cond)1469 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1470   addrmod1(cond | MVN | s, r0, dst, src);
1471 }
1472 
1473 
1474 // Multiply instructions.
mla(Register dst,Register src1,Register src2,Register srcA,SBit s,Condition cond)1475 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1476                     SBit s, Condition cond) {
1477   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1478   emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1479        src2.code()*B8 | B7 | B4 | src1.code());
1480 }
1481 
1482 
mls(Register dst,Register src1,Register src2,Register srcA,Condition cond)1483 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1484                     Condition cond) {
1485   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1486   ASSERT(IsEnabled(MLS));
1487   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1488        src2.code()*B8 | B7 | B4 | src1.code());
1489 }
1490 
1491 
sdiv(Register dst,Register src1,Register src2,Condition cond)1492 void Assembler::sdiv(Register dst, Register src1, Register src2,
1493                      Condition cond) {
1494   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1495   ASSERT(IsEnabled(SUDIV));
1496   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1497        src2.code()*B8 | B4 | src1.code());
1498 }
1499 
1500 
mul(Register dst,Register src1,Register src2,SBit s,Condition cond)1501 void Assembler::mul(Register dst, Register src1, Register src2,
1502                     SBit s, Condition cond) {
1503   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1504   // dst goes in bits 16-19 for this instruction!
1505   emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
1506 }
1507 
1508 
smlal(Register dstL,Register dstH,Register src1,Register src2,SBit s,Condition cond)1509 void Assembler::smlal(Register dstL,
1510                       Register dstH,
1511                       Register src1,
1512                       Register src2,
1513                       SBit s,
1514                       Condition cond) {
1515   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1516   ASSERT(!dstL.is(dstH));
1517   emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1518        src2.code()*B8 | B7 | B4 | src1.code());
1519 }
1520 
1521 
smull(Register dstL,Register dstH,Register src1,Register src2,SBit s,Condition cond)1522 void Assembler::smull(Register dstL,
1523                       Register dstH,
1524                       Register src1,
1525                       Register src2,
1526                       SBit s,
1527                       Condition cond) {
1528   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1529   ASSERT(!dstL.is(dstH));
1530   emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1531        src2.code()*B8 | B7 | B4 | src1.code());
1532 }
1533 
1534 
umlal(Register dstL,Register dstH,Register src1,Register src2,SBit s,Condition cond)1535 void Assembler::umlal(Register dstL,
1536                       Register dstH,
1537                       Register src1,
1538                       Register src2,
1539                       SBit s,
1540                       Condition cond) {
1541   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1542   ASSERT(!dstL.is(dstH));
1543   emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1544        src2.code()*B8 | B7 | B4 | src1.code());
1545 }
1546 
1547 
umull(Register dstL,Register dstH,Register src1,Register src2,SBit s,Condition cond)1548 void Assembler::umull(Register dstL,
1549                       Register dstH,
1550                       Register src1,
1551                       Register src2,
1552                       SBit s,
1553                       Condition cond) {
1554   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1555   ASSERT(!dstL.is(dstH));
1556   emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1557        src2.code()*B8 | B7 | B4 | src1.code());
1558 }
1559 
1560 
1561 // Miscellaneous arithmetic instructions.
clz(Register dst,Register src,Condition cond)1562 void Assembler::clz(Register dst, Register src, Condition cond) {
1563   // v5 and above.
1564   ASSERT(!dst.is(pc) && !src.is(pc));
1565   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1566        15*B8 | CLZ | src.code());
1567 }
1568 
1569 
1570 // Saturating instructions.
1571 
1572 // Unsigned saturate.
usat(Register dst,int satpos,const Operand & src,Condition cond)1573 void Assembler::usat(Register dst,
1574                      int satpos,
1575                      const Operand& src,
1576                      Condition cond) {
1577   // v6 and above.
1578   ASSERT(CpuFeatures::IsSupported(ARMv7));
1579   ASSERT(!dst.is(pc) && !src.rm_.is(pc));
1580   ASSERT((satpos >= 0) && (satpos <= 31));
1581   ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1582   ASSERT(src.rs_.is(no_reg));
1583 
1584   int sh = 0;
1585   if (src.shift_op_ == ASR) {
1586       sh = 1;
1587   }
1588 
1589   emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1590        src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1591 }
1592 
1593 
1594 // Bitfield manipulation instructions.
1595 
1596 // Unsigned bit field extract.
1597 // Extracts #width adjacent bits from position #lsb in a register, and
1598 // writes them to the low bits of a destination register.
1599 //   ubfx dst, src, #lsb, #width
ubfx(Register dst,Register src,int lsb,int width,Condition cond)1600 void Assembler::ubfx(Register dst,
1601                      Register src,
1602                      int lsb,
1603                      int width,
1604                      Condition cond) {
1605   // v7 and above.
1606   ASSERT(CpuFeatures::IsSupported(ARMv7));
1607   ASSERT(!dst.is(pc) && !src.is(pc));
1608   ASSERT((lsb >= 0) && (lsb <= 31));
1609   ASSERT((width >= 1) && (width <= (32 - lsb)));
1610   emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1611        lsb*B7 | B6 | B4 | src.code());
1612 }
1613 
1614 
1615 // Signed bit field extract.
1616 // Extracts #width adjacent bits from position #lsb in a register, and
1617 // writes them to the low bits of a destination register. The extracted
1618 // value is sign extended to fill the destination register.
1619 //   sbfx dst, src, #lsb, #width
sbfx(Register dst,Register src,int lsb,int width,Condition cond)1620 void Assembler::sbfx(Register dst,
1621                      Register src,
1622                      int lsb,
1623                      int width,
1624                      Condition cond) {
1625   // v7 and above.
1626   ASSERT(CpuFeatures::IsSupported(ARMv7));
1627   ASSERT(!dst.is(pc) && !src.is(pc));
1628   ASSERT((lsb >= 0) && (lsb <= 31));
1629   ASSERT((width >= 1) && (width <= (32 - lsb)));
1630   emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1631        lsb*B7 | B6 | B4 | src.code());
1632 }
1633 
1634 
1635 // Bit field clear.
1636 // Sets #width adjacent bits at position #lsb in the destination register
1637 // to zero, preserving the value of the other bits.
1638 //   bfc dst, #lsb, #width
bfc(Register dst,int lsb,int width,Condition cond)1639 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1640   // v7 and above.
1641   ASSERT(CpuFeatures::IsSupported(ARMv7));
1642   ASSERT(!dst.is(pc));
1643   ASSERT((lsb >= 0) && (lsb <= 31));
1644   ASSERT((width >= 1) && (width <= (32 - lsb)));
1645   int msb = lsb + width - 1;
1646   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1647 }
1648 
1649 
1650 // Bit field insert.
1651 // Inserts #width adjacent bits from the low bits of the source register
1652 // into position #lsb of the destination register.
1653 //   bfi dst, src, #lsb, #width
bfi(Register dst,Register src,int lsb,int width,Condition cond)1654 void Assembler::bfi(Register dst,
1655                     Register src,
1656                     int lsb,
1657                     int width,
1658                     Condition cond) {
1659   // v7 and above.
1660   ASSERT(CpuFeatures::IsSupported(ARMv7));
1661   ASSERT(!dst.is(pc) && !src.is(pc));
1662   ASSERT((lsb >= 0) && (lsb <= 31));
1663   ASSERT((width >= 1) && (width <= (32 - lsb)));
1664   int msb = lsb + width - 1;
1665   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1666        src.code());
1667 }
1668 
1669 
pkhbt(Register dst,Register src1,const Operand & src2,Condition cond)1670 void Assembler::pkhbt(Register dst,
1671                       Register src1,
1672                       const Operand& src2,
1673                       Condition cond ) {
1674   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1675   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1676   // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
1677   ASSERT(!dst.is(pc));
1678   ASSERT(!src1.is(pc));
1679   ASSERT(!src2.rm().is(pc));
1680   ASSERT(!src2.rm().is(no_reg));
1681   ASSERT(src2.rs().is(no_reg));
1682   ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1683   ASSERT(src2.shift_op() == LSL);
1684   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1685        src2.shift_imm_*B7 | B4 | src2.rm().code());
1686 }
1687 
1688 
pkhtb(Register dst,Register src1,const Operand & src2,Condition cond)1689 void Assembler::pkhtb(Register dst,
1690                       Register src1,
1691                       const Operand& src2,
1692                       Condition cond) {
1693   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1694   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1695   // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
1696   ASSERT(!dst.is(pc));
1697   ASSERT(!src1.is(pc));
1698   ASSERT(!src2.rm().is(pc));
1699   ASSERT(!src2.rm().is(no_reg));
1700   ASSERT(src2.rs().is(no_reg));
1701   ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1702   ASSERT(src2.shift_op() == ASR);
1703   int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1704   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1705        asr*B7 | B6 | B4 | src2.rm().code());
1706 }
1707 
1708 
uxtb(Register dst,const Operand & src,Condition cond)1709 void Assembler::uxtb(Register dst,
1710                      const Operand& src,
1711                      Condition cond) {
1712   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1713   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1714   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1715   ASSERT(!dst.is(pc));
1716   ASSERT(!src.rm().is(pc));
1717   ASSERT(!src.rm().is(no_reg));
1718   ASSERT(src.rs().is(no_reg));
1719   ASSERT((src.shift_imm_ == 0) ||
1720          (src.shift_imm_ == 8) ||
1721          (src.shift_imm_ == 16) ||
1722          (src.shift_imm_ == 24));
1723   // Operand maps ROR #0 to LSL #0.
1724   ASSERT((src.shift_op() == ROR) ||
1725          ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
1726   emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
1727        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
1728 }
1729 
1730 
uxtab(Register dst,Register src1,const Operand & src2,Condition cond)1731 void Assembler::uxtab(Register dst,
1732                       Register src1,
1733                       const Operand& src2,
1734                       Condition cond) {
1735   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1736   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1737   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1738   ASSERT(!dst.is(pc));
1739   ASSERT(!src1.is(pc));
1740   ASSERT(!src2.rm().is(pc));
1741   ASSERT(!src2.rm().is(no_reg));
1742   ASSERT(src2.rs().is(no_reg));
1743   ASSERT((src2.shift_imm_ == 0) ||
1744          (src2.shift_imm_ == 8) ||
1745          (src2.shift_imm_ == 16) ||
1746          (src2.shift_imm_ == 24));
1747   // Operand maps ROR #0 to LSL #0.
1748   ASSERT((src2.shift_op() == ROR) ||
1749          ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
1750   emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
1751        ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
1752 }
1753 
1754 
uxtb16(Register dst,const Operand & src,Condition cond)1755 void Assembler::uxtb16(Register dst,
1756                        const Operand& src,
1757                        Condition cond) {
1758   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1759   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1760   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1761   ASSERT(!dst.is(pc));
1762   ASSERT(!src.rm().is(pc));
1763   ASSERT(!src.rm().is(no_reg));
1764   ASSERT(src.rs().is(no_reg));
1765   ASSERT((src.shift_imm_ == 0) ||
1766          (src.shift_imm_ == 8) ||
1767          (src.shift_imm_ == 16) ||
1768          (src.shift_imm_ == 24));
1769   // Operand maps ROR #0 to LSL #0.
1770   ASSERT((src.shift_op() == ROR) ||
1771          ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
1772   emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
1773        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
1774 }
1775 
1776 
1777 // Status register access instructions.
mrs(Register dst,SRegister s,Condition cond)1778 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1779   ASSERT(!dst.is(pc));
1780   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1781 }
1782 
1783 
msr(SRegisterFieldMask fields,const Operand & src,Condition cond)1784 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1785                     Condition cond) {
1786   ASSERT(fields >= B16 && fields < B20);  // at least one field set
1787   Instr instr;
1788   if (!src.rm_.is_valid()) {
1789     // Immediate.
1790     uint32_t rotate_imm;
1791     uint32_t immed_8;
1792     if (src.must_output_reloc_info(this) ||
1793         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1794       // Immediate operand cannot be encoded, load it first to register ip.
1795       move_32_bit_immediate(ip, src);
1796       msr(fields, Operand(ip), cond);
1797       return;
1798     }
1799     instr = I | rotate_imm*B8 | immed_8;
1800   } else {
1801     ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
1802     instr = src.rm_.code();
1803   }
1804   emit(cond | instr | B24 | B21 | fields | 15*B12);
1805 }
1806 
1807 
1808 // Load/Store instructions.
ldr(Register dst,const MemOperand & src,Condition cond)1809 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1810   if (dst.is(pc)) {
1811     positions_recorder()->WriteRecordedPositions();
1812   }
1813   addrmod2(cond | B26 | L, dst, src);
1814 }
1815 
1816 
str(Register src,const MemOperand & dst,Condition cond)1817 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1818   addrmod2(cond | B26, src, dst);
1819 }
1820 
1821 
ldrb(Register dst,const MemOperand & src,Condition cond)1822 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1823   addrmod2(cond | B26 | B | L, dst, src);
1824 }
1825 
1826 
strb(Register src,const MemOperand & dst,Condition cond)1827 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1828   addrmod2(cond | B26 | B, src, dst);
1829 }
1830 
1831 
ldrh(Register dst,const MemOperand & src,Condition cond)1832 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1833   addrmod3(cond | L | B7 | H | B4, dst, src);
1834 }
1835 
1836 
strh(Register src,const MemOperand & dst,Condition cond)1837 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1838   addrmod3(cond | B7 | H | B4, src, dst);
1839 }
1840 
1841 
ldrsb(Register dst,const MemOperand & src,Condition cond)1842 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1843   addrmod3(cond | L | B7 | S6 | B4, dst, src);
1844 }
1845 
1846 
ldrsh(Register dst,const MemOperand & src,Condition cond)1847 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1848   addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1849 }
1850 
1851 
ldrd(Register dst1,Register dst2,const MemOperand & src,Condition cond)1852 void Assembler::ldrd(Register dst1, Register dst2,
1853                      const MemOperand& src, Condition cond) {
1854   ASSERT(IsEnabled(ARMv7));
1855   ASSERT(src.rm().is(no_reg));
1856   ASSERT(!dst1.is(lr));  // r14.
1857   ASSERT_EQ(0, dst1.code() % 2);
1858   ASSERT_EQ(dst1.code() + 1, dst2.code());
1859   addrmod3(cond | B7 | B6 | B4, dst1, src);
1860 }
1861 
1862 
strd(Register src1,Register src2,const MemOperand & dst,Condition cond)1863 void Assembler::strd(Register src1, Register src2,
1864                      const MemOperand& dst, Condition cond) {
1865   ASSERT(dst.rm().is(no_reg));
1866   ASSERT(!src1.is(lr));  // r14.
1867   ASSERT_EQ(0, src1.code() % 2);
1868   ASSERT_EQ(src1.code() + 1, src2.code());
1869   ASSERT(IsEnabled(ARMv7));
1870   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1871 }
1872 
1873 
1874 // Preload instructions.
pld(const MemOperand & address)1875 void Assembler::pld(const MemOperand& address) {
1876   // Instruction details available in ARM DDI 0406C.b, A8.8.128.
1877   // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
1878   // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
1879   ASSERT(address.rm().is(no_reg));
1880   ASSERT(address.am() == Offset);
1881   int U = B23;
1882   int offset = address.offset();
1883   if (offset < 0) {
1884     offset = -offset;
1885     U = 0;
1886   }
1887   ASSERT(offset < 4096);
1888   emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
1889        0xf*B12 | offset);
1890 }
1891 
1892 
1893 // Load/Store multiple instructions.
ldm(BlockAddrMode am,Register base,RegList dst,Condition cond)1894 void Assembler::ldm(BlockAddrMode am,
1895                     Register base,
1896                     RegList dst,
1897                     Condition cond) {
1898   // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
1899   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1900 
1901   addrmod4(cond | B27 | am | L, base, dst);
1902 
1903   // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1904   if (cond == al && (dst & pc.bit()) != 0) {
1905     // There is a slight chance that the ldm instruction was actually a call,
1906     // in which case it would be wrong to return into the constant pool; we
1907     // recognize this case by checking if the emission of the pool was blocked
1908     // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1909     // the case, we emit a jump over the pool.
1910     CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1911   }
1912 }
1913 
1914 
stm(BlockAddrMode am,Register base,RegList src,Condition cond)1915 void Assembler::stm(BlockAddrMode am,
1916                     Register base,
1917                     RegList src,
1918                     Condition cond) {
1919   addrmod4(cond | B27 | am, base, src);
1920 }
1921 
1922 
1923 // Exception-generating instructions and debugging support.
1924 // Stops with a non-negative code less than kNumOfWatchedStops support
1925 // enabling/disabling and a counter feature. See simulator-arm.h .
stop(const char * msg,Condition cond,int32_t code)1926 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1927 #ifndef __arm__
1928   ASSERT(code >= kDefaultStopCode);
1929   {
1930     // The Simulator will handle the stop instruction and get the message
1931     // address. It expects to find the address just after the svc instruction.
1932     BlockConstPoolScope block_const_pool(this);
1933     if (code >= 0) {
1934       svc(kStopCode + code, cond);
1935     } else {
1936       svc(kStopCode + kMaxStopCode, cond);
1937     }
1938     emit(reinterpret_cast<Instr>(msg));
1939   }
1940 #else  // def __arm__
1941   if (cond != al) {
1942     Label skip;
1943     b(&skip, NegateCondition(cond));
1944     bkpt(0);
1945     bind(&skip);
1946   } else {
1947     bkpt(0);
1948   }
1949 #endif  // def __arm__
1950 }
1951 
1952 
bkpt(uint32_t imm16)1953 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
1954   ASSERT(is_uint16(imm16));
1955   emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
1956 }
1957 
1958 
svc(uint32_t imm24,Condition cond)1959 void Assembler::svc(uint32_t imm24, Condition cond) {
1960   ASSERT(is_uint24(imm24));
1961   emit(cond | 15*B24 | imm24);
1962 }
1963 
1964 
1965 // Coprocessor instructions.
cdp(Coprocessor coproc,int opcode_1,CRegister crd,CRegister crn,CRegister crm,int opcode_2,Condition cond)1966 void Assembler::cdp(Coprocessor coproc,
1967                     int opcode_1,
1968                     CRegister crd,
1969                     CRegister crn,
1970                     CRegister crm,
1971                     int opcode_2,
1972                     Condition cond) {
1973   ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1974   emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1975        crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1976 }
1977 
1978 
cdp2(Coprocessor coproc,int opcode_1,CRegister crd,CRegister crn,CRegister crm,int opcode_2)1979 void Assembler::cdp2(Coprocessor coproc,
1980                      int opcode_1,
1981                      CRegister crd,
1982                      CRegister crn,
1983                      CRegister crm,
1984                      int opcode_2) {  // v5 and above
1985   cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
1986 }
1987 
1988 
mcr(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2,Condition cond)1989 void Assembler::mcr(Coprocessor coproc,
1990                     int opcode_1,
1991                     Register rd,
1992                     CRegister crn,
1993                     CRegister crm,
1994                     int opcode_2,
1995                     Condition cond) {
1996   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1997   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
1998        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1999 }
2000 
2001 
mcr2(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2)2002 void Assembler::mcr2(Coprocessor coproc,
2003                      int opcode_1,
2004                      Register rd,
2005                      CRegister crn,
2006                      CRegister crm,
2007                      int opcode_2) {  // v5 and above
2008   mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2009 }
2010 
2011 
mrc(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2,Condition cond)2012 void Assembler::mrc(Coprocessor coproc,
2013                     int opcode_1,
2014                     Register rd,
2015                     CRegister crn,
2016                     CRegister crm,
2017                     int opcode_2,
2018                     Condition cond) {
2019   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2020   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2021        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2022 }
2023 
2024 
mrc2(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2)2025 void Assembler::mrc2(Coprocessor coproc,
2026                      int opcode_1,
2027                      Register rd,
2028                      CRegister crn,
2029                      CRegister crm,
2030                      int opcode_2) {  // v5 and above
2031   mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2032 }
2033 
2034 
ldc(Coprocessor coproc,CRegister crd,const MemOperand & src,LFlag l,Condition cond)2035 void Assembler::ldc(Coprocessor coproc,
2036                     CRegister crd,
2037                     const MemOperand& src,
2038                     LFlag l,
2039                     Condition cond) {
2040   addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2041 }
2042 
2043 
ldc(Coprocessor coproc,CRegister crd,Register rn,int option,LFlag l,Condition cond)2044 void Assembler::ldc(Coprocessor coproc,
2045                     CRegister crd,
2046                     Register rn,
2047                     int option,
2048                     LFlag l,
2049                     Condition cond) {
2050   // Unindexed addressing.
2051   ASSERT(is_uint8(option));
2052   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2053        coproc*B8 | (option & 255));
2054 }
2055 
2056 
ldc2(Coprocessor coproc,CRegister crd,const MemOperand & src,LFlag l)2057 void Assembler::ldc2(Coprocessor coproc,
2058                      CRegister crd,
2059                      const MemOperand& src,
2060                      LFlag l) {  // v5 and above
2061   ldc(coproc, crd, src, l, kSpecialCondition);
2062 }
2063 
2064 
ldc2(Coprocessor coproc,CRegister crd,Register rn,int option,LFlag l)2065 void Assembler::ldc2(Coprocessor coproc,
2066                      CRegister crd,
2067                      Register rn,
2068                      int option,
2069                      LFlag l) {  // v5 and above
2070   ldc(coproc, crd, rn, option, l, kSpecialCondition);
2071 }
2072 
2073 
2074 // Support for VFP.
2075 
vldr(const DwVfpRegister dst,const Register base,int offset,const Condition cond)2076 void Assembler::vldr(const DwVfpRegister dst,
2077                      const Register base,
2078                      int offset,
2079                      const Condition cond) {
2080   // Ddst = MEM(Rbase + offset).
2081   // Instruction details available in ARM DDI 0406C.b, A8-924.
2082   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2083   // Vd(15-12) | 1011(11-8) | offset
2084   int u = 1;
2085   if (offset < 0) {
2086     offset = -offset;
2087     u = 0;
2088   }
2089   int vd, d;
2090   dst.split_code(&vd, &d);
2091 
2092   ASSERT(offset >= 0);
2093   if ((offset % 4) == 0 && (offset / 4) < 256) {
2094     emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2095          0xB*B8 | ((offset / 4) & 255));
2096   } else {
2097     // Larger offsets must be handled by computing the correct address
2098     // in the ip register.
2099     ASSERT(!base.is(ip));
2100     if (u == 1) {
2101       add(ip, base, Operand(offset));
2102     } else {
2103       sub(ip, base, Operand(offset));
2104     }
2105     emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
2106   }
2107 }
2108 
2109 
vldr(const DwVfpRegister dst,const MemOperand & operand,const Condition cond)2110 void Assembler::vldr(const DwVfpRegister dst,
2111                      const MemOperand& operand,
2112                      const Condition cond) {
2113   ASSERT(!operand.rm().is_valid());
2114   ASSERT(operand.am_ == Offset);
2115   vldr(dst, operand.rn(), operand.offset(), cond);
2116 }
2117 
2118 
vldr(const SwVfpRegister dst,const Register base,int offset,const Condition cond)2119 void Assembler::vldr(const SwVfpRegister dst,
2120                      const Register base,
2121                      int offset,
2122                      const Condition cond) {
2123   // Sdst = MEM(Rbase + offset).
2124   // Instruction details available in ARM DDI 0406A, A8-628.
2125   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2126   // Vdst(15-12) | 1010(11-8) | offset
2127   int u = 1;
2128   if (offset < 0) {
2129     offset = -offset;
2130     u = 0;
2131   }
2132   int sd, d;
2133   dst.split_code(&sd, &d);
2134   ASSERT(offset >= 0);
2135 
2136   if ((offset % 4) == 0 && (offset / 4) < 256) {
2137   emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2138        0xA*B8 | ((offset / 4) & 255));
2139   } else {
2140     // Larger offsets must be handled by computing the correct address
2141     // in the ip register.
2142     ASSERT(!base.is(ip));
2143     if (u == 1) {
2144       add(ip, base, Operand(offset));
2145     } else {
2146       sub(ip, base, Operand(offset));
2147     }
2148     emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2149   }
2150 }
2151 
2152 
vldr(const SwVfpRegister dst,const MemOperand & operand,const Condition cond)2153 void Assembler::vldr(const SwVfpRegister dst,
2154                      const MemOperand& operand,
2155                      const Condition cond) {
2156   ASSERT(!operand.rm().is_valid());
2157   ASSERT(operand.am_ == Offset);
2158   vldr(dst, operand.rn(), operand.offset(), cond);
2159 }
2160 
2161 
vstr(const DwVfpRegister src,const Register base,int offset,const Condition cond)2162 void Assembler::vstr(const DwVfpRegister src,
2163                      const Register base,
2164                      int offset,
2165                      const Condition cond) {
2166   // MEM(Rbase + offset) = Dsrc.
2167   // Instruction details available in ARM DDI 0406C.b, A8-1082.
2168   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2169   // Vd(15-12) | 1011(11-8) | (offset/4)
2170   int u = 1;
2171   if (offset < 0) {
2172     offset = -offset;
2173     u = 0;
2174   }
2175   ASSERT(offset >= 0);
2176   int vd, d;
2177   src.split_code(&vd, &d);
2178 
2179   if ((offset % 4) == 0 && (offset / 4) < 256) {
2180     emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2181          ((offset / 4) & 255));
2182   } else {
2183     // Larger offsets must be handled by computing the correct address
2184     // in the ip register.
2185     ASSERT(!base.is(ip));
2186     if (u == 1) {
2187       add(ip, base, Operand(offset));
2188     } else {
2189       sub(ip, base, Operand(offset));
2190     }
2191     emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
2192   }
2193 }
2194 
2195 
vstr(const DwVfpRegister src,const MemOperand & operand,const Condition cond)2196 void Assembler::vstr(const DwVfpRegister src,
2197                      const MemOperand& operand,
2198                      const Condition cond) {
2199   ASSERT(!operand.rm().is_valid());
2200   ASSERT(operand.am_ == Offset);
2201   vstr(src, operand.rn(), operand.offset(), cond);
2202 }
2203 
2204 
vstr(const SwVfpRegister src,const Register base,int offset,const Condition cond)2205 void Assembler::vstr(const SwVfpRegister src,
2206                      const Register base,
2207                      int offset,
2208                      const Condition cond) {
2209   // MEM(Rbase + offset) = SSrc.
2210   // Instruction details available in ARM DDI 0406A, A8-786.
2211   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2212   // Vdst(15-12) | 1010(11-8) | (offset/4)
2213   int u = 1;
2214   if (offset < 0) {
2215     offset = -offset;
2216     u = 0;
2217   }
2218   int sd, d;
2219   src.split_code(&sd, &d);
2220   ASSERT(offset >= 0);
2221   if ((offset % 4) == 0 && (offset / 4) < 256) {
2222     emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2223          0xA*B8 | ((offset / 4) & 255));
2224   } else {
2225     // Larger offsets must be handled by computing the correct address
2226     // in the ip register.
2227     ASSERT(!base.is(ip));
2228     if (u == 1) {
2229       add(ip, base, Operand(offset));
2230     } else {
2231       sub(ip, base, Operand(offset));
2232     }
2233     emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2234   }
2235 }
2236 
2237 
vstr(const SwVfpRegister src,const MemOperand & operand,const Condition cond)2238 void Assembler::vstr(const SwVfpRegister src,
2239                      const MemOperand& operand,
2240                      const Condition cond) {
2241   ASSERT(!operand.rm().is_valid());
2242   ASSERT(operand.am_ == Offset);
2243   vstr(src, operand.rn(), operand.offset(), cond);
2244 }
2245 
2246 
vldm(BlockAddrMode am,Register base,DwVfpRegister first,DwVfpRegister last,Condition cond)2247 void  Assembler::vldm(BlockAddrMode am,
2248                       Register base,
2249                       DwVfpRegister first,
2250                       DwVfpRegister last,
2251                       Condition cond) {
2252   // Instruction details available in ARM DDI 0406C.b, A8-922.
2253   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2254   // first(15-12) | 1011(11-8) | (count * 2)
2255   ASSERT_LE(first.code(), last.code());
2256   ASSERT(am == ia || am == ia_w || am == db_w);
2257   ASSERT(!base.is(pc));
2258 
2259   int sd, d;
2260   first.split_code(&sd, &d);
2261   int count = last.code() - first.code() + 1;
2262   ASSERT(count <= 16);
2263   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2264        0xB*B8 | count*2);
2265 }
2266 
2267 
vstm(BlockAddrMode am,Register base,DwVfpRegister first,DwVfpRegister last,Condition cond)2268 void  Assembler::vstm(BlockAddrMode am,
2269                       Register base,
2270                       DwVfpRegister first,
2271                       DwVfpRegister last,
2272                       Condition cond) {
2273   // Instruction details available in ARM DDI 0406C.b, A8-1080.
2274   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2275   // first(15-12) | 1011(11-8) | (count * 2)
2276   ASSERT_LE(first.code(), last.code());
2277   ASSERT(am == ia || am == ia_w || am == db_w);
2278   ASSERT(!base.is(pc));
2279 
2280   int sd, d;
2281   first.split_code(&sd, &d);
2282   int count = last.code() - first.code() + 1;
2283   ASSERT(count <= 16);
2284   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2285        0xB*B8 | count*2);
2286 }
2287 
vldm(BlockAddrMode am,Register base,SwVfpRegister first,SwVfpRegister last,Condition cond)2288 void  Assembler::vldm(BlockAddrMode am,
2289                       Register base,
2290                       SwVfpRegister first,
2291                       SwVfpRegister last,
2292                       Condition cond) {
2293   // Instruction details available in ARM DDI 0406A, A8-626.
2294   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2295   // first(15-12) | 1010(11-8) | (count/2)
2296   ASSERT_LE(first.code(), last.code());
2297   ASSERT(am == ia || am == ia_w || am == db_w);
2298   ASSERT(!base.is(pc));
2299 
2300   int sd, d;
2301   first.split_code(&sd, &d);
2302   int count = last.code() - first.code() + 1;
2303   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2304        0xA*B8 | count);
2305 }
2306 
2307 
vstm(BlockAddrMode am,Register base,SwVfpRegister first,SwVfpRegister last,Condition cond)2308 void  Assembler::vstm(BlockAddrMode am,
2309                       Register base,
2310                       SwVfpRegister first,
2311                       SwVfpRegister last,
2312                       Condition cond) {
2313   // Instruction details available in ARM DDI 0406A, A8-784.
2314   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2315   // first(15-12) | 1011(11-8) | (count/2)
2316   ASSERT_LE(first.code(), last.code());
2317   ASSERT(am == ia || am == ia_w || am == db_w);
2318   ASSERT(!base.is(pc));
2319 
2320   int sd, d;
2321   first.split_code(&sd, &d);
2322   int count = last.code() - first.code() + 1;
2323   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2324        0xA*B8 | count);
2325 }
2326 
2327 
// Reinterpret the bits of a double as two 32-bit halves (*lo gets the
// low word, *hi the high word). Uses memcpy to avoid strict-aliasing UB.
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
2335 
2336 
2337 // Only works for little endian floating point formats.
2338 // We don't support VFP on the mixed endian floating point platform.
FitsVMOVDoubleImmediate(double d,uint32_t * encoding)2339 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2340   ASSERT(CpuFeatures::IsSupported(VFP3));
2341 
2342   // VMOV can accept an immediate of the form:
2343   //
2344   //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2345   //
2346   // The immediate is encoded using an 8-bit quantity, comprised of two
2347   // 4-bit fields. For an 8-bit immediate of the form:
2348   //
2349   //  [abcdefgh]
2350   //
2351   // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2352   // created of the form:
2353   //
2354   //  [aBbbbbbb,bbcdefgh,00000000,00000000,
2355   //      00000000,00000000,00000000,00000000]
2356   //
2357   // where B = ~b.
2358   //
2359 
2360   uint32_t lo, hi;
2361   DoubleAsTwoUInt32(d, &lo, &hi);
2362 
2363   // The most obvious constraint is the long block of zeroes.
2364   if ((lo != 0) || ((hi & 0xffff) != 0)) {
2365     return false;
2366   }
2367 
2368   // Bits 62:55 must be all clear or all set.
2369   if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2370     return false;
2371   }
2372 
2373   // Bit 63 must be NOT bit 62.
2374   if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2375     return false;
2376   }
2377 
2378   // Create the encoded immediate in the form:
2379   //  [00000000,0000abcd,00000000,0000efgh]
2380   *encoding  = (hi >> 16) & 0xf;      // Low nybble.
2381   *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
2382   *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.
2383 
2384   return true;
2385 }
2386 
2387 
// Load the double immediate |imm| into Dd, using the cheapest strategy
// available: a single VFP3 vmov-immediate, a constant-pool vldr, or
// synthesizing the 64-bit pattern through core registers (ip, and
// optionally |scratch|).
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Register scratch) {
  uint32_t enc;
  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    //
    // Dd = immediate
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
    // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
    // TODO(jfb) Temporarily turned off until we have constant blinding or
    //           some equivalent mitigation: an attacker can otherwise control
    //           generated data which also happens to be executable, a Very Bad
    //           Thing indeed.
    //           Blinding gets tricky because we don't have xor, we probably
    //           need to add/subtract without losing precision, which requires a
    //           cookie value that Lithium is probably better positioned to
    //           choose.
    //           We could also add a few peepholes here like detecting 0.0 and
    //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
    //           to zero (we set flush-to-zero), and normalizing NaN values.
    //           We could also detect redundant values.
    //           The code could also randomize the order of values, though
    //           that's tricky because vldr has a limited reach. Furthermore
    //           it breaks load locality.
    RelocInfo rinfo(pc_, imm);
    ConstantPoolAddEntry(rinfo);
    // With an out-of-line constant pool the entry is addressed off pp,
    // otherwise off pc; the actual offset is patched in later.
    vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
  } else {
    // Synthesise the double from ARM immediates.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (scratch.is(no_reg)) {
      if (dst.code() < 16) {
        const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
        // Move the low part of the double into the lower of the corresponding
        // S registers of D register dst.
        mov(ip, Operand(lo));
        vmov(loc.low(), ip);

        // Move the high part of the double into the higher of the
        // corresponding S registers of D register dst.
        mov(ip, Operand(hi));
        vmov(loc.high(), ip);
      } else {
        // D16-D31 does not have S registers, so move the low and high parts
        // directly to the D register using vmov.32.
        // Note: This may be slower, so we only do this when we have to.
        mov(ip, Operand(lo));
        vmov(dst, VmovIndexLo, ip);
        mov(ip, Operand(hi));
        vmov(dst, VmovIndexHi, ip);
      }
    } else {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(ip, Operand(lo));
      mov(scratch, Operand(hi));
      vmov(dst, ip, scratch);
    }
  }
}
2456 
2457 
vmov(const SwVfpRegister dst,const SwVfpRegister src,const Condition cond)2458 void Assembler::vmov(const SwVfpRegister dst,
2459                      const SwVfpRegister src,
2460                      const Condition cond) {
2461   // Sd = Sm
2462   // Instruction details available in ARM DDI 0406B, A8-642.
2463   int sd, d, sm, m;
2464   dst.split_code(&sd, &d);
2465   src.split_code(&sm, &m);
2466   emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2467 }
2468 
2469 
// Dd = Dm (double-precision register-to-register move).
void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406C.b, A8-938.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
       vm);
}
2484 
2485 
// Move a core register into one 32-bit half of a D register (vmov.32).
void Assembler::vmov(const DwVfpRegister dst,
                     const VmovIndex index,
                     const Register src,
                     const Condition cond) {
  // Dd[index] = Rt
  // Instruction details available in ARM DDI 0406C.b, A8-940.
  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  // Only the low (0) and high (1) word of the double can be addressed.
  ASSERT(index.index == 0 || index.index == 1);
  int vd, d;
  dst.split_code(&vd, &d);
  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
       d*B7 | B4);
}
2500 
2501 
// Move one 32-bit half of a D register into a core register (vmov.32).
void Assembler::vmov(const Register dst,
                     const VmovIndex index,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Rt = Dn[index]  (note: this is the scalar-to-core direction, op=1).
  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  // Only the low (0) and high (1) word of the double can be addressed.
  ASSERT(index.index == 0 || index.index == 1);
  int vn, n;
  src.split_code(&vn, &n);
  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
       0xB*B8 | n*B7 | B4);
}
2516 
2517 
// Move a pair of core registers into a D register in one instruction.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  // pc is unpredictable as a transfer register for this encoding.
  ASSERT(!src1.is(pc) && !src2.is(pc));
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2532 
2533 
// Move a D register into a pair of core registers in one instruction.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  // pc is unpredictable as a transfer register for this encoding.
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2548 
2549 
// Move a core register into a single-precision S register.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2562 
2563 
// Move a single-precision S register into a core register.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2576 
2577 
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction: S32/U32 are 32-bit
// signed/unsigned integers, F32/F64 are single/double precision floats.
enum VFPType { S32, U32, F32, F64 };
2581 
2582 
IsSignedVFPType(VFPType type)2583 static bool IsSignedVFPType(VFPType type) {
2584   switch (type) {
2585     case S32:
2586       return true;
2587     case U32:
2588       return false;
2589     default:
2590       UNREACHABLE();
2591       return false;
2592   }
2593 }
2594 
2595 
IsIntegerVFPType(VFPType type)2596 static bool IsIntegerVFPType(VFPType type) {
2597   switch (type) {
2598     case S32:
2599     case U32:
2600       return true;
2601     case F32:
2602     case F64:
2603       return false;
2604     default:
2605       UNREACHABLE();
2606       return false;
2607   }
2608 }
2609 
2610 
IsDoubleVFPType(VFPType type)2611 static bool IsDoubleVFPType(VFPType type) {
2612   switch (type) {
2613     case F32:
2614       return false;
2615     case F64:
2616       return true;
2617     default:
2618       UNREACHABLE();
2619       return false;
2620   }
2621 }
2622 
2623 
2624 // Split five bit reg_code based on size of reg_type.
2625 //  32-bit register codes are Vm:M
2626 //  64-bit register codes are M:Vm
2627 // where Vm is four bits, and M is a single bit.
SplitRegCode(VFPType reg_type,int reg_code,int * vm,int * m)2628 static void SplitRegCode(VFPType reg_type,
2629                          int reg_code,
2630                          int* vm,
2631                          int* m) {
2632   ASSERT((reg_code >= 0) && (reg_code <= 31));
2633   if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2634     // 32 bit type.
2635     *m  = reg_code & 0x1;
2636     *vm = reg_code >> 1;
2637   } else {
2638     // 64 bit type.
2639     *m  = (reg_code & 0x10) >> 4;
2640     *vm = reg_code & 0x0F;
2641   }
2642 }
2643 
2644 
// Encode vcvt.src_type.dst_type instruction.
// Produces the full instruction word for any of the supported conversions:
// float<->int (either width) or double<->single. Exactly one of the two
// encodings below applies; src and dst types must differ.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    // An int-to-int "conversion" is not a vcvt; exactly one side is integer.
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float-to-int: opc2 selects signedness of the result, op selects the
      // rounding behavior (round-to-zero vs. FPSCR mode).
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Int-to-float: op carries the signedness of the source instead.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2689 
2690 
// Dd = (double) signed-int Sm.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
2697 
2698 
// Sd = (float) signed-int Sm.
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
2705 
2706 
// Dd = (double) unsigned-int Sm.
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
2713 
2714 
// Sd = (signed int) double Dm; rounding per |mode|.
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
2721 
2722 
// Sd = (unsigned int) double Dm; rounding per |mode|.
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
2729 
2730 
// Dd = (double) float Sm (widening conversion).
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
2737 
2738 
// Sd = (float) double Dm (narrowing conversion).
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
2745 
2746 
// In-place fixed-point to double conversion: interpret Dd as a signed
// 32-bit fixed-point value with |fraction_bits| fractional bits and
// convert it to a double in Dd. Requires VFP3.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             int fraction_bits,
                             const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-874.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
  ASSERT(CpuFeatures::IsSupported(VFP3));
  int vd, d;
  dst.split_code(&vd, &d);
  // The instruction encodes 32 - fraction_bits, split into a low bit (i)
  // and the top four bits (imm4).
  int imm5 = 32 - fraction_bits;
  int i = imm5 & 1;
  int imm4 = (imm5 >> 1) & 0xf;
  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
2763 
2764 
// Dd = -Dm (double-precision negation).
void Assembler::vneg(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-968.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);

  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}
2779 
2780 
// Dd = |Dm| (double-precision absolute value).
void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-524.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
       m*B5 | vm);
}
2794 
2795 
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-830.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}
2814 
2815 
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-1086.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}
2834 
2835 
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-960.
  // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}
2854 
2855 
// Dd = Dd + (Dn * Dm): double-precision multiply-accumulate.
void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
2872 
2873 
// Dd = Dd - (Dn * Dm): double-precision multiply-subtract.
void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
       m*B5 | vm);
}
2890 
2891 
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
2910 
2911 
// Compare two double-precision registers, setting the FPSCR flags
// (read back via vmrs).
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}
2926 
2927 
// Compare a double-precision register against the immediate 0.0 — the only
// immediate the hardware supports for vcmp.
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
  ASSERT(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
2940 
2941 
// Move core register |dst| into the FPSCR (VFP status/control register).
void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xE*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2949 
2950 
// Move the FPSCR (VFP status/control register) into core register |dst|.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2958 
2959 
// Dd = sqrt(Dm): double-precision square root.
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}
2973 
2974 
2975 // Support for NEON.
2976 
// NEON load of one or more consecutive D registers from memory.
void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}
2989 
2990 
// NEON store of one or more consecutive D registers to memory.
void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}
3003 
3004 
// NEON lengthening move: widen each element of D register |src| into the
// Q register |dst|, sign- or zero-extending per |dt|.
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  // The U bit and the element-size field are both packed inside |dt|.
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
3017 
3018 
3019 // Pseudo instructions.
// Pseudo instructions.
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
  // a type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // 13*B21 is the mov opcode; both the destination and source register
  // fields carry |type| so the marker survives disassembly.
  emit(al | 13*B21 | type*B12 | type);
}
3029 
3030 
// Returns true if |instr| is a movt, ignoring its condition, destination
// register and immediate fields.
bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
             ((kNumRegisters-1)*B12) |            // mask out register
             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
  return instr == 0x34*B20;
}
3037 
3038 
// Returns true if |instr| is a movw, ignoring its condition, destination
// register and immediate fields.
bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
             ((kNumRegisters-1)*B12) |            // mask out destination
             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
  return instr == 0x30*B20;
}
3045 
3046 
// Returns true if |instr| is the marker nop emitted by Assembler::nop(type).
bool Assembler::IsNop(Instr instr, int type) {
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // Check for mov rx, rx where x = type.
  return instr == (al | 13*B21 | type*B12 | type);
}
3052 
3053 
// Returns true if |imm32| can be encoded as a data-processing (addressing
// mode 1) immediate, i.e. an 8-bit value rotated by an even amount.
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}
3059 
3060 
ImmediateFitsAddrMode2Instruction(int32_t imm32)3061 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3062   return is_uint12(abs(imm32));
3063 }
3064 
3065 
3066 // Debugging.
// Debugging.
// Record a JS return site at the current pc (used by the debugger to patch
// return sequences); flushes any pending source positions first.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
3072 
3073 
// Record a debug-break slot at the current pc; flushes any pending source
// positions first.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
3079 
3080 
// Attach a code comment at the current pc; only emitted when the
// --code-comments flag is on. |msg| must outlive the relocation info.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
3087 
3088 
// Record that a constant pool of |size| bytes starts at the current pc.
void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
3094 
3095 
// Grow the code buffer (4KB minimum, doubling up to 1MB, then +1MB steps),
// copy instructions and relocation info over, and rebase every pc-carrying
// structure by the buffer delta.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  // Reloc info is written backwards from the end of the buffer.
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions stay anchored at the buffer start; reloc
  // info stays anchored at the buffer end, hence the two different deltas.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries (not yet written out), which hold
  // absolute pcs into the old buffer.
  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
  constant_pool_builder_.Relocate(pc_delta);
}
3151 
3152 
// Emit a raw byte at the current pc.
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
3163 
3164 
dd(uint32_t data)3165 void Assembler::dd(uint32_t data) {
3166   // No relocation info should be pending while using dd. dd is used
3167   // to write pure data with no pointers and the constant pool should
3168   // be emitted before using dd.
3169   ASSERT(num_pending_32_bit_reloc_info_ == 0);
3170   ASSERT(num_pending_64_bit_reloc_info_ == 0);
3171   CheckBuffer();
3172   *reinterpret_cast<uint32_t*>(pc_) = data;
3173   pc_ += sizeof(uint32_t);
3174 }
3175 
3176 
emit_code_stub_address(Code * stub)3177 void Assembler::emit_code_stub_address(Code* stub) {
3178   CheckBuffer();
3179   *reinterpret_cast<uint32_t*>(pc_) =
3180       reinterpret_cast<uint32_t>(stub->instruction_start());
3181   pc_ += sizeof(uint32_t);
3182 }
3183 
3184 
RecordRelocInfo(RelocInfo::Mode rmode,intptr_t data)3185 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3186   RelocInfo rinfo(pc_, rmode, data, NULL);
3187   RecordRelocInfo(rinfo);
3188 }
3189 
3190 
RecordRelocInfo(const RelocInfo & rinfo)3191 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3192   if (!RelocInfo::IsNone(rinfo.rmode())) {
3193     // Don't record external references unless the heap will be serialized.
3194     if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
3195         !serializer_enabled() && !emit_debug_code()) {
3196       return;
3197     }
3198     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
3199     if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3200       RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3201                                        rinfo.rmode(),
3202                                        RecordedAstId().ToInt(),
3203                                        NULL);
3204       ClearRecordedAstId();
3205       reloc_info_writer.Write(&reloc_info_with_ast_id);
3206     } else {
3207       reloc_info_writer.Write(&rinfo);
3208     }
3209   }
3210 }
3211 
3212 
ConstantPoolAddEntry(const RelocInfo & rinfo)3213 void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
3214   if (FLAG_enable_ool_constant_pool) {
3215     constant_pool_builder_.AddEntry(this, rinfo);
3216   } else {
3217     if (rinfo.rmode() == RelocInfo::NONE64) {
3218       ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
3219       if (num_pending_64_bit_reloc_info_ == 0) {
3220         first_const_pool_64_use_ = pc_offset();
3221       }
3222       pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
3223     } else {
3224       ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
3225       if (num_pending_32_bit_reloc_info_ == 0) {
3226         first_const_pool_32_use_ = pc_offset();
3227       }
3228       pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
3229     }
3230     // Make sure the constant pool is not emitted in place of the next
3231     // instruction for which we just recorded relocation info.
3232     BlockConstPoolFor(1);
3233   }
3234 }
3235 
3236 
BlockConstPoolFor(int instructions)3237 void Assembler::BlockConstPoolFor(int instructions) {
3238   if (FLAG_enable_ool_constant_pool) {
3239     // Should be a no-op if using an out-of-line constant pool.
3240     ASSERT(num_pending_32_bit_reloc_info_ == 0);
3241     ASSERT(num_pending_64_bit_reloc_info_ == 0);
3242     return;
3243   }
3244 
3245   int pc_limit = pc_offset() + instructions * kInstrSize;
3246   if (no_const_pool_before_ < pc_limit) {
3247     // Max pool start (if we need a jump and an alignment).
3248 #ifdef DEBUG
3249     int start = pc_limit + kInstrSize + 2 * kPointerSize;
3250     ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
3251            (start - first_const_pool_32_use_ +
3252             num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
3253     ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
3254            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
3255 #endif
3256     no_const_pool_before_ = pc_limit;
3257   }
3258 
3259   if (next_buffer_check_ < no_const_pool_before_) {
3260     next_buffer_check_ = no_const_pool_before_;
3261   }
3262 }
3263 
3264 
// Emits the pending (in-line) constant pool if forced, or if any pending
// entry is getting close to the limit of its load instruction's pc-relative
// addressing range. |require_jump| means execution can fall into the pool
// site, so a branch over the emitted pool is needed. Patches every pending
// ldr/vldr [pc, #0] placeholder to address its pool slot, de-duplicating
// identical unpatchable constants along the way.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  // Some short sequence of instruction mustn't be broken up by constant pool
  // emission, such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_reloc_info_ == 0) &&
      (num_pending_64_bit_reloc_info_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  // 32-bit entries are kPointerSize each; 64-bit entries (below) may also
  // need one padding word so the doubles start 8-byte aligned.
  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  bool require_64_bit_align = false;
  if (has_fp_values) {
    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
    if (require_64_bit_align) {
      size_after_marker += kInstrSize;
    }
    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
  }

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
    bool need_emit = false;
    if (has_fp_values) {
      // 64-bit entries precede the 32-bit ones in the emitted pool, so the
      // 32-bit payload does not count towards the FP distance.
      int dist64 = pc_offset() +
                   size -
                   num_pending_32_bit_reloc_info_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    // NOTE(review): this distance check runs even when there are no pending
    // 32-bit entries (first_const_pool_32_use_ == -1); that can only force
    // an early — i.e. conservative — emission, never a late one.
    int dist32 =
      pc_offset() + size - first_const_pool_32_use_;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
    if (!need_emit) return;
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    // Padding word so the 64-bit entries below start 8-byte aligned.
    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];

      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      ASSERT((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      ASSERT(is_uint10(delta));

      // Share the pool slot of an earlier, already-emitted identical value:
      // reuse its (already patched) offset, adjusted for the pc difference.
      bool found = false;
      uint64_t value = rinfo.raw_data64();
      for (int j = 0; j < i; j++) {
        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
        if (value == rinfo2.raw_data64()) {
          found = true;
          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
          Instr instr2 = instr_at(rinfo2.pc());
          ASSERT(IsVldrDPcImmediateOffset(instr2));
          delta = GetVldrDRegisterImmediateOffset(instr2);
          delta += rinfo2.pc() - rinfo.pc();
          break;
        }
      }

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      if (!found) {
        uint64_t uint_data = rinfo.raw_data64();
        emit(uint_data & 0xFFFFFFFF);
        emit(uint_data >> 32);
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::NONE64);

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      ASSERT(!IsVldrDPcImmediateOffset(instr));

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
        ASSERT(is_uint12(delta));
        // 0 is the smallest delta:
        //   ldr rd, [pc, #0]
        //   constant pool marker
        //   data

        // Only constants the serializer will never patch may be merged.
        bool found = false;
        if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
          for (int j = 0; j < i; j++) {
            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];

            if ((rinfo2.data() == rinfo.data()) &&
                (rinfo2.rmode() == rinfo.rmode())) {
              Instr instr2 = instr_at(rinfo2.pc());
              if (IsLdrPcImmediateOffset(instr2)) {
                delta = GetLdrRegisterImmediateOffset(instr2);
                delta += rinfo2.pc() - rinfo.pc();
                found = true;
                break;
              }
            }
          }
        }

        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));

        if (!found) {
          emit(rinfo.data());
        }
      } else {
        // Non-ldr placeholder: must be a movw/movt pair, which needs no
        // pool slot at all.
        ASSERT(IsMovW(instr));
      }
    }

    num_pending_32_bit_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
3468 
3469 
NewConstantPool(Isolate * isolate)3470 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3471   if (!FLAG_enable_ool_constant_pool) {
3472     return isolate->factory()->empty_constant_pool_array();
3473   }
3474   return constant_pool_builder_.New(isolate);
3475 }
3476 
3477 
// Copies the builder's recorded entries into |constant_pool| and patches
// the instructions referencing them; see ConstantPoolBuilder::Populate.
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  constant_pool_builder_.Populate(this, constant_pool);
}
3481 
3482 
// Starts with no recorded entries; the four per-section counters track how
// many UNIQUE (unmerged) entries of each kind AddEntry has accumulated.
ConstantPoolBuilder::ConstantPoolBuilder()
    : entries_(),
      merged_indexes_(),
      count_of_64bit_(0),
      count_of_code_ptr_(0),
      count_of_heap_ptr_(0),
      count_of_32bit_(0) { }
3490 
3491 
IsEmpty()3492 bool ConstantPoolBuilder::IsEmpty() {
3493   return entries_.size() == 0;
3494 }
3495 
3496 
Is64BitEntry(RelocInfo::Mode rmode)3497 bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
3498   return rmode == RelocInfo::NONE64;
3499 }
3500 
3501 
Is32BitEntry(RelocInfo::Mode rmode)3502 bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
3503   return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
3504 }
3505 
3506 
// Code-pointer entries are those with a code-target relocation mode; they
// go in the CODE_PTR section of the ConstantPoolArray.
bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
  return RelocInfo::IsCodeTarget(rmode);
}
3510 
3511 
IsHeapPtrEntry(RelocInfo::Mode rmode)3512 bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
3513   return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
3514 }
3515 
3516 
AddEntry(Assembler * assm,const RelocInfo & rinfo)3517 void ConstantPoolBuilder::AddEntry(Assembler* assm,
3518                                    const RelocInfo& rinfo) {
3519   RelocInfo::Mode rmode = rinfo.rmode();
3520   ASSERT(rmode != RelocInfo::COMMENT &&
3521          rmode != RelocInfo::POSITION &&
3522          rmode != RelocInfo::STATEMENT_POSITION &&
3523          rmode != RelocInfo::CONST_POOL);
3524 
3525 
3526   // Try to merge entries which won't be patched.
3527   int merged_index = -1;
3528   if (RelocInfo::IsNone(rmode) ||
3529       (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
3530     size_t i;
3531     std::vector<RelocInfo>::const_iterator it;
3532     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
3533       if (RelocInfo::IsEqual(rinfo, *it)) {
3534         merged_index = i;
3535         break;
3536       }
3537     }
3538   }
3539 
3540   entries_.push_back(rinfo);
3541   merged_indexes_.push_back(merged_index);
3542 
3543   if (merged_index == -1) {
3544     // Not merged, so update the appropriate count.
3545     if (Is64BitEntry(rmode)) {
3546       count_of_64bit_++;
3547     } else if (Is32BitEntry(rmode)) {
3548       count_of_32bit_++;
3549     } else if (IsCodePtrEntry(rmode)) {
3550       count_of_code_ptr_++;
3551     } else {
3552       ASSERT(IsHeapPtrEntry(rmode));
3553       count_of_heap_ptr_++;
3554     }
3555   }
3556 
3557   // Check if we still have room for another entry given Arm's ldr and vldr
3558   // immediate offset range.
3559   // TODO(rmcilroy): Avoid creating a new object here when we support
3560   //                 extended constant pools.
3561   ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
3562                                            count_of_code_ptr_,
3563                                            count_of_heap_ptr_,
3564                                            count_of_32bit_);
3565   ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
3566   if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
3567         is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
3568     assm->set_constant_pool_full();
3569   }
3570 }
3571 
3572 
Relocate(int pc_delta)3573 void ConstantPoolBuilder::Relocate(int pc_delta) {
3574   for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
3575        rinfo != entries_.end(); rinfo++) {
3576     ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
3577     rinfo->set_pc(rinfo->pc() + pc_delta);
3578   }
3579 }
3580 
3581 
New(Isolate * isolate)3582 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
3583   if (IsEmpty()) {
3584     return isolate->factory()->empty_constant_pool_array();
3585   } else {
3586     ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
3587                                              count_of_code_ptr_,
3588                                              count_of_heap_ptr_,
3589                                              count_of_32bit_);
3590     return isolate->factory()->NewConstantPoolArray(small);
3591   }
3592 }
3593 
3594 
// Writes the recorded constants into |constant_pool| (sections in order:
// INT64, CODE_PTR, HEAP_PTR, INT32) and patches each entry's pp-relative
// vldr/ldr placeholder with its slot's offset. Merged entries reuse the
// offset stashed for the entry they were merged into.
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
             ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
  ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
             ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
  ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
             ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
  ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
             ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
  ASSERT(entries_.size() == merged_indexes_.size());

  // Next free slot index within each section of the pool array.
  int index_64bit = 0;
  int index_code_ptr = count_of_64bit_;
  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;

  size_t i;
  std::vector<RelocInfo>::const_iterator rinfo;
  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
    RelocInfo::Mode rmode = rinfo->rmode();

    // Update constant pool if necessary and get the entry's offset.
    // Offsets are pp-register-relative, hence the kHeapObjectTag bias.
    int offset;
    if (merged_indexes_[i] == -1) {
      if (Is64BitEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
        constant_pool->set(index_64bit++, rinfo->data64());
      } else if (Is32BitEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
      } else if (IsCodePtrEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
            kHeapObjectTag;
        constant_pool->set(index_code_ptr++,
                           reinterpret_cast<Address>(rinfo->data()));
      } else {
        ASSERT(IsHeapPtrEntry(rmode));
        offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
            kHeapObjectTag;
        constant_pool->set(index_heap_ptr++,
                           reinterpret_cast<Object *>(rinfo->data()));
      }
      merged_indexes_[i] = offset;  // Stash offset for merged entries.
    } else {
      // Merged entry: the target entry was processed earlier (merged_index
      // < i), so its slot in merged_indexes_ already holds the offset.
      size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
      ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
      offset = merged_indexes_[merged_index];
    }

    // Patch vldr/ldr instruction with correct offset.
    Instr instr = assm->instr_at(rinfo->pc());
    if (Is64BitEntry(rmode)) {
      // Instruction to patch must be 'vldr rd, [pp, #0]'.
      ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
      ASSERT(is_uint10(offset));
      assm->instr_at_put(rinfo->pc(),
          Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
    } else {
      // Instruction to patch must be 'ldr rd, [pp, #0]'.
      ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
      ASSERT(is_uint12(offset));
      assm->instr_at_put(rinfo->pc(),
          Assembler::SetLdrRegisterImmediateOffset(instr, offset));
    }
  }

  // Every section must have been filled exactly.
  ASSERT((index_64bit == count_of_64bit_) &&
         (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
         (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
         (index_32bit == (index_heap_ptr + count_of_32bit_)));
}
3669 
3670 
3671 } }  // namespace v8::internal
3672 
3673 #endif  // V8_TARGET_ARCH_ARM
3674