// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "arm/assembler-arm-inl.h"
#include "macro-assembler.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
  }
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    answer |= 1u << VFP3 | 1u << ARMv7;
  }
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
  }
#endif  // CAN_USE_VFP32DREGS
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}
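
// Worked illustration (editor's sketch, not part of the build): for a
// snapshot compiled with -DCAN_USE_ARMV7_INSTRUCTIONS and
// -DCAN_USE_VFP3_INSTRUCTIONS, and with the matching flags enabled, the
// function above returns
//   (1u << ARMv7) | (1u << VFP3) | (1u << UNALIGNED_ACCESSES)
// since VFP3 implies ARMv7, and ARMv7 plus FLAG_enable_unaligned_accesses
// implies UNALIGNED_ACCESSES; each feature occupies one bit of the
// CpuFeatures::supported_ mask.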


const char* DwVfpRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < NumAllocatableRegisters());
  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code())
    index += kNumReservedRegisters;

  return VFPRegisters::Name(index, true);
}
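
// Illustration (hedged; the exact codes depend on the register definitions):
// with kNumReservedRegisters == 2 and kDoubleRegZero/kScratchDoubleReg being
// two consecutive reserved registers, allocation indices below
// kDoubleRegZero.code() name d0, d1, ... directly, while higher indices are
// shifted past the reserved pair, so neither reserved register ever receives
// an allocatable name.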


void CpuFeatures::Probe() {
  uint64_t standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    printf("   ");
    PrintFeatures();
    return;
  }

#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }
  if (FLAG_enable_neon) {
    supported_ |= 1u << NEON;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
  if (FLAG_enable_armv7) {
    supported_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (FLAG_enable_sudiv) {
    supported_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (FLAG_enable_movw_movt) {
    supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  if (FLAG_enable_32dregs) {
    supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  if (FLAG_enable_unaligned_accesses) {
    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

#else  // __arm__
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
    found_by_runtime_probing_only_ |= 1u << NEON;
  }

  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
      && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

  // Use movw/movt for QUALCOMM ARMv7 cores.
  if (cpu.implementer() == CPU::QUALCOMM &&
      cpu.architecture() >= 7 &&
      FLAG_enable_movw_movt) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  // ARM Cortex-A9 and Cortex-A5 have 32-byte cache lines.
  if (cpu.implementer() == CPU::ARM &&
      (cpu.part() == CPU::ARM_CORTEX_A5 ||
       cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  supported_ |= found_by_runtime_probing_only_;
#endif

  // Assert that VFP3 implies ARMv7.
  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}


void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#ifdef __arm__

# ifdef ARM_TEST
  arm_test = " test";
# endif
# if defined __ARM_NEON__
  arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
# endif
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";

#else  // __arm__

  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#  else
  arm_fpu = " vfp3-d16";
#  endif
# else
  arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
# else
  arm_float_abi = "softfp";
# endif

#endif  // __arm__

  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}


void CpuFeatures::PrintFeatures() {
  printf(
    "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
    "MOVW_MOVT_IMMEDIATE_LOADS=%d",
    CpuFeatures::IsSupported(ARMv7),
    CpuFeatures::IsSupported(VFP3),
    CpuFeatures::IsSupported(VFP32DREGS),
    CpuFeatures::IsSupported(NEON),
    CpuFeatures::IsSupported(SUDIV),
    CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction.  We don't
  // generate those yet.
  return false;
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  ASSERT((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}
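
// Usage sketch (illustrative): NeonMemOperand(r0, Offset, 128) stores
// align_ == 2, the encoding of the ":128" alignment specifier in assembler
// syntax (as in vld1.8 {d0, d1}, [r0:128]). Alignments other than
// 0/64/128/256 bits are programming errors and hit UNREACHABLE().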


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1:
      type_ = nlt_1;
      break;
    case 2:
      type_ = nlt_2;
      break;
    case 3:
      type_ = nlt_3;
      break;
    case 4:
      type_ = nlt_4;
      break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
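
// Worked decode (illustrative): kPushRegPattern is the image of
// str r, [sp, #-4]! -- B26 selects the single data transfer class,
// NegPreIndex sets pre-indexing with a negative offset, the literal 4 is the
// byte offset, and kRegister_sp_Code * B16 puts sp in the Rn field. IsPush()
// below masks out Rd with ~kRdMask before comparing against this pattern.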


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign-extend it and multiply it
  // by 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;
}
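
// Worked example: `((instr & kImm24Mask) << 8) >> 6` shifts the 24-bit field
// into the top of a signed 32-bit word, then the arithmetic right shift by 6
// both sign-extends it and leaves it multiplied by 4. For imm24 == 1 the
// result is 4; for imm24 == 0xffffff (i.e. -1) the result is -4.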


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}
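
// Worked example: vldr stores its immediate as an 8-bit count of words, so a
// byte offset of 1020 is encoded as 255 and reconstructed here as 255 << 2.
// Bit B23 supplies the sign separately (sign-magnitude, not two's
// complement), which is why these getters negate rather than sign-extend.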


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCMask) == kLdrPCPattern;
}


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The linked labels form a link chain by making the branch offset
// in the instruction stream point to the previous branch
// instruction using the same label.
//
// The link chain is terminated by a branch offset pointing to the
// same position.


int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
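
// Worked example (assuming kPcLoadDelta == 8, the ARM convention that pc
// reads as the instruction address plus 8): a branch at pos == 100 whose
// imm24 field decodes to imm26 == 12 targets 100 + 8 + 12 == 120.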


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
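
// Worked example: 0xff000000 is encodable. Rotating it left by 8 bits
// (rot == 4) yields 0xff, so the operand is stored as immed_8 == 0xff with
// rotate_imm == 4 (decoded as a right-rotation by 8). 0x101 is not encodable:
// its two set bits lie 8 positions apart and never fit one 8-bit window, so
// callers fall back to the complementary-opcode rewrites above or to a
// constant pool load.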


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return Serializer::enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
  if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
    return true;
  }
  if (x.must_output_reloc_info(assembler)) {
    return false;
  }
  return CpuFeatures::IsSupported(ARMv7);
}


bool Operand::is_single_instruction(const Assembler* assembler,
                                    Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // the constant pool is required. For a mov instruction not setting the
    // condition code, additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      return !use_movw_movt(*this, assembler);
    } else {
      // If this is not a mov or mvn instruction there will always be an
      // additional instruction - either a mov or an ldr. The mov might
      // actually be two instructions (movw followed by movt), so two or
      // three instructions will be generated including the actual one.
      return false;
    }
  } else {
    // No use of the constant pool is needed and the immediate operand can be
    // encoded as a shifter operand.
    return true;
  }
}


void Assembler::move_32_bit_immediate(Condition cond,
                                      Register rd,
                                      SBit s,
                                      const Operand& x) {
  if (rd.code() != pc.code() && s == LeaveCC) {
    if (use_movw_movt(x, this)) {
      if (x.must_output_reloc_info(this)) {
        RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      emit(cond | 0x30*B20 | rd.code()*B12 |
           EncodeMovwImmediate(x.imm32_ & 0xffff));
      movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
      return;
    }
  }

  RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
  ldr(rd, MemOperand(pc, 0), cond);
}


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first into the ip register and change the original instruction to
      // use ip. However, if the original instruction is a 'mov rd, x' (not
      // setting the condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(cond, rd, LeaveCC, x);
      } else {
        if ((instr & kMovMvnMask) == kMovMvnPattern) {
          // Moves need to use a constant pool entry.
          RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
          ldr(ip, MemOperand(pc, 0), cond);
        } else if (x.must_output_reloc_info(this)) {
          // Otherwise, use the most efficient form of fetching from the
          // constant pool.
          move_32_bit_immediate(cond, ip, LeaveCC, x);
        } else {
          // If this is not a mov or mvn instruction we may still be able to
          // avoid a constant pool entry by using mvn or movw.
          mov(ip, x, LeaveCC, cond);
        }
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // The immediate offset cannot be encoded, so load it first into the ip
      // register. rn (and rd in a load) should never be ip, or it will be
      // trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // The immediate offset cannot be encoded, so load it first into the ip
      // register. rn (and rd in a load) should never be ip, or it will be
      // trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // A scaled register offset is not supported, so load the index first.
    // rn (and rd in a load) should never be ip, or it will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different from addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to the previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // The first entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
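
// Typical use (a sketch; the exact caller lives in the assembler headers):
// the branch emitters compute the offset and encode it in the very next
// instruction, e.g.
//   b(branch_offset(&label, false), eq);
// which is why the constant pool must stay blocked for exactly the one
// instruction between computing the offset and emitting the branch.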
1295 
1296 
1297 // Branch instructions.
b(int branch_offset,Condition cond)1298 void Assembler::b(int branch_offset, Condition cond) {
1299   ASSERT((branch_offset & 3) == 0);
1300   int imm24 = branch_offset >> 2;
1301   ASSERT(is_int24(imm24));
1302   emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1303 
1304   if (cond == al) {
1305     // Dead code is a good location to emit the constant pool.
1306     CheckConstPool(false, false);
1307   }
1308 }
1309 
1310 
bl(int branch_offset,Condition cond)1311 void Assembler::bl(int branch_offset, Condition cond) {
1312   positions_recorder()->WriteRecordedPositions();
1313   ASSERT((branch_offset & 3) == 0);
1314   int imm24 = branch_offset >> 2;
1315   ASSERT(is_int24(imm24));
1316   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1317 }
1318 
1319 
blx(int branch_offset)1320 void Assembler::blx(int branch_offset) {  // v5 and above
1321   positions_recorder()->WriteRecordedPositions();
1322   ASSERT((branch_offset & 1) == 0);
1323   int h = ((branch_offset & 2) >> 1)*B24;
1324   int imm24 = branch_offset >> 2;
1325   ASSERT(is_int24(imm24));
1326   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1327 }
1328 
1329 
blx(Register target,Condition cond)1330 void Assembler::blx(Register target, Condition cond) {  // v5 and above
1331   positions_recorder()->WriteRecordedPositions();
1332   ASSERT(!target.is(pc));
1333   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1334 }
1335 
1336 
bx(Register target,Condition cond)1337 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
1338   positions_recorder()->WriteRecordedPositions();
1339   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
1340   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1341 }
1342 
1343 
1344 // Data-processing instructions.
1345 
and_(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1346 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1347                      SBit s, Condition cond) {
1348   addrmod1(cond | AND | s, src1, dst, src2);
1349 }
1350 
1351 
eor(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1352 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1353                     SBit s, Condition cond) {
1354   addrmod1(cond | EOR | s, src1, dst, src2);
1355 }
1356 
1357 
sub(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1358 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1359                     SBit s, Condition cond) {
1360   addrmod1(cond | SUB | s, src1, dst, src2);
1361 }
1362 
1363 
rsb(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1364 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1365                     SBit s, Condition cond) {
1366   addrmod1(cond | RSB | s, src1, dst, src2);
1367 }
1368 
1369 
add(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1370 void Assembler::add(Register dst, Register src1, const Operand& src2,
1371                     SBit s, Condition cond) {
1372   addrmod1(cond | ADD | s, src1, dst, src2);
1373 }
1374 
1375 
adc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1376 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1377                     SBit s, Condition cond) {
1378   addrmod1(cond | ADC | s, src1, dst, src2);
1379 }
1380 
1381 
sbc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1382 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1383                     SBit s, Condition cond) {
1384   addrmod1(cond | SBC | s, src1, dst, src2);
1385 }
1386 
1387 
rsc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1388 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1389                     SBit s, Condition cond) {
1390   addrmod1(cond | RSC | s, src1, dst, src2);
1391 }
1392 
1393 
tst(Register src1,const Operand & src2,Condition cond)1394 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1395   addrmod1(cond | TST | S, src1, r0, src2);
1396 }
1397 
1398 
teq(Register src1,const Operand & src2,Condition cond)1399 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1400   addrmod1(cond | TEQ | S, src1, r0, src2);
1401 }
1402 
1403 
cmp(Register src1,const Operand & src2,Condition cond)1404 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1405   addrmod1(cond | CMP | S, src1, r0, src2);
1406 }
1407 
1408 
cmp_raw_immediate(Register src,int raw_immediate,Condition cond)1409 void Assembler::cmp_raw_immediate(
1410     Register src, int raw_immediate, Condition cond) {
1411   ASSERT(is_uint12(raw_immediate));
1412   emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1413 }
1414 
1415 
cmn(Register src1,const Operand & src2,Condition cond)1416 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1417   addrmod1(cond | CMN | S, src1, r0, src2);
1418 }
1419 
1420 
orr(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1421 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1422                     SBit s, Condition cond) {
1423   addrmod1(cond | ORR | s, src1, dst, src2);
1424 }
1425 
1426 
mov(Register dst,const Operand & src,SBit s,Condition cond)1427 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1428   if (dst.is(pc)) {
1429     positions_recorder()->WriteRecordedPositions();
1430   }
1431   // Don't allow nop instructions in the form mov rn, rn to be generated using
1432   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1433   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1434   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1435   addrmod1(cond | MOV | s, r0, dst, src);
1436 }
1437 
1438 
mov_label_offset(Register dst,Label * label)1439 void Assembler::mov_label_offset(Register dst, Label* label) {
1440   if (label->is_bound()) {
1441     mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
1442   } else {
1443     // Emit the link to the label in the code stream followed by extra nop
1444     // instructions.
1445     // If the label is not linked, then start a new link chain by linking it to
1446     // itself, emitting pc_offset().
1447     int link = label->is_linked() ? label->pos() : pc_offset();
1448     label->link_to(pc_offset());
1449 
1450     // When the label is bound, these instructions will be patched with a
1451     // sequence of movw/movt or mov/orr/orr instructions. They will load the
1452     // destination register with the position of the label from the beginning
1453     // of the code.
1454     //
1455     // The link will be extracted from the first instruction and the destination
1456     // register from the second.
1457     //   For ARMv7:
1458     //      link
1459     //      mov dst, dst
1460     //   For ARMv6:
1461     //      link
1462     //      mov dst, dst
1463     //      mov dst, dst
1464     //
1465     // When the label gets bound: target_at extracts the link and target_at_put
1466     // patches the instructions.
1467     ASSERT(is_uint24(link));
1468     BlockConstPoolScope block_const_pool(this);
1469     emit(link);
1470     nop(dst.code());
1471     if (!CpuFeatures::IsSupported(ARMv7)) {
1472       nop(dst.code());
1473     }
1474   }
1475 }
1476 
1477 
movw(Register reg,uint32_t immediate,Condition cond)1478 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1479   ASSERT(immediate < 0x10000);
1480   // May use movw if supported, but on unsupported platforms will try to use
1481   // equivalent rotated immed_8 value and other tricks before falling back to a
1482   // constant pool load.
1483   mov(reg, Operand(immediate), LeaveCC, cond);
1484 }
1485 
1486 
movt(Register reg,uint32_t immediate,Condition cond)1487 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1488   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1489 }
1490 
1491 
bic(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1492 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1493                     SBit s, Condition cond) {
1494   addrmod1(cond | BIC | s, src1, dst, src2);
1495 }
1496 
1497 
mvn(Register dst,const Operand & src,SBit s,Condition cond)1498 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1499   addrmod1(cond | MVN | s, r0, dst, src);
1500 }
1501 
1502 
1503 // Multiply instructions.
mla(Register dst,Register src1,Register src2,Register srcA,SBit s,Condition cond)1504 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1505                     SBit s, Condition cond) {
1506   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1507   emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1508        src2.code()*B8 | B7 | B4 | src1.code());
1509 }
1510 
1511 
1512 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1513                     Condition cond) {
1514   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1515   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1516        src2.code()*B8 | B7 | B4 | src1.code());
1517 }
1518 
1519 
1520 void Assembler::sdiv(Register dst, Register src1, Register src2,
1521                      Condition cond) {
1522   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1523   ASSERT(IsEnabled(SUDIV));
1524   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1525        src2.code()*B8 | B4 | src1.code());
1526 }
1527 
1528 
1529 void Assembler::mul(Register dst, Register src1, Register src2,
1530                     SBit s, Condition cond) {
1531   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1532   // dst goes in bits 16-19 for this instruction!
1533   emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
1534 }
1535 
1536 
1537 void Assembler::smlal(Register dstL,
1538                       Register dstH,
1539                       Register src1,
1540                       Register src2,
1541                       SBit s,
1542                       Condition cond) {
1543   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1544   ASSERT(!dstL.is(dstH));
1545   emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1546        src2.code()*B8 | B7 | B4 | src1.code());
1547 }
1548 
1549 
1550 void Assembler::smull(Register dstL,
1551                       Register dstH,
1552                       Register src1,
1553                       Register src2,
1554                       SBit s,
1555                       Condition cond) {
1556   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1557   ASSERT(!dstL.is(dstH));
1558   emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1559        src2.code()*B8 | B7 | B4 | src1.code());
1560 }
1561 
1562 
1563 void Assembler::umlal(Register dstL,
1564                       Register dstH,
1565                       Register src1,
1566                       Register src2,
1567                       SBit s,
1568                       Condition cond) {
1569   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1570   ASSERT(!dstL.is(dstH));
1571   emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1572        src2.code()*B8 | B7 | B4 | src1.code());
1573 }
1574 
1575 
1576 void Assembler::umull(Register dstL,
1577                       Register dstH,
1578                       Register src1,
1579                       Register src2,
1580                       SBit s,
1581                       Condition cond) {
1582   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1583   ASSERT(!dstL.is(dstH));
1584   emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1585        src2.code()*B8 | B7 | B4 | src1.code());
1586 }
1587 
1588 
1589 // Miscellaneous arithmetic instructions.
1590 void Assembler::clz(Register dst, Register src, Condition cond) {
1591   // v5 and above.
1592   ASSERT(!dst.is(pc) && !src.is(pc));
1593   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1594        15*B8 | CLZ | src.code());
1595 }
1596 
1597 
1598 // Saturating instructions.
1599 
1600 // Unsigned saturate.
1601 void Assembler::usat(Register dst,
1602                      int satpos,
1603                      const Operand& src,
1604                      Condition cond) {
1605   // v6 and above.
1606   ASSERT(CpuFeatures::IsSupported(ARMv7));
1607   ASSERT(!dst.is(pc) && !src.rm_.is(pc));
1608   ASSERT((satpos >= 0) && (satpos <= 31));
1609   ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1610   ASSERT(src.rs_.is(no_reg));
1611 
1612   int sh = 0;
1613   if (src.shift_op_ == ASR) {
1614     sh = 1;
1615   }
1616 
1617   emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1618        src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1619 }
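
// [Editor's note: usat clamps the (optionally shifted) source to the
// unsigned range [0, 2^satpos - 1]. A worked example, assuming `masm`:]
//
//   masm.usat(r0, 8, Operand(r1));  // r0 = clamp(r1, 0, 255)
//   // r1 = 300 -> r0 = 255;  r1 = -5 -> r0 = 0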
1620 
1621 
1622 // Bitfield manipulation instructions.
1623 
1624 // Unsigned bit field extract.
1625 // Extracts #width adjacent bits from position #lsb in a register, and
1626 // writes them to the low bits of a destination register.
1627 //   ubfx dst, src, #lsb, #width
1628 void Assembler::ubfx(Register dst,
1629                      Register src,
1630                      int lsb,
1631                      int width,
1632                      Condition cond) {
1633   // v7 and above.
1634   ASSERT(CpuFeatures::IsSupported(ARMv7));
1635   ASSERT(!dst.is(pc) && !src.is(pc));
1636   ASSERT((lsb >= 0) && (lsb <= 31));
1637   ASSERT((width >= 1) && (width <= (32 - lsb)));
1638   emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1639        lsb*B7 | B6 | B4 | src.code());
1640 }
1641 
1642 
1643 // Signed bit field extract.
1644 // Extracts #width adjacent bits from position #lsb in a register, and
1645 // writes them to the low bits of a destination register. The extracted
1646 // value is sign extended to fill the destination register.
1647 //   sbfx dst, src, #lsb, #width
1648 void Assembler::sbfx(Register dst,
1649                      Register src,
1650                      int lsb,
1651                      int width,
1652                      Condition cond) {
1653   // v7 and above.
1654   ASSERT(CpuFeatures::IsSupported(ARMv7));
1655   ASSERT(!dst.is(pc) && !src.is(pc));
1656   ASSERT((lsb >= 0) && (lsb <= 31));
1657   ASSERT((width >= 1) && (width <= (32 - lsb)));
1658   emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1659        lsb*B7 | B6 | B4 | src.code());
1660 }
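
// [Editor's note: a worked example of the two extract forms, assuming
// `masm` and r1 = 0xabcd1234.]
//
//   masm.ubfx(r0, r1, 8, 8);   // r0 = (r1 >> 8) & 0xff       = 0x00000012
//   masm.sbfx(r0, r1, 12, 8);  // extracts 0xd1, sign-extends = 0xffffffd1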
1661 
1662 
1663 // Bit field clear.
1664 // Sets #width adjacent bits at position #lsb in the destination register
1665 // to zero, preserving the value of the other bits.
1666 //   bfc dst, #lsb, #width
1667 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1668   // v7 and above.
1669   ASSERT(CpuFeatures::IsSupported(ARMv7));
1670   ASSERT(!dst.is(pc));
1671   ASSERT((lsb >= 0) && (lsb <= 31));
1672   ASSERT((width >= 1) && (width <= (32 - lsb)));
1673   int msb = lsb + width - 1;
1674   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1675 }
1676 
1677 
1678 // Bit field insert.
1679 // Inserts #width adjacent bits from the low bits of the source register
1680 // into position #lsb of the destination register.
1681 //   bfi dst, src, #lsb, #width
1682 void Assembler::bfi(Register dst,
1683                     Register src,
1684                     int lsb,
1685                     int width,
1686                     Condition cond) {
1687   // v7 and above.
1688   ASSERT(CpuFeatures::IsSupported(ARMv7));
1689   ASSERT(!dst.is(pc) && !src.is(pc));
1690   ASSERT((lsb >= 0) && (lsb <= 31));
1691   ASSERT((width >= 1) && (width <= (32 - lsb)));
1692   int msb = lsb + width - 1;
1693   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1694        src.code());
1695 }
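
// [Editor's note: worked examples of the bitfield write forms, assuming
// `masm` and starting from r0 = 0xffffffff, r1 = 0x12345678.]
//
//   masm.bfc(r0, 8, 8);      // clear bits [15:8]        -> r0 = 0xffff00ff
//   masm.bfi(r0, r1, 8, 8);  // insert r1[7:0] at [15:8] -> r0 = 0xffff78ff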
1696 
1697 
1698 void Assembler::pkhbt(Register dst,
1699                       Register src1,
1700                       const Operand& src2,
1701                       Condition cond) {
1702   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1703   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1704   // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
1705   ASSERT(!dst.is(pc));
1706   ASSERT(!src1.is(pc));
1707   ASSERT(!src2.rm().is(pc));
1708   ASSERT(!src2.rm().is(no_reg));
1709   ASSERT(src2.rs().is(no_reg));
1710   ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1711   ASSERT(src2.shift_op() == LSL);
1712   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1713        src2.shift_imm_*B7 | B4 | src2.rm().code());
1714 }
1715 
1716 
1717 void Assembler::pkhtb(Register dst,
1718                       Register src1,
1719                       const Operand& src2,
1720                       Condition cond) {
1721   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1722   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1723   // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
1724   ASSERT(!dst.is(pc));
1725   ASSERT(!src1.is(pc));
1726   ASSERT(!src2.rm().is(pc));
1727   ASSERT(!src2.rm().is(no_reg));
1728   ASSERT(src2.rs().is(no_reg));
1729   ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1730   ASSERT(src2.shift_op() == ASR);
1731   int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1732   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1733        asr*B7 | B6 | B4 | src2.rm().code());
1734 }
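
// [Editor's note: the pack-halfword pair merges halves of two registers;
// pkhbt takes the bottom halfword from Rn and the top from the shifted Rm,
// pkhtb the reverse. Assuming `masm`, r1 = 0x11112222, r2 = 0x33334444:]
//
//   masm.pkhbt(r0, r1, Operand(r2, LSL, 16));  // r0 = 0x44442222
//   masm.pkhtb(r0, r1, Operand(r2, ASR, 16));  // r0 = 0x11113333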
1735 
1736 
1737 void Assembler::uxtb(Register dst,
1738                      const Operand& src,
1739                      Condition cond) {
1740   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1741   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1742   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1743   ASSERT(!dst.is(pc));
1744   ASSERT(!src.rm().is(pc));
1745   ASSERT(!src.rm().is(no_reg));
1746   ASSERT(src.rs().is(no_reg));
1747   ASSERT((src.shift_imm_ == 0) ||
1748          (src.shift_imm_ == 8) ||
1749          (src.shift_imm_ == 16) ||
1750          (src.shift_imm_ == 24));
1751   ASSERT(src.shift_op() == ROR);
1752   emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
1753        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
1754 }
1755 
1756 
1757 void Assembler::uxtab(Register dst,
1758                       Register src1,
1759                       const Operand& src2,
1760                       Condition cond) {
1761   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1762   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1763   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1764   ASSERT(!dst.is(pc));
1765   ASSERT(!src1.is(pc));
1766   ASSERT(!src2.rm().is(pc));
1767   ASSERT(!src2.rm().is(no_reg));
1768   ASSERT(src2.rs().is(no_reg));
1769   ASSERT((src2.shift_imm_ == 0) ||
1770          (src2.shift_imm_ == 8) ||
1771          (src2.shift_imm_ == 16) ||
1772          (src2.shift_imm_ == 24));
1773   ASSERT(src2.shift_op() == ROR);
1774   emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
1775        ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
1776 }
1777 
1778 
1779 void Assembler::uxtb16(Register dst,
1780                        const Operand& src,
1781                        Condition cond) {
1782   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1783   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1784   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1785   ASSERT(!dst.is(pc));
1786   ASSERT(!src.rm().is(pc));
1787   ASSERT(!src.rm().is(no_reg));
1788   ASSERT(src.rs().is(no_reg));
1789   ASSERT((src.shift_imm_ == 0) ||
1790          (src.shift_imm_ == 8) ||
1791          (src.shift_imm_ == 16) ||
1792          (src.shift_imm_ == 24));
1793   ASSERT(src.shift_op() == ROR);
1794   emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
1795        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
1796 }
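
// [Editor's note: for the extend instructions the ROR amount selects which
// byte(s) of the source are extracted. Assuming `masm`, r1 = 0xaabbccdd:]
//
//   masm.uxtb(r0, Operand(r1, ROR, 8));    // byte 1        -> r0 = 0x000000cc
//   masm.uxtb16(r0, Operand(r1, ROR, 8));  // bytes 1 and 3 -> r0 = 0x00aa00cc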
1797 
1798 
1799 // Status register access instructions.
1800 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1801   ASSERT(!dst.is(pc));
1802   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1803 }
1804 
1805 
1806 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1807                     Condition cond) {
1808   ASSERT(fields >= B16 && fields < B20);  // at least one field set
1809   Instr instr;
1810   if (!src.rm_.is_valid()) {
1811     // Immediate.
1812     uint32_t rotate_imm;
1813     uint32_t immed_8;
1814     if (src.must_output_reloc_info(this) ||
1815         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1816       // Immediate operand cannot be encoded, load it first to register ip.
1817       RecordRelocInfo(src.rmode_, src.imm32_);
1818       ldr(ip, MemOperand(pc, 0), cond);
1819       msr(fields, Operand(ip), cond);
1820       return;
1821     }
1822     instr = I | rotate_imm*B8 | immed_8;
1823   } else {
1824     ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
1825     instr = src.rm_.code();
1826   }
1827   emit(cond | instr | B24 | B21 | fields | 15*B12);
1828 }
1829 
1830 
1831 // Load/Store instructions.
1832 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1833   if (dst.is(pc)) {
1834     positions_recorder()->WriteRecordedPositions();
1835   }
1836   addrmod2(cond | B26 | L, dst, src);
1837 }
1838 
1839 
1840 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1841   addrmod2(cond | B26, src, dst);
1842 }
1843 
1844 
1845 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1846   addrmod2(cond | B26 | B | L, dst, src);
1847 }
1848 
1849 
1850 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1851   addrmod2(cond | B26 | B, src, dst);
1852 }
1853 
1854 
1855 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1856   addrmod3(cond | L | B7 | H | B4, dst, src);
1857 }
1858 
1859 
1860 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1861   addrmod3(cond | B7 | H | B4, src, dst);
1862 }
1863 
1864 
1865 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1866   addrmod3(cond | L | B7 | S6 | B4, dst, src);
1867 }
1868 
1869 
1870 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1871   addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1872 }
1873 
1874 
1875 void Assembler::ldrd(Register dst1, Register dst2,
1876                      const MemOperand& src, Condition cond) {
1877   ASSERT(IsEnabled(ARMv7));
1878   ASSERT(src.rm().is(no_reg));
1879   ASSERT(!dst1.is(lr));  // r14.
1880   ASSERT_EQ(0, dst1.code() % 2);
1881   ASSERT_EQ(dst1.code() + 1, dst2.code());
1882   addrmod3(cond | B7 | B6 | B4, dst1, src);
1883 }
1884 
1885 
1886 void Assembler::strd(Register src1, Register src2,
1887                      const MemOperand& dst, Condition cond) {
1888   ASSERT(dst.rm().is(no_reg));
1889   ASSERT(!src1.is(lr));  // r14.
1890   ASSERT_EQ(0, src1.code() % 2);
1891   ASSERT_EQ(src1.code() + 1, src2.code());
1892   ASSERT(IsEnabled(ARMv7));
1893   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1894 }
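
// [Editor's sketch: ldrd/strd transfer an even/odd register pair to and
// from two consecutive words, assuming `masm`:]
//
//   masm.ldrd(r4, r5, MemOperand(r0, 8));  // r4 = [r0+8], r5 = [r0+12]
//   masm.strd(r4, r5, MemOperand(r0, 8));  // pair must be (even, even+1)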
1895 
1896 
1897 // Preload instructions.
1898 void Assembler::pld(const MemOperand& address) {
1899   // Instruction details available in ARM DDI 0406C.b, A8.8.128.
1900   // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
1901   // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
1902   ASSERT(address.rm().is(no_reg));
1903   ASSERT(address.am() == Offset);
1904   int U = B23;
1905   int offset = address.offset();
1906   if (offset < 0) {
1907     offset = -offset;
1908     U = 0;
1909   }
1910   ASSERT(offset < 4096);
1911   emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
1912        0xf*B12 | offset);
1913 }
1914 
1915 
1916 // Load/Store multiple instructions.
1917 void Assembler::ldm(BlockAddrMode am,
1918                     Register base,
1919                     RegList dst,
1920                     Condition cond) {
1921   // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
1922   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1923 
1924   addrmod4(cond | B27 | am | L, base, dst);
1925 
1926   // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1927   if (cond == al && (dst & pc.bit()) != 0) {
1928     // There is a slight chance that the ldm instruction was actually a call,
1929     // in which case it would be wrong to return into the constant pool; we
1930     // recognize this case by checking if the emission of the pool was blocked
1931     // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1932     // the case, we emit a jump over the pool.
1933     CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1934   }
1935 }
1936 
1937 
1938 void Assembler::stm(BlockAddrMode am,
1939                     Register base,
1940                     RegList src,
1941                     Condition cond) {
1942   addrmod4(cond | B27 | am, base, src);
1943 }
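
// [Editor's sketch: with a full-descending stack, db_w stores and ia_w
// loads give push/pop semantics, assuming `masm`:]
//
//   masm.stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());  // push {r4, r5, lr}
//   masm.ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());  // pop  {r4, r5, pc}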
1944 
1945 
1946 // Exception-generating instructions and debugging support.
1947 // Stops with a non-negative code less than kNumOfWatchedStops support
1949 // enabling/disabling and a counter feature; see simulator-arm.h.
1949 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1950 #ifndef __arm__
1951   ASSERT(code >= kDefaultStopCode);
1952   {
1953     // The Simulator will handle the stop instruction and get the message
1954     // address. It expects to find the address just after the svc instruction.
1955     BlockConstPoolScope block_const_pool(this);
1956     if (code >= 0) {
1957       svc(kStopCode + code, cond);
1958     } else {
1959       svc(kStopCode + kMaxStopCode, cond);
1960     }
1961     emit(reinterpret_cast<Instr>(msg));
1962   }
1963 #else  // def __arm__
1964   if (cond != al) {
1965     Label skip;
1966     b(&skip, NegateCondition(cond));
1967     bkpt(0);
1968     bind(&skip);
1969   } else {
1970     bkpt(0);
1971   }
1972 #endif  // def __arm__
1973 }
1974 
1975 
1976 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
1977   ASSERT(is_uint16(imm16));
1978   emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
1979 }
1980 
1981 
1982 void Assembler::svc(uint32_t imm24, Condition cond) {
1983   ASSERT(is_uint24(imm24));
1984   emit(cond | 15*B24 | imm24);
1985 }
1986 
1987 
1988 // Coprocessor instructions.
1989 void Assembler::cdp(Coprocessor coproc,
1990                     int opcode_1,
1991                     CRegister crd,
1992                     CRegister crn,
1993                     CRegister crm,
1994                     int opcode_2,
1995                     Condition cond) {
1996   ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1997   emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1998        crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1999 }
2000 
2001 
2002 void Assembler::cdp2(Coprocessor coproc,
2003                      int opcode_1,
2004                      CRegister crd,
2005                      CRegister crn,
2006                      CRegister crm,
2007                      int opcode_2) {  // v5 and above
2008   cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
2009 }
2010 
2011 
2012 void Assembler::mcr(Coprocessor coproc,
2013                     int opcode_1,
2014                     Register rd,
2015                     CRegister crn,
2016                     CRegister crm,
2017                     int opcode_2,
2018                     Condition cond) {
2019   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2020   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2021        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2022 }
2023 
2024 
2025 void Assembler::mcr2(Coprocessor coproc,
2026                      int opcode_1,
2027                      Register rd,
2028                      CRegister crn,
2029                      CRegister crm,
2030                      int opcode_2) {  // v5 and above
2031   mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2032 }
2033 
2034 
2035 void Assembler::mrc(Coprocessor coproc,
2036                     int opcode_1,
2037                     Register rd,
2038                     CRegister crn,
2039                     CRegister crm,
2040                     int opcode_2,
2041                     Condition cond) {
2042   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2043   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2044        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2045 }
2046 
2047 
2048 void Assembler::mrc2(Coprocessor coproc,
2049                      int opcode_1,
2050                      Register rd,
2051                      CRegister crn,
2052                      CRegister crm,
2053                      int opcode_2) {  // v5 and above
2054   mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2055 }
2056 
2057 
2058 void Assembler::ldc(Coprocessor coproc,
2059                     CRegister crd,
2060                     const MemOperand& src,
2061                     LFlag l,
2062                     Condition cond) {
2063   addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2064 }
2065 
2066 
2067 void Assembler::ldc(Coprocessor coproc,
2068                     CRegister crd,
2069                     Register rn,
2070                     int option,
2071                     LFlag l,
2072                     Condition cond) {
2073   // Unindexed addressing.
2074   ASSERT(is_uint8(option));
2075   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2076        coproc*B8 | (option & 255));
2077 }
2078 
2079 
2080 void Assembler::ldc2(Coprocessor coproc,
2081                      CRegister crd,
2082                      const MemOperand& src,
2083                      LFlag l) {  // v5 and above
2084   ldc(coproc, crd, src, l, kSpecialCondition);
2085 }
2086 
2087 
2088 void Assembler::ldc2(Coprocessor coproc,
2089                      CRegister crd,
2090                      Register rn,
2091                      int option,
2092                      LFlag l) {  // v5 and above
2093   ldc(coproc, crd, rn, option, l, kSpecialCondition);
2094 }
2095 
2096 
2097 // Support for VFP.
2098 
2099 void Assembler::vldr(const DwVfpRegister dst,
2100                      const Register base,
2101                      int offset,
2102                      const Condition cond) {
2103   // Ddst = MEM(Rbase + offset).
2104   // Instruction details available in ARM DDI 0406C.b, A8-924.
2105   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2106   // Vd(15-12) | 1011(11-8) | offset
2107   int u = 1;
2108   if (offset < 0) {
2109     offset = -offset;
2110     u = 0;
2111   }
2112   int vd, d;
2113   dst.split_code(&vd, &d);
2114 
2115   ASSERT(offset >= 0);
2116   if ((offset % 4) == 0 && (offset / 4) < 256) {
2117     emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2118          0xB*B8 | ((offset / 4) & 255));
2119   } else {
2120     // Larger offsets must be handled by computing the correct address
2121     // in the ip register.
2122     ASSERT(!base.is(ip));
2123     if (u == 1) {
2124       add(ip, base, Operand(offset));
2125     } else {
2126       sub(ip, base, Operand(offset));
2127     }
2128     emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
2129   }
2130 }
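
// [Editor's note: a usage sketch of the two paths above, assuming `masm`.
// Word-aligned offsets up to 1020 encode directly in imm8; anything else is
// materialized through ip, so the base must not itself be ip:]
//
//   masm.vldr(d0, r1, 4);     // one instruction, imm8 = 1
//   masm.vldr(d0, r1, 1024);  // add ip, r1, #1024 ; vldr d0, [ip]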
2131 
2132 
2133 void Assembler::vldr(const DwVfpRegister dst,
2134                      const MemOperand& operand,
2135                      const Condition cond) {
2136   ASSERT(!operand.rm().is_valid());
2137   ASSERT(operand.am_ == Offset);
2138   vldr(dst, operand.rn(), operand.offset(), cond);
2139 }
2140 
2141 
2142 void Assembler::vldr(const SwVfpRegister dst,
2143                      const Register base,
2144                      int offset,
2145                      const Condition cond) {
2146   // Sdst = MEM(Rbase + offset).
2147   // Instruction details available in ARM DDI 0406A, A8-628.
2148   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2149   // Vdst(15-12) | 1010(11-8) | offset
2150   int u = 1;
2151   if (offset < 0) {
2152     offset = -offset;
2153     u = 0;
2154   }
2155   int sd, d;
2156   dst.split_code(&sd, &d);
2157   ASSERT(offset >= 0);
2158 
2159   if ((offset % 4) == 0 && (offset / 4) < 256) {
2160     emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2161          0xA*B8 | ((offset / 4) & 255));
2162   } else {
2163     // Larger offsets must be handled by computing the correct address
2164     // in the ip register.
2165     ASSERT(!base.is(ip));
2166     if (u == 1) {
2167       add(ip, base, Operand(offset));
2168     } else {
2169       sub(ip, base, Operand(offset));
2170     }
2171     emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2172   }
2173 }
2174 
2175 
2176 void Assembler::vldr(const SwVfpRegister dst,
2177                      const MemOperand& operand,
2178                      const Condition cond) {
2179   ASSERT(!operand.rm().is_valid());
2180   ASSERT(operand.am_ == Offset);
2181   vldr(dst, operand.rn(), operand.offset(), cond);
2182 }
2183 
2184 
2185 void Assembler::vstr(const DwVfpRegister src,
2186                      const Register base,
2187                      int offset,
2188                      const Condition cond) {
2189   // MEM(Rbase + offset) = Dsrc.
2190   // Instruction details available in ARM DDI 0406C.b, A8-1082.
2191   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2192   // Vd(15-12) | 1011(11-8) | (offset/4)
2193   int u = 1;
2194   if (offset < 0) {
2195     offset = -offset;
2196     u = 0;
2197   }
2198   ASSERT(offset >= 0);
2199   int vd, d;
2200   src.split_code(&vd, &d);
2201 
2202   if ((offset % 4) == 0 && (offset / 4) < 256) {
2203     emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2204          ((offset / 4) & 255));
2205   } else {
2206     // Larger offsets must be handled by computing the correct address
2207     // in the ip register.
2208     ASSERT(!base.is(ip));
2209     if (u == 1) {
2210       add(ip, base, Operand(offset));
2211     } else {
2212       sub(ip, base, Operand(offset));
2213     }
2214     emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
2215   }
2216 }
2217 
2218 
2219 void Assembler::vstr(const DwVfpRegister src,
2220                      const MemOperand& operand,
2221                      const Condition cond) {
2222   ASSERT(!operand.rm().is_valid());
2223   ASSERT(operand.am_ == Offset);
2224   vstr(src, operand.rn(), operand.offset(), cond);
2225 }
2226 
2227 
2228 void Assembler::vstr(const SwVfpRegister src,
2229                      const Register base,
2230                      int offset,
2231                      const Condition cond) {
2232   // MEM(Rbase + offset) = SSrc.
2233   // Instruction details available in ARM DDI 0406A, A8-786.
2234   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2235   // Vdst(15-12) | 1010(11-8) | (offset/4)
2236   int u = 1;
2237   if (offset < 0) {
2238     offset = -offset;
2239     u = 0;
2240   }
2241   int sd, d;
2242   src.split_code(&sd, &d);
2243   ASSERT(offset >= 0);
2244   if ((offset % 4) == 0 && (offset / 4) < 256) {
2245     emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2246          0xA*B8 | ((offset / 4) & 255));
2247   } else {
2248     // Larger offsets must be handled by computing the correct address
2249     // in the ip register.
2250     ASSERT(!base.is(ip));
2251     if (u == 1) {
2252       add(ip, base, Operand(offset));
2253     } else {
2254       sub(ip, base, Operand(offset));
2255     }
2256     emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2257   }
2258 }
2259 
2260 
2261 void Assembler::vstr(const SwVfpRegister src,
2262                      const MemOperand& operand,
2263                      const Condition cond) {
2264   ASSERT(!operand.rm().is_valid());
2265   ASSERT(operand.am_ == Offset);
2266   vstr(src, operand.rn(), operand.offset(), cond);
2267 }
2268 
2269 
2270 void Assembler::vldm(BlockAddrMode am,
2271                       Register base,
2272                       DwVfpRegister first,
2273                       DwVfpRegister last,
2274                       Condition cond) {
2275   // Instruction details available in ARM DDI 0406C.b, A8-922.
2276   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2277   // first(15-12) | 1011(11-8) | (count * 2)
2278   ASSERT_LE(first.code(), last.code());
2279   ASSERT(am == ia || am == ia_w || am == db_w);
2280   ASSERT(!base.is(pc));
2281 
2282   int sd, d;
2283   first.split_code(&sd, &d);
2284   int count = last.code() - first.code() + 1;
2285   ASSERT(count <= 16);
2286   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2287        0xB*B8 | count*2);
2288 }
2289 
2290 
2291 void Assembler::vstm(BlockAddrMode am,
2292                       Register base,
2293                       DwVfpRegister first,
2294                       DwVfpRegister last,
2295                       Condition cond) {
2296   // Instruction details available in ARM DDI 0406C.b, A8-1080.
2297   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2298   // first(15-12) | 1011(11-8) | (count * 2)
2299   ASSERT_LE(first.code(), last.code());
2300   ASSERT(am == ia || am == ia_w || am == db_w);
2301   ASSERT(!base.is(pc));
2302 
2303   int sd, d;
2304   first.split_code(&sd, &d);
2305   int count = last.code() - first.code() + 1;
2306   ASSERT(count <= 16);
2307   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2308        0xB*B8 | count*2);
2309 }
2310 
2311 void Assembler::vldm(BlockAddrMode am,
2312                       Register base,
2313                       SwVfpRegister first,
2314                       SwVfpRegister last,
2315                       Condition cond) {
2316   // Instruction details available in ARM DDI 0406A, A8-626.
2317   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2318   // first(15-12) | 1010(11-8) | count
2319   ASSERT_LE(first.code(), last.code());
2320   ASSERT(am == ia || am == ia_w || am == db_w);
2321   ASSERT(!base.is(pc));
2322 
2323   int sd, d;
2324   first.split_code(&sd, &d);
2325   int count = last.code() - first.code() + 1;
2326   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2327        0xA*B8 | count);
2328 }
2329 
2330 
2331 void Assembler::vstm(BlockAddrMode am,
2332                       Register base,
2333                       SwVfpRegister first,
2334                       SwVfpRegister last,
2335                       Condition cond) {
2336   // Instruction details available in ARM DDI 0406A, A8-784.
2337   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2338   // first(15-12) | 1010(11-8) | count
2339   ASSERT_LE(first.code(), last.code());
2340   ASSERT(am == ia || am == ia_w || am == db_w);
2341   ASSERT(!base.is(pc));
2342 
2343   int sd, d;
2344   first.split_code(&sd, &d);
2345   int count = last.code() - first.code() + 1;
2346   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2347        0xA*B8 | count);
2348 }
2349 
2350 
2351 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2352   uint64_t i;
2353   OS::MemCopy(&i, &d, 8);
2354 
2355   *lo = i & 0xffffffff;
2356   *hi = i >> 32;
2357 }
2358 
2359 
2360 // Only works for little-endian floating point formats.
2361 // We don't support VFP on mixed-endian floating point platforms.
2362 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2363   ASSERT(CpuFeatures::IsSupported(VFP3));
2364 
2365   // VMOV can accept an immediate of the form:
2366   //
2367   //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2368   //
2369   // The immediate is encoded using an 8-bit quantity, comprised of two
2370   // 4-bit fields. For an 8-bit immediate of the form:
2371   //
2372   //  [abcdefgh]
2373   //
2374   // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2375   // created of the form:
2376   //
2377   //  [aBbbbbbb,bbcdefgh,00000000,00000000,
2378   //      00000000,00000000,00000000,00000000]
2379   //
2380   // where B = ~b.
2381   //
2382 
2383   uint32_t lo, hi;
2384   DoubleAsTwoUInt32(d, &lo, &hi);
2385 
2386   // The most obvious constraint is the long block of zeroes.
2387   if ((lo != 0) || ((hi & 0xffff) != 0)) {
2388     return false;
2389   }
2390 
2391   // Bits 61:54 (the replicated b bits) must be all clear or all set.
2392   if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2393     return false;
2394   }
2395 
2396   // Bit 62 (B) must be NOT bit 61 (b).
2397   if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2398     return false;
2399   }
2400 
2401   // Create the encoded immediate in the form:
2402   //  [00000000,0000abcd,00000000,0000efgh]
2403   *encoding  = (hi >> 16) & 0xf;      // Low nybble.
2404   *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
2405   *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.
2406 
2407   return true;
2408 }
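
// [Editor's note: a worked example of the predicate above. 1.0 is
// 0x3ff0000000000000: lo and the low half of hi are zero, the replicated
// b bits 61:54 are all set, and bit 62 (0) is the complement of bit 61 (1),
// so it fits. The extracted fields are abcd = 0111 and efgh = 0000, i.e.
// imm8 = 0x70, which expands back to +16 * 2^-4 = 1.0.]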
2409 
2410 
2411 void Assembler::vmov(const DwVfpRegister dst,
2412                      double imm,
2413                      const Register scratch) {
2414   uint32_t enc;
2415   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2416     // The double can be encoded in the instruction.
2417     //
2418     // Dd = immediate
2419     // Instruction details available in ARM DDI 0406C.b, A8-936.
2420     // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2421     // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2422     int vd, d;
2423     dst.split_code(&vd, &d);
2424     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2425   } else if (FLAG_enable_vldr_imm) {
2426     // TODO(jfb) Temporarily turned off until we have constant blinding or
2427     //           some equivalent mitigation: an attacker can otherwise control
2428     //           generated data which also happens to be executable, a Very Bad
2429     //           Thing indeed.
2430     //           Blinding gets tricky because we don't have xor, we probably
2431     //           need to add/subtract without losing precision, which requires a
2432     //           cookie value that Lithium is probably better positioned to
2433     //           choose.
2434     //           We could also add a few peepholes here like detecting 0.0 and
2435     //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
2436     //           to zero (we set flush-to-zero), and normalizing NaN values.
2437     //           We could also detect redundant values.
2438     //           The code could also randomize the order of values, though
2439     //           that's tricky because vldr has a limited reach. Furthermore
2440     //           it breaks load locality.
2441     RecordRelocInfo(imm);
2442     vldr(dst, MemOperand(pc, 0));
2443   } else {
2444     // Synthesise the double from ARM immediates.
2445     uint32_t lo, hi;
2446     DoubleAsTwoUInt32(imm, &lo, &hi);
2447 
2448     if (scratch.is(no_reg)) {
2449       if (dst.code() < 16) {
2450         const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
2451         // Move the low part of the double into the lower of the corresponding S
2452         // registers of D register dst.
2453         mov(ip, Operand(lo));
2454         vmov(loc.low(), ip);
2455 
2456         // Move the high part of the double into the higher of the
2457         // corresponding S registers of D register dst.
2458         mov(ip, Operand(hi));
2459         vmov(loc.high(), ip);
2460       } else {
2461         // D16-D31 does not have S registers, so move the low and high parts
2462         // directly to the D register using vmov.32.
2463         // Note: This may be slower, so we only do this when we have to.
2464         mov(ip, Operand(lo));
2465         vmov(dst, VmovIndexLo, ip);
2466         mov(ip, Operand(hi));
2467         vmov(dst, VmovIndexHi, ip);
2468       }
2469     } else {
2470       // Move the low and high parts of the double to a D register in one
2471       // instruction.
2472       mov(ip, Operand(lo));
2473       mov(scratch, Operand(hi));
2474       vmov(dst, ip, scratch);
2475     }
2476   }
2477 }
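
// [Editor's sketch: how the three strategies above get picked, assuming
// `masm`:]
//
//   masm.vmov(d0, 1.0);            // VFP3: fits imm8, one vmov.f64
//   masm.vmov(d0, 1234.5678);      // no scratch: constant pool vldr, or
//                                  // per-word moves through ip
//   masm.vmov(d0, 1234.5678, r2);  // scratch: mov ip/lo, mov r2/hi, then
//                                  // a single vmov d0, ip, r2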
2478 
2479 
2480 void Assembler::vmov(const SwVfpRegister dst,
2481                      const SwVfpRegister src,
2482                      const Condition cond) {
2483   // Sd = Sm
2484   // Instruction details available in ARM DDI 0406B, A8-642.
2485   int sd, d, sm, m;
2486   dst.split_code(&sd, &d);
2487   src.split_code(&sm, &m);
2488   emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2489 }
2490 
2491 
2492 void Assembler::vmov(const DwVfpRegister dst,
2493                      const DwVfpRegister src,
2494                      const Condition cond) {
2495   // Dd = Dm
2496   // Instruction details available in ARM DDI 0406C.b, A8-938.
2497   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2498   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2499   int vd, d;
2500   dst.split_code(&vd, &d);
2501   int vm, m;
2502   src.split_code(&vm, &m);
2503   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2504        vm);
2505 }
2506 
2507 
2508 void Assembler::vmov(const DwVfpRegister dst,
2509                      const VmovIndex index,
2510                      const Register src,
2511                      const Condition cond) {
2512   // Dd[index] = Rt
2513   // Instruction details available in ARM DDI 0406C.b, A8-940.
2514   // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2515   // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2516   ASSERT(index.index == 0 || index.index == 1);
2517   int vd, d;
2518   dst.split_code(&vd, &d);
2519   emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2520        d*B7 | B4);
2521 }
2522 
2523 
2524 void Assembler::vmov(const Register dst,
2525                      const VmovIndex index,
2526                      const DwVfpRegister src,
2527                      const Condition cond) {
2528   // Rt = Dn[index]
2529   // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2530   // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2531   // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2532   ASSERT(index.index == 0 || index.index == 1);
2533   int vn, n;
2534   src.split_code(&vn, &n);
2535   emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2536        0xB*B8 | n*B7 | B4);
2537 }
2538 
2539 
2540 void Assembler::vmov(const DwVfpRegister dst,
2541                      const Register src1,
2542                      const Register src2,
2543                      const Condition cond) {
2544   // Dm = <Rt,Rt2>.
2545   // Instruction details available in ARM DDI 0406C.b, A8-948.
2546   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2547   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2548   ASSERT(!src1.is(pc) && !src2.is(pc));
2549   int vm, m;
2550   dst.split_code(&vm, &m);
2551   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2552        src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2553 }
2554 
2555 
2556 void Assembler::vmov(const Register dst1,
2557                      const Register dst2,
2558                      const DwVfpRegister src,
2559                      const Condition cond) {
2560   // <Rt,Rt2> = Dm.
2561   // Instruction details available in ARM DDI 0406C.b, A8-948.
2562   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2563   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2564   ASSERT(!dst1.is(pc) && !dst2.is(pc));
2565   int vm, m;
2566   src.split_code(&vm, &m);
2567   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2568        dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2569 }
2570 
2571 
2572 void Assembler::vmov(const SwVfpRegister dst,
2573                      const Register src,
2574                      const Condition cond) {
2575   // Sn = Rt.
2576   // Instruction details available in ARM DDI 0406A, A8-642.
2577   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2578   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2579   ASSERT(!src.is(pc));
2580   int sn, n;
2581   dst.split_code(&sn, &n);
2582   emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2583 }
2584 
2585 
2586 void Assembler::vmov(const Register dst,
2587                      const SwVfpRegister src,
2588                      const Condition cond) {
2589   // Rt = Sn.
2590   // Instruction details available in ARM DDI 0406A, A8-642.
2591   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2592   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2593   ASSERT(!dst.is(pc));
2594   int sn, n;
2595   src.split_code(&sn, &n);
2596   emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2597 }
2598 
2599 
2600 // Type of data to read from or write to VFP register.
2601 // Used as specifier in generic vcvt instruction.
2602 enum VFPType { S32, U32, F32, F64 };
2603 
2604 
2605 static bool IsSignedVFPType(VFPType type) {
2606   switch (type) {
2607     case S32:
2608       return true;
2609     case U32:
2610       return false;
2611     default:
2612       UNREACHABLE();
2613       return false;
2614   }
2615 }
2616 
2617 
2618 static bool IsIntegerVFPType(VFPType type) {
2619   switch (type) {
2620     case S32:
2621     case U32:
2622       return true;
2623     case F32:
2624     case F64:
2625       return false;
2626     default:
2627       UNREACHABLE();
2628       return false;
2629   }
2630 }
2631 
2632 
2633 static bool IsDoubleVFPType(VFPType type) {
2634   switch (type) {
2635     case F32:
2636       return false;
2637     case F64:
2638       return true;
2639     default:
2640       UNREACHABLE();
2641       return false;
2642   }
2643 }
2644 
2645 
2646 // Split five bit reg_code based on size of reg_type.
2647 //  32-bit register codes are Vm:M
2648 //  64-bit register codes are M:Vm
2649 // where Vm is four bits, and M is a single bit.
2650 static void SplitRegCode(VFPType reg_type,
2651                          int reg_code,
2652                          int* vm,
2653                          int* m) {
2654   ASSERT((reg_code >= 0) && (reg_code <= 31));
2655   if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2656     // 32 bit type.
2657     *m  = reg_code & 0x1;
2658     *vm = reg_code >> 1;
2659   } else {
2660     // 64 bit type.
2661     *m  = (reg_code & 0x10) >> 4;
2662     *vm = reg_code & 0x0F;
2663   }
2664 }
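
// [Editor's note: worked examples of the split. S5 (32-bit code 5) gives
// m = 1, vm = 2, i.e. Vm:M = 0010:1; D17 (64-bit code 17) gives m = 1,
// vm = 1, i.e. M:Vm = 1:0001.]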
2665 
2666 
2667 // Encode vcvt.src_type.dst_type instruction.
2668 static Instr EncodeVCVT(const VFPType dst_type,
2669                         const int dst_code,
2670                         const VFPType src_type,
2671                         const int src_code,
2672                         VFPConversionMode mode,
2673                         const Condition cond) {
2674   ASSERT(src_type != dst_type);
2675   int D, Vd, M, Vm;
2676   SplitRegCode(src_type, src_code, &Vm, &M);
2677   SplitRegCode(dst_type, dst_code, &Vd, &D);
2678 
2679   if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2680     // Conversion between IEEE floating point and 32-bit integer.
2681     // Instruction details available in ARM DDI 0406B, A8.6.295.
2682     // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2683     // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2684     ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2685 
2686     int sz, opc2, op;
2687 
2688     if (IsIntegerVFPType(dst_type)) {
2689       opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2690       sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2691       op = mode;
2692     } else {
2693       ASSERT(IsIntegerVFPType(src_type));
2694       opc2 = 0x0;
2695       sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2696       op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2697     }
2698 
2699     return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2700             Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2701   } else {
2702     // Conversion between IEEE double and single precision.
2703     // Instruction details available in ARM DDI 0406B, A8.6.298.
2704     // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2705     // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2706     int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2707     return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2708             Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2709   }
2710 }
2711 
2712 
2713 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2714                              const SwVfpRegister src,
2715                              VFPConversionMode mode,
2716                              const Condition cond) {
2717   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2718 }
2719 
2720 
2721 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2722                              const SwVfpRegister src,
2723                              VFPConversionMode mode,
2724                              const Condition cond) {
2725   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2726 }
2727 
2728 
2729 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2730                              const SwVfpRegister src,
2731                              VFPConversionMode mode,
2732                              const Condition cond) {
2733   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2734 }
2735 
2736 
2737 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2738                              const DwVfpRegister src,
2739                              VFPConversionMode mode,
2740                              const Condition cond) {
2741   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2742 }
2743 
2744 
2745 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2746                              const DwVfpRegister src,
2747                              VFPConversionMode mode,
2748                              const Condition cond) {
2749   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2750 }
2751 
2752 
2753 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2754                              const SwVfpRegister src,
2755                              VFPConversionMode mode,
2756                              const Condition cond) {
2757   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2758 }
2759 
2760 
2761 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2762                              const DwVfpRegister src,
2763                              VFPConversionMode mode,
2764                              const Condition cond) {
2765   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2766 }
2767 
2768 
2769 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2770                              int fraction_bits,
2771                              const Condition cond) {
2772   // Instruction details available in ARM DDI 0406C.b, A8-874.
2773   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2774   // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2775   ASSERT(fraction_bits > 0 && fraction_bits <= 32);
2776   ASSERT(CpuFeatures::IsSupported(VFP3));
2777   int vd, d;
2778   dst.split_code(&vd, &d);
2779   int i = ((32 - fraction_bits) >> 4) & 1;
2780   int imm4 = (32 - fraction_bits) & 0xf;
2781   emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2782        vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
2783 }
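
// [Editor's note: a worked example of the field computation above. For
// fraction_bits = 16, 32 - 16 = 16, so i = 1 and imm4 = 0; the emitted
// vcvt then treats the register's low word as a signed Q16.16 fixed-point
// value and converts it to a double in place.]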
2784 
2785 
2786 void Assembler::vneg(const DwVfpRegister dst,
2787                      const DwVfpRegister src,
2788                      const Condition cond) {
2789   // Instruction details available in ARM DDI 0406C.b, A8-968.
2790   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2791   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2792   int vd, d;
2793   dst.split_code(&vd, &d);
2794   int vm, m;
2795   src.split_code(&vm, &m);
2796 
2797   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2798        m*B5 | vm);
2799 }
2800 
2801 
2802 void Assembler::vabs(const DwVfpRegister dst,
2803                      const DwVfpRegister src,
2804                      const Condition cond) {
2805   // Instruction details available in ARM DDI 0406C.b, A8-524.
2806   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2807   // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2808   int vd, d;
2809   dst.split_code(&vd, &d);
2810   int vm, m;
2811   src.split_code(&vm, &m);
2812   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
2813        m*B5 | vm);
2814 }
2815 
2816 
2817 void Assembler::vadd(const DwVfpRegister dst,
2818                      const DwVfpRegister src1,
2819                      const DwVfpRegister src2,
2820                      const Condition cond) {
2821   // Dd = vadd(Dn, Dm) double precision floating point addition.
2822   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2823   // Instruction details available in ARM DDI 0406C.b, A8-830.
2824   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2825   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2826   int vd, d;
2827   dst.split_code(&vd, &d);
2828   int vn, n;
2829   src1.split_code(&vn, &n);
2830   int vm, m;
2831   src2.split_code(&vm, &m);
2832   emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2833        n*B7 | m*B5 | vm);
2834 }
2835 
2836 
2837 void Assembler::vsub(const DwVfpRegister dst,
2838                      const DwVfpRegister src1,
2839                      const DwVfpRegister src2,
2840                      const Condition cond) {
2841   // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2842   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2843   // Instruction details available in ARM DDI 0406C.b, A8-1086.
2844   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2845   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2846   int vd, d;
2847   dst.split_code(&vd, &d);
2848   int vn, n;
2849   src1.split_code(&vn, &n);
2850   int vm, m;
2851   src2.split_code(&vm, &m);
2852   emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2853        n*B7 | B6 | m*B5 | vm);
2854 }
2855 
2856 
2857 void Assembler::vmul(const DwVfpRegister dst,
2858                      const DwVfpRegister src1,
2859                      const DwVfpRegister src2,
2860                      const Condition cond) {
2861   // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2862   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2863   // Instruction details available in ARM DDI 0406C.b, A8-960.
2864   // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
2865   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2866   int vd, d;
2867   dst.split_code(&vd, &d);
2868   int vn, n;
2869   src1.split_code(&vn, &n);
2870   int vm, m;
2871   src2.split_code(&vm, &m);
2872   emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2873        n*B7 | m*B5 | vm);
2874 }


void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}


void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
       m*B5 | vm);
}


void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
  ASSERT(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}


void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110(27-24) | 1110(23-20) | 0001(19-16) |
  // Rt(15-12) | 1010(11-8) | 0(7) | 00(6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110(27-24) | 1111(23-20) | 0001(19-16) |
  // Rt(15-12) | 1010(11-8) | 0(7) | 00(6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
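
// Usage sketch (illustrative; assumes b(Label*, Condition) and the default
// al condition as declared elsewhere in this assembler's interface): floating
// point comparisons set the FPSCR flags, which must be copied into the APSR
// before a conditional branch can test them:
//   vcmp(d0, d1);
//   vmrs(pc);            // Rt == 15 encodes "vmrs APSR_nzcv, FPSCR".
//   b(&not_equal, ne);   // hypothetical label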


void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}


// Support for NEON.

void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}


void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}


void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
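
// Usage sketch (illustrative; NeonU8 is assumed to be among the NeonDataType
// enumerators declared for this port): vmovl(NeonU8, q0, d2) widens the eight
// unsigned bytes in d2 into eight 16-bit lanes of q0, i.e. vmovl.u8 q0, d2.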


// Pseudo instructions.
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
  // part of the CPU's pipeline and still has to issue. Older ARM chips simply
  // used MOV Rx, Rx as a NOP, and that performs better even on newer CPUs.
  // We therefore use MOV Rx, Rx even on newer CPUs, and use Rx to encode
  // a type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  emit(al | 13*B21 | type*B12 | type);
}
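
// For illustration: nop(0) emits al | 13*B21 == 0xE1A00000, i.e. mov r0, r0,
// and nop(1) emits 0xE1A01001, i.e. mov r1, r1, so the type can be recovered
// by IsNop() below.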


bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters - 1)*B12) |          // Mask out register.
             EncodeMovwImmediate(0xFFFF));        // Mask out immediate value.
  return instr == 0x34*B20;
}


bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters - 1)*B12) |          // Mask out destination.
             EncodeMovwImmediate(0xFFFF));        // Mask out immediate value.
  return instr == 0x30*B20;
}
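
// For illustration: movw r0, #0x1234 assembles to 0xE3010234. Clearing the
// condition (bits 31-28), the destination (bits 15-12), and the split
// immediate (bits 19-16 and 11-0) leaves 0x03000000 == 0x30*B20, so IsMovW()
// accepts the instruction for any cond, Rd, and immediate.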


bool Assembler::IsNop(Instr instr, int type) {
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // Check for mov rx, rx where x = type.
  return instr == (al | 13*B21 | type*B12 | type);
}


bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc-relative pointing outside the code
  // buffer, nor pc-absolute pointing inside it, so there is no need to
  // relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
}
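
// The growth schedule above: anything below 4KB becomes 4KB, buffers then
// double on each overflow until they reach 1MB, and grow linearly by 1MB
// afterwards (e.g. 4KB, 8KB, ..., 512KB, 1MB, 2MB, 3MB, ...).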


void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
                                UseConstantPoolMode mode) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      mode == DONT_USE_CONSTANT_POOL) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || mode == DONT_USE_CONSTANT_POOL);
    // These modes do not need an entry in the constant pool.
  } else {
    RecordRelocInfoConstantPoolEntryHelper(rinfo);
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::RecordRelocInfo(double data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, data);
  RecordRelocInfoConstantPoolEntryHelper(rinfo);
}


void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
  if (rinfo.rmode() == RelocInfo::NONE64) {
    ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
    if (num_pending_64_bit_reloc_info_ == 0) {
      first_const_pool_64_use_ = pc_offset();
    }
    pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
  } else {
    ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
    if (num_pending_32_bit_reloc_info_ == 0) {
      first_const_pool_32_use_ = pc_offset();
    }
    pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
  }
  // Make sure the constant pool is not emitted in place of the next
  // instruction for which we just recorded relocation info.
  BlockConstPoolFor(1);
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
           (start - first_const_pool_32_use_ +
            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
    ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
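
// For illustration (kInstrSize == 4 on ARM): with pc_offset() == 100,
// BlockConstPoolFor(2) computes pc_limit == 108 and raises
// no_const_pool_before_ to it, so no constant pool may start before the two
// protected instructions have been fully emitted.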


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and by BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_reloc_info_ == 0) &&
      (num_pending_64_bit_reloc_info_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (including the jump over the pool, the constant pool marker, and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  bool require_64_bit_align = false;
  if (has_fp_values) {
    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7) != 0;
    if (require_64_bit_align) {
      size_after_marker += kInstrSize;
    }
    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
  }

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in order, so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
    bool need_emit = false;
    if (has_fp_values) {
      int dist64 = pc_offset() +
                   size -
                   num_pending_32_bit_reloc_info_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    int dist32 = pc_offset() + size - first_const_pool_32_use_;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
    if (!need_emit) return;
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down the constant pool marker, an "undefined instruction". The
    // encoded data size helps the disassembler know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // that of 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];

      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      ASSERT((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      ASSERT(is_uint10(delta));

      bool found = false;
      uint64_t value = rinfo.raw_data64();
      for (int j = 0; j < i; j++) {
        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
        if (value == rinfo2.raw_data64()) {
          found = true;
          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
          Instr instr2 = instr_at(rinfo2.pc());
          ASSERT(IsVldrDPcImmediateOffset(instr2));
          delta = GetVldrDRegisterImmediateOffset(instr2);
          delta += rinfo2.pc() - rinfo.pc();
          break;
        }
      }

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      if (!found) {
        uint64_t uint_data = rinfo.raw_data64();
        emit(uint_data & 0xFFFFFFFF);
        emit(uint_data >> 32);
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::NONE64);

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      ASSERT(!IsVldrDPcImmediateOffset(instr));

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
        ASSERT(is_uint12(delta));
        // 0 is the smallest delta:
        //   ldr rd, [pc, #0]
        //   constant pool marker
        //   data

        bool found = false;
        if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
          for (int j = 0; j < i; j++) {
            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];

            if ((rinfo2.data() == rinfo.data()) &&
                (rinfo2.rmode() == rinfo.rmode())) {
              Instr instr2 = instr_at(rinfo2.pc());
              if (IsLdrPcImmediateOffset(instr2)) {
                delta = GetLdrRegisterImmediateOffset(instr2);
                delta += rinfo2.pc() - rinfo.pc();
                found = true;
                break;
              }
            }
          }
        }

        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));

        if (!found) {
          emit(rinfo.data());
        }
      } else {
        ASSERT(IsMovW(instr));
      }
    }

    num_pending_32_bit_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
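
// The pool emitted above has the following layout (sketch):
//   [ b after_pool ]           ; only when require_jump
//   constant pool marker       ; undefined instruction encoding the length
//   [ second marker ]          ; padding, only when 64-bit alignment needed
//   64-bit (double) entries    ; deduplicated by raw bit pattern
//   32-bit entries             ; deduplicated when the serializer is off
//  after_pool:                 ; bound here when the jump was emitted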


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM