// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "arm/assembler-arm-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;


#ifdef __arm__
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__


void CpuFeatures::Probe() {
  ASSERT(!initialized_);
#ifdef DEBUG
  initialized_ = true;
#endif
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (Serializer::enabled()) {
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
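
// Usage sketch (hypothetical call site, not part of this file): once Probe()
// has run, code generation can be specialized on the detected feature bits,
// e.g.
//   if (CpuFeatures::IsSupported(VFP3)) { /* emit VFPv3 instructions */ }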


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM means that it is a movw/movt instruction. We don't
  // generate those yet.
  return false;
}



void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!HEAP->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}

MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
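
// Descriptive note on the *Flip constants: complementary ARM data-processing
// opcodes differ by a fixed XOR in the opcode field (bits 21-24). For example
// ADD (0100) ^ SUB (0010) == 0110, i.e. kAddSubFlip; MOV (1101) ^ MVN (1111)
// == 0010, i.e. bit 22 (kMovMvnFlip); CMP (1010) ^ CMN (1011) == bit 21
// (kCmpCmnFlip). fits_shifter() below exploits this to rewrite an instruction
// when only the complemented (or negated) immediate is encodable.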

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;


// Spare buffer.
static const int kMinimalBufferSize = 4*KB;


Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      allow_peephole_optimization_(false),
      emit_debug_code_(FLAG_debug_code) {
  allow_peephole_optimization_ = FLAG_peephole_optimization;
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
}


Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
  if (own_buffer_) {
    if (isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Set up the code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // by 4 to get the offset in bytes.
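  // For illustration: imm24 == 0xffffff encodes -1, and
  // (0xffffff << 8) >> 6 == -4, i.e. -1 * 4, with the arithmetic right
  // shift doing the sign extension.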
  return ((instr & kImm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
}


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;
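
// Sketch of the link chain (descriptive, inferred from target_at() and
// bind_to() below): each unresolved branch stores, as its branch target, the
// position of the previous unresolved use of the same label. Following the
// chain via target_at() eventually yields kEndOfChain, at which point next()
// marks the label unused.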


int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 1 (the halfword bit) of imm26.
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 1 (the halfword bit) of imm26.
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


static Instr EncodeMovwImmediate(uint32_t immediate) {
  ASSERT(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
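
// Illustrative example: movw splits its 16-bit immediate into imm4 (bits
// 16-19 of the instruction) and imm12 (bits 0-11), so
// EncodeMovwImmediate(0xabcd) == 0xa0bcd.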


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    // Rotate left by 2*rot; rot == 0 is handled separately because a shift
    // by 32 bits would be undefined behavior in C++.
    uint32_t imm8 = (rot == 0)
        ? imm32
        : (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
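
// Worked example (illustrative): imm32 == 0x3fc00 is 0xff rotated right by
// 22 bits, so the loop finds rotate_imm == 11 (rotations are encoded in
// units of 2) and immed_8 == 0xff. A value like 0x12345 has no 8-bit window
// under any even rotation and falls through to the complementary-opcode
// rewrites above.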


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_use_constant_pool() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode_ == RelocInfo::NONE) {
    return false;
  }
  return true;
}


bool Operand::is_single_instruction(Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_use_constant_pool() ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // the constant pool is required. For a mov instruction that does not set
    // the condition code, alternative encodings can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      if (must_use_constant_pool() ||
          !CpuFeatures::IsSupported(ARMv7)) {
        // The mov instruction will be an ldr from the constant pool (one
        // instruction).
        return true;
      } else {
        // The mov instruction will be a mov or a movw followed by a movt
        // (two instructions).
        return false;
      }
    } else {
      // If this is not a mov or mvn instruction there will always be an
      // additional instruction - either a mov or an ldr. The mov might itself
      // be two instructions (movw followed by movt), so counting the actual
      // instruction, two or three instructions will be generated.
      return false;
    }
  } else {
    // No use of the constant pool and the immediate operand can be encoded as
    // a shifter operand.
    return true;
  }
}


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_use_constant_pool() ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        if (x.must_use_constant_pool() ||
            !CpuFeatures::IsSupported(ARMv7)) {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw, will certainly not use constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to
        // avoid a constant pool entry by using mvn or movw.
        if (!x.must_use_constant_pool() &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}
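
// Illustrative expansion (not emitted verbatim anywhere in this file): a
// request like add(r0, r1, Operand(0x12345)) has no shifter encoding, so
// addrmod1() falls back to roughly
//   movw ip, #0x2345
//   movt ip, #0x1        (on ARMv7; an ldr from the constant pool otherwise)
//   add r0, r1, ip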


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip;
      // rn (and rd in a load) should never be ip, or they will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip;
      // rn (and rd in a load) should never be ip, or they will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offsets are not supported, load the index first;
    // rn (and rd in a load) should never be ip, or they will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
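
// Note (descriptive): the offset is taken relative to pc_offset() +
// kPcLoadDelta because an executing ARM branch computes its target relative
// to the value the hardware pc reads as, i.e. the instruction address plus
// 8 bytes.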


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}


// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}


void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
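
// Note (descriptive): unlike b/bl, the immediate blx above only requires
// halfword alignment of the offset; the halfword bit travels in bit 24 (the
// variable h), matching the decoding in target_at().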


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}


// Data-processing instructions.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}


void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}


void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}


void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  mov(reg, Operand(immediate), LeaveCC, cond);
}


void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
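
// Usage sketch (illustrative): loading an arbitrary 32-bit constant on ARMv7
// is a movw/movt pair, e.g. for 0xdeadbeef:
//   movw(r0, 0xbeef);  // r0 = 0x0000beef
//   movt(r0, 0xdead);  // r0 = 0xdeadbeef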


void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}


// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}


// Saturating instructions.

// Unsigned saturate.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}


// Bitfield manipulation instructions.

// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
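
// Illustrative use: ubfx(r0, r1, 8, 8, al) extracts the second byte of r1
// into r0, i.e. r0 = (r1 >> 8) & 0xff.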


// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}


// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
// bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}


// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_use_constant_pool() ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}


// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(ry), pop(rx)
  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated if ry = rx.
  // If ry != rx, a register copy from ry to rx is inserted
  // after eliminating the push and the pop instructions.
  if (can_peephole_optimize(2)) {
    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(push_instr) && IsPop(pop_instr)) {
      if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
        // For consecutive push and pop on different registers,
        // we delete both the push & pop and insert a register move.
        // push ry, pop rx --> mov rx, ry
        Register reg_pushed, reg_popped;
        reg_pushed = GetRd(push_instr);
        reg_popped = GetRd(pop_instr);
        pc_ -= 2 * kInstrSize;
        // Insert a mov instruction, which is better than a pair of push & pop.
        mov(reg_popped, reg_pushed);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
                 pc_offset());
        }
      } else {
        // For consecutive push and pop on the same register,
        // both the push and the pop can be deleted.
        pc_ -= 2 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(2)) {
    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);

    if ((IsStrRegFpOffset(str_instr) &&
         IsLdrRegFpOffset(ldr_instr)) ||
        (IsStrRegFpNegOffset(str_instr) &&
         IsLdrRegFpNegOffset(ldr_instr))) {
      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
          (str_instr & kLdrStrInstrArgumentMask)) {
        // Pattern: Ldr/str same fp+offset, same register.
        //
        // The following:
        // str rx, [fp, #-12]
        // ldr rx, [fp, #-12]
        //
        // Becomes:
        // str rx, [fp, #-12]

        pc_ -= 1 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
        }
      } else if ((ldr_instr & kLdrStrOffsetMask) ==
                 (str_instr & kLdrStrOffsetMask)) {
        // Pattern: Ldr/str same fp+offset, different register.
        //
        // The following:
        // str rx, [fp, #-12]
        // ldr ry, [fp, #-12]
        //
        // Becomes:
        // str rx, [fp, #-12]
        // mov ry, rx

        Register reg_stored, reg_loaded;
        reg_stored = GetRd(str_instr);
        reg_loaded = GetRd(ldr_instr);
        pc_ -= 1 * kInstrSize;
        // Insert a mov instruction, which is better than ldr.
        mov(reg_loaded, reg_stored);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(3)) {
    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
    if (IsPush(mem_write_instr) &&
        IsPop(mem_read_instr)) {
      if ((IsLdrRegFpOffset(ldr_instr) ||
           IsLdrRegFpNegOffset(ldr_instr))) {
        if (Instruction::RdValue(mem_write_instr) ==
            Instruction::RdValue(mem_read_instr)) {
          // Pattern: push & pop from/to same register,
          // with a fp+offset ldr in between.
          //
          // The following:
          // str rx, [sp, #-4]!
          // ldr rz, [fp, #-24]
          // ldr rx, [sp], #+4
          //
          // Becomes:
          // if(rx == rz)
          //   delete all
          // else
          //   ldr rz, [fp, #-24]

          if (Instruction::RdValue(mem_write_instr) ==
              Instruction::RdValue(ldr_instr)) {
            pc_ -= 3 * kInstrSize;
          } else {
            pc_ -= 3 * kInstrSize;
            // Reinsert back the ldr rz.
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
          }
        } else {
          // Pattern: push & pop from/to different registers
          // with a fp+offset ldr in between.
          //
          // The following:
          // str rx, [sp, #-4]!
          // ldr rz, [fp, #-24]
          // ldr ry, [sp], #+4
          //
          // Becomes:
          // if(ry == rz)
          //   mov ry, rx;
          // else if(rx != rz)
          //   ldr rz, [fp, #-24]
          //   mov ry, rx
          // else if((ry != rz) || (rx == rz)) becomes:
          //   mov ry, rx
          //   ldr rz, [fp, #-24]

          Register reg_pushed, reg_popped;
          if (Instruction::RdValue(mem_read_instr) ==
              Instruction::RdValue(ldr_instr)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
          } else if (Instruction::RdValue(mem_write_instr) !=
                     Instruction::RdValue(ldr_instr)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            emit(ldr_instr);
            mov(reg_popped, reg_pushed);
          } else if ((Instruction::RdValue(mem_read_instr) !=
                      Instruction::RdValue(ldr_instr)) ||
                     (Instruction::RdValue(mem_write_instr) ==
                      Instruction::RdValue(ldr_instr))) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
          }
        }
      }
    }
  }
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //   add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  //   -> str r, [sp, 0], al
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_peephole_optimization) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1593
1594
ldrb(Register dst,const MemOperand & src,Condition cond)1595 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1596 addrmod2(cond | B26 | B | L, dst, src);
1597 }
1598
1599
strb(Register src,const MemOperand & dst,Condition cond)1600 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1601 addrmod2(cond | B26 | B, src, dst);
1602 }
1603
1604
ldrh(Register dst,const MemOperand & src,Condition cond)1605 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1606 addrmod3(cond | L | B7 | H | B4, dst, src);
1607 }
1608
1609
strh(Register src,const MemOperand & dst,Condition cond)1610 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1611 addrmod3(cond | B7 | H | B4, src, dst);
1612 }
1613
1614
ldrsb(Register dst,const MemOperand & src,Condition cond)1615 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1616 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1617 }
1618
1619
ldrsh(Register dst,const MemOperand & src,Condition cond)1620 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1621 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1622 }
1623
1624
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
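
// Usage sketch (illustrative only, not code from this file): ldrd/strd
// operate on an even/odd register pair, e.g.
//   __ ldrd(r0, r1, MemOperand(fp, -8));  // r0 <- [fp-8], r1 <- [fp-4].
// A pair such as (r1, r2) would fail the ASSERT_EQ checks above.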

// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: an ldmxx base, {..sp..} with base != sp is not
  // restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by
  // ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}


void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
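
// A typical prologue/epilogue pair built on stm/ldm (hedged sketch, not
// code from this file):
//   __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());  // push {r4, r5, lr}
//   __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());  // pop {r4, r5, pc}
// The ldm form that loads pc doubles as the function return, which is why
// ldm above considers emitting the constant pool at that point.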


// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  ASSERT(code >= kDefaultStopCode);
  // The Simulator will handle the stop instruction and get the message
  // address. It expects to find the address just after the svc instruction.
  BlockConstPoolFor(2);
  if (code >= 0) {
    svc(kStopCode + code, cond);
  } else {
    svc(kStopCode + kMaxStopCode, cond);
  }
  emit(reinterpret_cast<Instr>(msg));
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  svc(0x9f0001, cond);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
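
// For example (illustrative; assumes the default stop code declared in the
// header): stop("unexpected value", ne) plants a conditional stop. In the
// simulator build it emits an svc followed by the message pointer; on
// hardware it becomes a bkpt guarded by a branch on the negated condition.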


void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
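
// Worked encoding example: bkpt(0x1234) emits 0xE1212374, i.e. cond = AL
// (0xE), opcode bits B24 | B21, and imm16 split as 0x123 (bits 19-8) and
// 0x4 (bits 3-0) around the 0111 BKPT nibble (bits 7-4).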


void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}


// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}


void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}


void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}


void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, kSpecialCondition);
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, kSpecialCondition);
}


// Support for VFP.

void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }

  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
  }
}
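
// For example (illustrative): vldr(d0, r1, 8) fits the immediate form
// (imm8 = 8 / 4 = 2), whereas vldr(d0, r1, 1024) does not (1024 / 4 = 256
// is out of range) and the address is materialized through ip instead.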


void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}


void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  ASSERT(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}


void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
  }
}


void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}


void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Ssrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vsrc(15-12) | 1010(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vstr(const SwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}


void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | count
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | count
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}


static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
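
// For example, d = 1.0 (IEEE-754 bits 0x3FF0000000000000) yields
// lo = 0x00000000 and hi = 0x3FF00000.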

// Only works for little endian floating point formats.
// We don't support VFP on mixed endian floating point platforms.
static bool FitsVMOVDoubleImmediate(double d, uint32_t* encoding) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //   +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, composed of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //   [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //   [aBbbbbbb,bbcdefgh,00000000,00000000,
  //    00000000,00000000,00000000,00000000]
  //
  // where B = ~b.

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 61:54 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 62 must be NOT bit 61.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //   [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
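
// Worked example: 1.0 splits into hi = 0x3FF00000, lo = 0. All three
// checks pass (zero block, b-run all set, bit 62 = ~bit 61), and the
// encoded immediate is imm8 = 0x70, returned as *encoding = 0x00070000.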


void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Condition cond) {
  // Dd = immediate
  // Instruction details available in ARM DDI 0406B, A8-640.
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  uint32_t enc;
  if (FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
  } else {
    // Synthesize the double from ARM immediates. This could be implemented
    // using vldr from a constant pool.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (lo == hi) {
      // If the lo and hi parts of the double are equal, the literal is easier
      // to create. This is the case with 0.0.
      mov(ip, Operand(lo));
      vmov(dst, ip, ip);
    } else {
      // Move the low part of the double into the lower of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(lo));
      vmov(dst.low(), ip, cond);

      // Move the high part of the double into the higher of the corresponding
      // S registers of D register dst.
      mov(ip, Operand(hi));
      vmov(dst.high(), ip, cond);
    }
  }
}
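
// For example (illustrative): vmov(d0, 1.0) emits a single immediate-form
// VMOV, vmov(d0, 0.0) takes the lo == hi shortcut (one mov plus one
// two-register vmov), and a value like 0.1 falls back to two mov/vmov
// pairs through ip.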


void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xB*B20 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}


void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}


void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}


void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}


void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}


// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };


static bool IsSignedVFPType(VFPType type) {
  switch (type) {
    case S32:
      return true;
    case U32:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsIntegerVFPType(VFPType type) {
  switch (type) {
    case S32:
    case U32:
      return true;
    case F32:
    case F64:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsDoubleVFPType(VFPType type) {
  switch (type) {
    case F32:
      return false;
    case F64:
      return true;
    default:
      UNREACHABLE();
      return false;
  }
}


// Split five bit reg_code based on size of reg_type.
//   32-bit register codes are Vm:M
//   64-bit register codes are M:Vm
// where Vm is four bits, and M is a single bit.
static void SplitRegCode(VFPType reg_type,
                         int reg_code,
                         int* vm,
                         int* m) {
  ASSERT((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    // 32 bit type.
    *m  = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {
    // 64 bit type.
    *m  = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}
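
// For example, s7 (code 7, a 32-bit type) splits into vm = 3, m = 1,
// while d18 (code 18, a 64-bit type) splits into m = 1, vm = 2.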


// Encode vcvt.src_type.dst_type instruction.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
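
// For example, vcvt_f64_s32(d0, s2) reaches the integer branch with
// dst_type = F64 (Vd = 0, D = 0) and src_type = S32 (Vm = 1, M = 0),
// giving opc2 = 0x0, sz = 1, op = 1.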


void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}


void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vneg(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
       0x5*B9 | B8 | B6 | src.code());
}


void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
       0x5*B9 | B8 | 0x3*B6 | src.code());
}


void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}


void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}


void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}


void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison against zero.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(src2 == 0.0);
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 | B16 |
       src1.code()*B12 | 0x5*B9 | B8 | B6);
}


void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110(27-24) | 1110(23-20) | 0001(19-16) |
  // Rt(15-12) | 1010(11-8) | 0(7) | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110(27-24) | 1111(23-20) | 0001(19-16) |
  // Rt(15-12) | 1010(11-8) | 0(7) | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | 11(7-6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
       dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}


// Pseudo instructions.
void Assembler::nop(int type) {
  // This is mov rx, rx.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
  emit(al | 13*B21 | type*B12 | type);
}
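
// E.g. nop(0) emits 0xE1A00000 (mov r0, r0) and nop(1) emits 0xE1A01001
// (mov r1, r1), so the type survives in the instruction bits.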


bool Assembler::IsNop(Instr instr, int type) {
  // Check for mov rx, rx where x = type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
  return instr == (al | 13*B21 | type*B12 | type);
}


bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}


void Assembler::BlockConstPoolFor(int instructions) {
  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
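
// The growth policy above doubles small buffers and grows large ones
// linearly: for example, a 32KB buffer becomes 64KB, while a 2MB buffer
// becomes 3MB.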


void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  ASSERT(num_prinfo_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  ASSERT(num_prinfo_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
  // both checked here. Also, recursive calls to CheckConstPool are blocked by
  // no_const_pool_before_.
  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (const_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_const_pool_before_;
    }

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A5.6 (ARMv7) Instruction set encoding.
  emit(kConstantPoolMarker | num_prinfo_);

  // Emit constant pool entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
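
// Patching example (illustrative offsets): if a pending ldr sits at buffer
// offset 0x100 and its constant is emitted at offset 0x140, then
// delta = 0x140 - 0x100 - 8 = 0x38 (ARM's pc reads 8 bytes ahead), so the
// instruction is patched to ldr rX, [pc, #+0x38].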


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM