1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 // The original source code covered by the above license above has been modified
34 // significantly by Google Inc.
35 // Copyright 2006-2008 the V8 project authors. All rights reserved.
36
37 #include "v8.h"
38
39 #include "arm/assembler-arm-inl.h"
40 #include "serialize.h"
41
42 namespace v8 {
43 namespace internal {
44
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
//
// Register codes follow the ARM architectural numbering r0-r15; fp/ip/sp/lr/pc
// are the conventional names for r11-r15. A code of -1 marks "no register".

Register no_reg = { -1 };

Register r0 = { 0 };
Register r1 = { 1 };
Register r2 = { 2 };
Register r3 = { 3 };
Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
Register r8 = { 8 };
Register r9 = { 9 };
Register r10 = { 10 };
Register fp = { 11 };  // frame pointer
Register ip = { 12 };  // intra-procedure scratch; used as the assembler temp
Register sp = { 13 };  // stack pointer
Register lr = { 14 };  // link register
Register pc = { 15 };  // program counter


// Coprocessor registers cr0-cr15; -1 marks "no register".
CRegister no_creg = { -1 };

CRegister cr0 = { 0 };
CRegister cr1 = { 1 };
CRegister cr2 = { 2 };
CRegister cr3 = { 3 };
CRegister cr4 = { 4 };
CRegister cr5 = { 5 };
CRegister cr6 = { 6 };
CRegister cr7 = { 7 };
CRegister cr8 = { 8 };
CRegister cr9 = { 9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
86
87
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// Empty mask: no RelocInfo modes require special handling when generated
// code is moved on this platform.
const int RelocInfo::kApplyMask = 0;
92
93
// Overwrites instruction_count instructions at the current reloc address
// with the supplied instruction words. Not needed on ARM so far; calling
// it trips UNIMPLEMENTED().
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  UNIMPLEMENTED();
}
98
99
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not needed on ARM so far; calling it trips UNIMPLEMENTED().
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
106
107
108 // -----------------------------------------------------------------------------
109 // Implementation of Operand and MemOperand
110 // See assembler-arm-inl.h for inlined constructors
111
Operand(Handle<Object> handle)112 Operand::Operand(Handle<Object> handle) {
113 rm_ = no_reg;
114 // Verify all Objects referred by code are NOT in new space.
115 Object* obj = *handle;
116 ASSERT(!Heap::InNewSpace(obj));
117 if (obj->IsHeapObject()) {
118 imm32_ = reinterpret_cast<intptr_t>(handle.location());
119 rmode_ = RelocInfo::EMBEDDED_OBJECT;
120 } else {
121 // no relocation needed
122 imm32_ = reinterpret_cast<intptr_t>(obj);
123 rmode_ = RelocInfo::NONE;
124 }
125 }
126
127
Operand(Register rm,ShiftOp shift_op,int shift_imm)128 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
129 ASSERT(is_uint5(shift_imm));
130 ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
131 rm_ = rm;
132 rs_ = no_reg;
133 shift_op_ = shift_op;
134 shift_imm_ = shift_imm & 31;
135 if (shift_op == RRX) {
136 // encoded as ROR with shift_imm == 0
137 ASSERT(shift_imm == 0);
138 shift_op_ = ROR;
139 shift_imm_ = 0;
140 }
141 }
142
143
Operand(Register rm,ShiftOp shift_op,Register rs)144 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
145 ASSERT(shift_op != RRX);
146 rm_ = rm;
147 rs_ = no_reg;
148 shift_op_ = shift_op;
149 rs_ = rs;
150 }
151
152
MemOperand(Register rn,int32_t offset,AddrMode am)153 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
154 rn_ = rn;
155 rm_ = no_reg;
156 offset_ = offset;
157 am_ = am;
158 }
159
MemOperand(Register rn,Register rm,AddrMode am)160 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
161 rn_ = rn;
162 rm_ = rm;
163 shift_op_ = LSL;
164 shift_imm_ = 0;
165 am_ = am;
166 }
167
168
MemOperand(Register rn,Register rm,ShiftOp shift_op,int shift_imm,AddrMode am)169 MemOperand::MemOperand(Register rn, Register rm,
170 ShiftOp shift_op, int shift_imm, AddrMode am) {
171 ASSERT(is_uint5(shift_imm));
172 rn_ = rn;
173 rm_ = rm;
174 shift_op_ = shift_op;
175 shift_imm_ = shift_imm & 31;
176 am_ = am;
177 }
178
179
180 // -----------------------------------------------------------------------------
181 // Implementation of Assembler
182
// Instruction encoding bits.
// Several names alias the same bit because the meaning depends on the
// instruction class (e.g. L and S both name bit 20).
enum {
  H   = 1 << 5,   // halfword (or byte)
  S6  = 1 << 6,   // signed (or unsigned)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  // Raw single-bit constants, named after their bit position.
  B4  = 1 << 4,
  B5  = 1 << 5,
  B7  = 1 << 7,
  B8  = 1 << 8,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,
  // Reserved condition
  nv = 15 << 28
};
222
223
// Instruction-word patterns recognized by the peephole push/pop
// elimination in add(), ldr() and str() below.

// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;

// spare_buffer_: one retired minimal-size code buffer is cached here and
// reused by the next Assembler instead of reallocating.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
239
// Creates an assembler that emits into 'buffer'. If buffer is NULL the
// assembler allocates (or reuses the cached spare) buffer itself and owns
// it; otherwise the caller retains ownership.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // do our own buffer management
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the cached minimal-size buffer if one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // use externally provided buffer instead
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // setup buffer pointers: instructions grow up from the start of the
  // buffer, relocation info grows down from its end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
281
282
~Assembler()283 Assembler::~Assembler() {
284 if (own_buffer_) {
285 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
286 spare_buffer_ = buffer_;
287 } else {
288 DeleteArray(buffer_);
289 }
290 }
291 }
292
293
// Finalizes code generation and fills in 'desc' with the buffer layout.
void Assembler::GetCode(CodeDesc* desc) {
  // emit constant pool if necessary
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);  // all pending constant-pool entries flushed

  // setup desc: instructions occupy [buffer_, buffer_ + instr_size),
  // relocation info occupies the tail end of the buffer.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
305
306
Align(int m)307 void Assembler::Align(int m) {
308 ASSERT(m >= 4 && IsPowerOf2(m));
309 while ((pc_offset() & (m - 1)) != 0) {
310 nop();
311 }
312 }
313
314
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned).
// Real link positions are >= 0, so any negative value works; -4 keeps the
// terminator word-aligned like genuine positions.
const int kEndOfChain = -4;
328
329
// Returns the target position encoded by the instruction at pos: either
// the absolute position stored in an emitted label constant, or the
// branch target of a b/bl/blx at pos (which, for a linked label, is the
// next link in the chain).
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // << 8 then >> 6 both sign-extends the 24-bit immediate and scales it
  // by 4 (branch offsets are in words) in a single arithmetic shift.
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0)
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;

  // Branch offsets are relative to pc, which reads ahead by kPcLoadDelta.
  return pos + kPcLoadDelta + imm26;
}
344
345
// Patches the instruction at pos so that it refers to target_pos: either
// rewrites an emitted label constant, or re-encodes the imm24 field of a
// b/bl/blx branch.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);  // blx targets are halfword-aligned
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);  // b/bl targets are word-aligned
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
369
370
// Debug helper: prints the state of label L. For a linked label the whole
// link chain is walked (on a copy, so L itself is not consumed) and each
// referencing instruction is disassembled as a branch mnemonic or shown
// as an emitted label constant ("value").
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // walk a copy so the real label keeps its links
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;  // base mnemonic
        const char* c;  // condition suffix
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
427
428
// Binds L to position pos: walks the label's link chain and patches every
// instruction that referenced the label to point at the now-known target.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
443
444
// Appends appendix's link chain onto L's chain; afterwards appendix is
// unused. Used to merge two unresolved forward-reference chains.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // append appendix to L's list
      int fixup_pos;
      int link = L->pos();
      // Follow L's chain to its end: links are non-negative positions,
      // the terminator (kEndOfChain) is negative.
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
464
465
// Binds L to the current pc offset, resolving all pending references.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
470
471
next(Label * L)472 void Assembler::next(Label* L) {
473 ASSERT(L->is_linked());
474 int link = target_at(L->pos());
475 if (link > 0) {
476 L->link_to(link);
477 } else {
478 ASSERT(link == kEndOfChain);
479 L->Unuse();
480 }
481 }
482
483
484 // Low-level code emission routines depending on the addressing mode
fits_shifter(uint32_t imm32,uint32_t * rotate_imm,uint32_t * immed_8,Instr * instr)485 static bool fits_shifter(uint32_t imm32,
486 uint32_t* rotate_imm,
487 uint32_t* immed_8,
488 Instr* instr) {
489 // imm32 must be unsigned
490 for (int rot = 0; rot < 16; rot++) {
491 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
492 if ((imm8 <= 0xff)) {
493 *rotate_imm = rot;
494 *immed_8 = imm8;
495 return true;
496 }
497 }
498 // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
499 if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
500 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
501 *instr ^= 0x2*B21;
502 return true;
503 }
504 }
505 return false;
506 }
507
508
509 // We have to use the temporary register for things that can be relocated even
510 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
511 // space. There is no guarantee that the relocated location can be similarly
512 // encoded.
MustUseIp(RelocInfo::Mode rmode)513 static bool MustUseIp(RelocInfo::Mode rmode) {
514 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
515 return Serializer::enabled();
516 } else if (rmode == RelocInfo::NONE) {
517 return false;
518 }
519 return true;
520 }
521
522
// Addressing mode 1 (data-processing operands): completes 'instr' with the
// shifter-operand encoding for x and the rn/rd register fields, then emits
// it. Immediates that cannot be encoded (or must stay relocatable) are
// loaded from the constant pool via a pc-relative ldr into ip (or directly
// into rd for a plain mov).
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        // Load the value into ip, then retry with ip as the operand.
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // immediate shift
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // register shift
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // block constant pool emission for one instruction after reading pc
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
564
565
// Addressing mode 2 (word/byte load-store): completes 'instr' with the
// offset encoding for x and emits it. Negative immediate offsets are
// encoded by flipping the U (up/down) bit; offsets that do not fit in 12
// bits are materialized in ip first.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;  // encode magnitude, flip the direction bit
    }
    if (!is_uint12(offset_12)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
597
598
// Addressing mode 3 (halfword/signed-byte load-store): completes 'instr'
// with the offset encoding for x and emits it. Immediate offsets are
// limited to 8 bits (split into two nibbles in the encoding); larger
// offsets and scaled register offsets are materialized in ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;  // encode magnitude, flip the direction bit
    }
    if (!is_uint8(offset_8)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split into a high and a low nibble.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // register offset
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
637
638
// Addressing mode 4 (load/store multiple, ldm/stm): rl is the 16-bit
// register-list bitmask occupying the low half of the instruction word.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);     // register list must not be empty
  ASSERT(!rn.is(pc));  // pc is not a valid base register here
  emit(instr | rn.code()*B16 | rl);
}
645
646
// Addressing mode 5 (coprocessor load/store): the offset is an 8-bit
// word count, so x.offset_ must be a word-aligned byte offset that fits
// in 8 bits after scaling.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // unindexed addressing is not encoded by this function
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // bytes -> words
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;  // encode magnitude, flip the direction bit
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // post-indexed addressing requires W == 1; different than in addrmod2/3
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
670
671
// Returns the pc-relative offset to L's target for a branch emitted at the
// current pc. For an unbound label, the returned value encodes the previous
// link (or the chain terminator) and the current pc is linked into L's
// chain for later fix-up when the label is bound.
// NOTE(review): jump_elimination_allowed is unused in this implementation —
// presumably kept for cross-platform interface parity; confirm before use.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
690
691
label_at_put(Label * L,int at_offset)692 void Assembler::label_at_put(Label* L, int at_offset) {
693 int target_pos;
694 if (L->is_bound()) {
695 target_pos = L->pos();
696 } else {
697 if (L->is_linked()) {
698 target_pos = L->pos(); // L's link
699 } else {
700 target_pos = kEndOfChain;
701 }
702 L->link_to(at_offset);
703 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
704 }
705 }
706
707
// Branch instructions
// b<cond>: branch_offset is a pc-relative byte offset, must be
// word-aligned, and is encoded as a signed 24-bit word count.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // dead code is a good location to emit the constant pool
    CheckConstPool(false, false);
}
719
720
// bl<cond>: branch with link. Same encoding as b plus the L bit (B24);
// the return address is placed in lr.
void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}
727
728
// blx imm: branch with link and exchange (ARMv5+). Uses the nv condition
// (15 << 28); bit 1 of the halfword-aligned offset goes into the H bit
// (B24), the remaining word offset into imm24.
void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}
737
738
// blx rm: branch with link and exchange to the address in a register
// (ARMv5+).
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // branching to pc via blx makes no sense
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}
744
745
// bx rm: branch (and exchange instruction set) to the address in a
// register.
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
751
752
// Data-processing instructions
// Each wrapper passes its 4-bit opcode (N*B21, bits 21-24) plus the S bit
// and condition to addrmod1, which finishes the shifter-operand encoding.

// and: dst = src1 & src2 (opcode 0).
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}


// eor: dst = src1 ^ src2 (opcode 1).
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}


// sub: dst = src1 - src2 (opcode 2).
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}


// rsb (reverse subtract): dst = src2 - src1 (opcode 3).
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}
776
777
// add: dst = src1 + src2 (opcode 4). Additionally runs a peephole pass:
// a 'push(r)' immediately followed by the stack-popping add emitted here
// cancels out, so both instructions are removed.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // The window must not cross a bound label or recorded reloc info,
      // otherwise positions would refer to removed instructions.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
799
800
// adc: dst = src1 + src2 + carry (opcode 5).
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}


// sbc: dst = src1 - src2 - !carry (opcode 6).
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}


// rsc (reverse subtract with carry): dst = src2 - src1 - !carry (opcode 7).
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}


// The four comparison ops (opcodes 8-11) only set the condition flags:
// S is always set and the unused rd field is encoded as r0.

// tst: flags for src1 & src2.
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}


// teq: flags for src1 ^ src2.
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}


// cmp: flags for src1 - src2.
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}


// cmn: flags for src1 + src2.
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}
837
838
// orr: dst = src1 | src2 (opcode 12).
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 12*B21 | s, src1, dst, src2);
}


// mov: dst = src (opcode 13); the unused rn field is encoded as r0.
// A mov into pc is a jump, so the recorded source positions are flushed
// first.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}


// bic: dst = src1 & ~src2 (opcode 14).
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}


// mvn: dst = ~src (opcode 15); the unused rn field is encoded as r0.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}
862
863
// Multiply instructions
// None of the multiply encodings allow pc as an operand, and the long
// multiplies require distinct destination registers; the asserts below
// enforce both.

// mla: dst = src1 * src2 + srcA.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// mul: dst = src1 * src2.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


// smlal: signed 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// smull: signed 64-bit multiply, dstH:dstL = src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// umlal: unsigned 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// umull: unsigned 64-bit multiply, dstH:dstL = src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
931
932
// Miscellaneous arithmetic instructions

// clz: dst = number of leading zero bits in src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}
940
941
// Status register access instructions

// mrs: dst = CPSR or SPSR, selected by s.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
947
948
// msr: writes the selected status-register fields from an immediate or a
// register. Unencodable immediates are loaded into ip via the constant
// pool first, then the instruction is retried with ip as the source.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // immediate operand cannot be encoded, load it first to register ip
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
972
973
974 // Load/Store instructions
// Load a word from memory into dst (LDR), then run the push/pop peephole.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    // A load into pc is a jump/return; flush recorded source positions so
    // they are attributed to the code emitted up to this point.
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // The two instructions must not span a bound label...
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      // ...and must not carry relocation information.
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}
998
999
// Store a word from src into memory (STR), then run the pop/push peephole.
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //   add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  //   -> str r, [sp, 0], al
  // i.e. replace the discard-then-push pair with a single store over the
  // top-of-stack slot.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // The pair must not span a bound label or carry relocation info.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1019
1020
ldrb(Register dst,const MemOperand & src,Condition cond)1021 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1022 addrmod2(cond | B26 | B | L, dst, src);
1023 }
1024
1025
strb(Register src,const MemOperand & dst,Condition cond)1026 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1027 addrmod2(cond | B26 | B, src, dst);
1028 }
1029
1030
ldrh(Register dst,const MemOperand & src,Condition cond)1031 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1032 addrmod3(cond | L | B7 | H | B4, dst, src);
1033 }
1034
1035
strh(Register src,const MemOperand & dst,Condition cond)1036 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1037 addrmod3(cond | B7 | H | B4, src, dst);
1038 }
1039
1040
ldrsb(Register dst,const MemOperand & src,Condition cond)1041 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1042 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1043 }
1044
1045
ldrsh(Register dst,const MemOperand & src,Condition cond)1046 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1047 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1048 }
1049
1050
1051 // Load/Store multiple instructions
// Load multiple registers from memory at base into the dst register set
// (LDM), using block addressing mode am.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // emit the constant pool after a function return implemented by ldm ..{..pc}
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1071
1072
stm(BlockAddrMode am,Register base,RegList src,Condition cond)1073 void Assembler::stm(BlockAddrMode am,
1074 Register base,
1075 RegList src,
1076 Condition cond) {
1077 addrmod4(cond | B27 | am, base, src);
1078 }
1079
1080
1081 // Semaphore instructions
swp(Register dst,Register src,Register base,Condition cond)1082 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
1083 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1084 ASSERT(!dst.is(base) && !src.is(base));
1085 emit(cond | P | base.code()*B16 | dst.code()*B12 |
1086 B7 | B4 | src.code());
1087 }
1088
1089
swpb(Register dst,Register src,Register base,Condition cond)1090 void Assembler::swpb(Register dst,
1091 Register src,
1092 Register base,
1093 Condition cond) {
1094 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1095 ASSERT(!dst.is(base) && !src.is(base));
1096 emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
1097 B7 | B4 | src.code());
1098 }
1099
1100
1101 // Exception-generating instructions and debugging support
// Stops execution with a message. On the simulator the message pointer is
// embedded directly in a (never-executed-on-hardware) instruction word;
// on real ARM hardware a plain breakpoint is issued instead.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
  // The simulator handles these special instructions and stops execution.
  // NOTE(review): embedding the pointer in one instruction word assumes
  // 32-bit pointers — fine for this target, but worth keeping in mind.
  emit(15 << 28 | ((intptr_t) msg));
#else
  // Just issue a simple break instruction for now. Alternatively we could use
  // the swi(0x9f0001) instruction on Linux.
  bkpt(0);
#endif
}
1112
1113
bkpt(uint32_t imm16)1114 void Assembler::bkpt(uint32_t imm16) { // v5 and above
1115 ASSERT(is_uint16(imm16));
1116 emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
1117 }
1118
1119
swi(uint32_t imm24,Condition cond)1120 void Assembler::swi(uint32_t imm24, Condition cond) {
1121 ASSERT(is_uint24(imm24));
1122 emit(cond | 15*B24 | imm24);
1123 }
1124
1125
1126 // Coprocessor instructions
// Coprocessor data processing (CDP): asks coprocessor coproc to perform
// the operation selected by opcode_1/opcode_2 on crn and crm, writing crd.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  // CDP takes a 4-bit primary and a 3-bit secondary opcode.
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1138
1139
cdp2(Coprocessor coproc,int opcode_1,CRegister crd,CRegister crn,CRegister crm,int opcode_2)1140 void Assembler::cdp2(Coprocessor coproc,
1141 int opcode_1,
1142 CRegister crd,
1143 CRegister crn,
1144 CRegister crm,
1145 int opcode_2) { // v5 and above
1146 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
1147 }
1148
1149
// Move from ARM register rd to coprocessor register crn (MCR).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  // Unlike CDP, MCR's primary opcode is only 3 bits wide.
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1161
1162
mcr2(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2)1163 void Assembler::mcr2(Coprocessor coproc,
1164 int opcode_1,
1165 Register rd,
1166 CRegister crn,
1167 CRegister crm,
1168 int opcode_2) { // v5 and above
1169 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1170 }
1171
1172
// Move from coprocessor register crn to ARM register rd (MRC).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  // The L bit distinguishes MRC (coprocessor-to-ARM) from MCR.
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1184
1185
mrc2(Coprocessor coproc,int opcode_1,Register rd,CRegister crn,CRegister crm,int opcode_2)1186 void Assembler::mrc2(Coprocessor coproc,
1187 int opcode_1,
1188 Register rd,
1189 CRegister crn,
1190 CRegister crm,
1191 int opcode_2) { // v5 and above
1192 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1193 }
1194
1195
// Load from memory to coprocessor register crd (LDC), using addressing
// mode 5. l selects the long (multi-word) transfer variant.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1203
1204
// Load from memory to coprocessor register crd (LDC) with unindexed
// addressing: the 8-bit option field is passed through to the coprocessor.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  // U is set with P/W clear, which selects the unindexed form.
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1216
1217
ldc2(Coprocessor coproc,CRegister crd,const MemOperand & src,LFlag l)1218 void Assembler::ldc2(Coprocessor coproc,
1219 CRegister crd,
1220 const MemOperand& src,
1221 LFlag l) { // v5 and above
1222 ldc(coproc, crd, src, l, static_cast<Condition>(nv));
1223 }
1224
1225
ldc2(Coprocessor coproc,CRegister crd,Register rn,int option,LFlag l)1226 void Assembler::ldc2(Coprocessor coproc,
1227 CRegister crd,
1228 Register rn,
1229 int option,
1230 LFlag l) { // v5 and above
1231 ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1232 }
1233
1234
// Store coprocessor register crd to memory (STC), using addressing
// mode 5. l selects the long (multi-word) transfer variant.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1242
1243
// Store coprocessor register crd to memory (STC) with unindexed
// addressing: the 8-bit option field is passed through to the coprocessor.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  // U is set with P/W clear, which selects the unindexed form.
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1255
1256
stc2(Coprocessor coproc,CRegister crd,const MemOperand & dst,LFlag l)1257 void Assembler::stc2(Coprocessor
1258 coproc, CRegister crd,
1259 const MemOperand& dst,
1260 LFlag l) { // v5 and above
1261 stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1262 }
1263
1264
stc2(Coprocessor coproc,CRegister crd,Register rn,int option,LFlag l)1265 void Assembler::stc2(Coprocessor coproc,
1266 CRegister crd,
1267 Register rn,
1268 int option,
1269 LFlag l) { // v5 and above
1270 stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1271 }
1272
1273
1274 // Pseudo instructions
lea(Register dst,const MemOperand & x,SBit s,Condition cond)1275 void Assembler::lea(Register dst,
1276 const MemOperand& x,
1277 SBit s,
1278 Condition cond) {
1279 int am = x.am_;
1280 if (!x.rm_.is_valid()) {
1281 // immediate offset
1282 if ((am & P) == 0) // post indexing
1283 mov(dst, Operand(x.rn_), s, cond);
1284 else if ((am & U) == 0) // negative indexing
1285 sub(dst, x.rn_, Operand(x.offset_), s, cond);
1286 else
1287 add(dst, x.rn_, Operand(x.offset_), s, cond);
1288 } else {
1289 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1290 // register offset the constructors make sure than both shift_imm_
1291 // and shift_op_ are initialized.
1292 ASSERT(!x.rm_.is(pc));
1293 if ((am & P) == 0) // post indexing
1294 mov(dst, Operand(x.rn_), s, cond);
1295 else if ((am & U) == 0) // negative indexing
1296 sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1297 else
1298 add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1299 }
1300 }
1301
1302
1303 // Debugging
RecordComment(const char * msg)1304 void Assembler::RecordComment(const char* msg) {
1305 if (FLAG_debug_code) {
1306 CheckBuffer();
1307 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1308 }
1309 }
1310
1311
RecordPosition(int pos)1312 void Assembler::RecordPosition(int pos) {
1313 if (pos == RelocInfo::kNoPosition) return;
1314 ASSERT(pos >= 0);
1315 current_position_ = pos;
1316 }
1317
1318
RecordStatementPosition(int pos)1319 void Assembler::RecordStatementPosition(int pos) {
1320 if (pos == RelocInfo::kNoPosition) return;
1321 ASSERT(pos >= 0);
1322 current_statement_position_ = pos;
1323 }
1324
1325
// Flushes any pending statement/expression positions to the reloc info
// stream, deduplicating against what was last written.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
1344
1345
// Grows the code buffer, copying the already-emitted instructions (which
// grow upward from the start) and the reloc info (which grows downward
// from the end), then fixes up all pointers that referenced the old buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // compute new buffer size
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    // Double while small to amortize copies...
    desc.buffer_size = 2*buffer_size_;
  } else {
    // ...but grow linearly once large to bound memory waste.
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // setup new buffer
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // copy the data
  int pc_delta = desc.buffer - buffer_;
  // Reloc info sits at the end of the buffer, so its delta also accounts
  // for the size difference between the old and new buffers.
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // switch buffers
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // none of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries

  // relocate pending relocation entries
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
}
1393
1394
// Records relocation info at the current pc. Comment/position modes go
// straight to the reloc stream; all other modes additionally queue a
// pending constant-pool entry that will later back a pc-relative load.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::COMMENT && rmode <= RelocInfo::STATEMENT_POSITION) {
    // adjust code for new modes
    ASSERT(RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode));
    // these modes do not need an entry in the constant pool
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}
1419
1420
// Decides whether the pending constant pool must be emitted here and, if
// so, emits it: an optional jump over the pool, a marker word, and one
// word per pending entry. Each pending pc-relative ldr/str is patched
// with the now-known offset to its pool slot.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as possible
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker
  // "Undefined instruction" as specified by A3.1 Instruction set encoding
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());
    // Instruction to patch must be a ldr/str [pc, #offset]
    // P and U set, B and W clear, Rn == pc, offset12 still 0
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // ARM pc-relative addressing reads pc as the instruction address + 8.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offsets are encoded by clearing the U (up) bit.
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
1520
1521
1522 } } // namespace v8::internal
1523