1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "assembler_arm.h"
18
19 #include "base/logging.h"
20 #include "entrypoints/quick/quick_entrypoints.h"
21 #include "offsets.h"
22 #include "thread.h"
23 #include "utils.h"
24
25 namespace art {
26 namespace arm {
27
28 // Instruction encoding bits.
// Instruction encoding bits.
// NOTE: several mnemonic bits deliberately alias the same position (L and S
// at bit 20, W and A at bit 21, B and N at bit 22); which meaning applies
// depends on the instruction class being assembled.
enum {
  H = 1 << 5,   // halfword (or byte)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  // Single-bit constants: B<n> == 1 << n.  Used below to spell out fixed
  // opcode bit patterns one bit at a time.
  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,   // 24-bit branch offset field
  Off12Mask = (1 << 12) - 1,   // 12-bit load/store offset field

  // ldrex/strex register field encodings.
  kLdExRnShift = 16,
  kLdExRtShift = 12,
  kStrExRnShift = 16,
  kStrExRdShift = 12,
  kStrExRtShift = 0,
};
82
83
84 static const char* kRegisterNames[] = {
85 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
86 "fp", "ip", "sp", "lr", "pc"
87 };
operator <<(std::ostream & os,const Register & rhs)88 std::ostream& operator<<(std::ostream& os, const Register& rhs) {
89 if (rhs >= R0 && rhs <= PC) {
90 os << kRegisterNames[rhs];
91 } else {
92 os << "Register[" << static_cast<int>(rhs) << "]";
93 }
94 return os;
95 }
96
97
operator <<(std::ostream & os,const SRegister & rhs)98 std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
99 if (rhs >= S0 && rhs < kNumberOfSRegisters) {
100 os << "s" << static_cast<int>(rhs);
101 } else {
102 os << "SRegister[" << static_cast<int>(rhs) << "]";
103 }
104 return os;
105 }
106
107
operator <<(std::ostream & os,const DRegister & rhs)108 std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
109 if (rhs >= D0 && rhs < kNumberOfDRegisters) {
110 os << "d" << static_cast<int>(rhs);
111 } else {
112 os << "DRegister[" << static_cast<int>(rhs) << "]";
113 }
114 return os;
115 }
116
117
118 static const char* kConditionNames[] = {
119 "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
120 "LE", "AL",
121 };
operator <<(std::ostream & os,const Condition & rhs)122 std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
123 if (rhs >= EQ && rhs <= AL) {
124 os << kConditionNames[rhs];
125 } else {
126 os << "Condition[" << static_cast<int>(rhs) << "]";
127 }
128 return os;
129 }
130
// Appends one 32-bit instruction word to the assembler buffer, growing the
// buffer first if necessary (EnsureCapacity is a RAII guard around the emit).
void ArmAssembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
135
136
// Emits an ARM type 0/1 (data-processing) instruction.
//
// Assembles: condition field, instruction type (register vs. immediate
// shifter operand), 4-bit opcode, S (set-condition-codes) bit, first operand
// register Rn, destination register Rd, and the 12-bit shifter operand
// encoding produced by |so|.
void ArmAssembler::EmitType01(Condition cond,
                              int type,
                              Opcode opcode,
                              int set_cc,
                              Register rn,
                              Register rd,
                              ShifterOperand so) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding();
  Emit(encoding);
}
155
156
// Emits a type 5 (branch) instruction: condition, the fixed type-5 pattern,
// the link bit (BL vs. B), and a branch offset folded into the 24-bit
// offset field by EncodeBranchOffset.
void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
}
164
165
// Emits an addressing-mode-2 load/store (word or unsigned byte):
// B26 selects the single load/store class, L picks load vs. store, B picks
// byte vs. word, Rd is the data register, and |ad| supplies the base
// register / offset / indexing bits via its mode-2 encoding.
void ArmAssembler::EmitMemOp(Condition cond,
                             bool load,
                             bool byte,
                             Register rd,
                             Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}
181
182
// Emits an addressing-mode-3 load/store (halfword, signed byte/halfword,
// doubleword).  |mode| carries the instruction-specific bits (L, B7..B4
// pattern selecting the exact variant); |ad| supplies base/offset bits via
// its mode-3 encoding (encoding3).
void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
                                         int32_t mode,
                                         Register rd,
                                         Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B22 |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}
196
197
// Emits a load/store-multiple (LDM/STM class, B27): |am| carries the block
// addressing mode bits (increment/decrement, before/after, writeback),
// L picks load vs. store, |base| is Rn, and |regs| is the 16-bit register
// list occupying the low half of the instruction.
void ArmAssembler::EmitMultiMemOp(Condition cond,
                                  BlockAddressMode am,
                                  bool load,
                                  Register base,
                                  RegList regs) {
  CHECK_NE(base, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
213
214
// Emits a shift-by-immediate, encoded as a MOV data-processing instruction
// with the shift amount (so.encoding()) in the immediate-shift field and the
// shift kind in the shift-type field.  Requires an immediate-type shifter
// operand (so.type() == 1).
void ArmAssembler::EmitShiftImmediate(Condition cond,
                                      Shift opcode,
                                      Register rd,
                                      Register rm,
                                      ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 1U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
230
231
// Emits a shift-by-register, encoded as a MOV data-processing instruction
// with the shift-amount register (so.encoding()) in the Rs field; B4
// distinguishes register-shift from immediate-shift form.  Requires a
// register-type shifter operand (so.type() == 0).
void ArmAssembler::EmitShiftRegister(Condition cond,
                                     Shift opcode,
                                     Register rd,
                                     Register rm,
                                     ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 0U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
248
249
// Emits a (conditional) branch, with link when |link| is set.
// Bound label: the PC-relative offset is known and emitted directly.
// Unbound label: the branch is emitted with the label's current link-chain
// head in its offset field, and this site is prepended to the label's chain
// so it can be patched once the label is bound.
void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    EmitType5(cond, label->Position() - buffer_.Size(), link);
  } else {
    int position = buffer_.Size();
    // Use the offset field of the branch instruction for linking the sites.
    EmitType5(cond, label->position_, link);
    label->LinkTo(position);
  }
}
260
and_(Register rd,Register rn,ShifterOperand so,Condition cond)261 void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
262 Condition cond) {
263 EmitType01(cond, so.type(), AND, 0, rn, rd, so);
264 }
265
266
eor(Register rd,Register rn,ShifterOperand so,Condition cond)267 void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
268 Condition cond) {
269 EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
270 }
271
272
sub(Register rd,Register rn,ShifterOperand so,Condition cond)273 void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
274 Condition cond) {
275 EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
276 }
277
rsb(Register rd,Register rn,ShifterOperand so,Condition cond)278 void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
279 Condition cond) {
280 EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
281 }
282
rsbs(Register rd,Register rn,ShifterOperand so,Condition cond)283 void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
284 Condition cond) {
285 EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
286 }
287
288
add(Register rd,Register rn,ShifterOperand so,Condition cond)289 void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
290 Condition cond) {
291 EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
292 }
293
294
adds(Register rd,Register rn,ShifterOperand so,Condition cond)295 void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
296 Condition cond) {
297 EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
298 }
299
300
subs(Register rd,Register rn,ShifterOperand so,Condition cond)301 void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
302 Condition cond) {
303 EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
304 }
305
306
adc(Register rd,Register rn,ShifterOperand so,Condition cond)307 void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
308 Condition cond) {
309 EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
310 }
311
312
sbc(Register rd,Register rn,ShifterOperand so,Condition cond)313 void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
314 Condition cond) {
315 EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
316 }
317
318
rsc(Register rd,Register rn,ShifterOperand so,Condition cond)319 void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
320 Condition cond) {
321 EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
322 }
323
324
tst(Register rn,ShifterOperand so,Condition cond)325 void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
326 CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
327 EmitType01(cond, so.type(), TST, 1, rn, R0, so);
328 }
329
330
teq(Register rn,ShifterOperand so,Condition cond)331 void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
332 CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
333 EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
334 }
335
336
cmp(Register rn,ShifterOperand so,Condition cond)337 void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
338 EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
339 }
340
341
cmn(Register rn,ShifterOperand so,Condition cond)342 void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
343 EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
344 }
345
346
orr(Register rd,Register rn,ShifterOperand so,Condition cond)347 void ArmAssembler::orr(Register rd, Register rn,
348 ShifterOperand so, Condition cond) {
349 EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
350 }
351
352
orrs(Register rd,Register rn,ShifterOperand so,Condition cond)353 void ArmAssembler::orrs(Register rd, Register rn,
354 ShifterOperand so, Condition cond) {
355 EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
356 }
357
358
mov(Register rd,ShifterOperand so,Condition cond)359 void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
360 EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
361 }
362
363
movs(Register rd,ShifterOperand so,Condition cond)364 void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
365 EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
366 }
367
368
bic(Register rd,Register rn,ShifterOperand so,Condition cond)369 void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
370 Condition cond) {
371 EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
372 }
373
374
mvn(Register rd,ShifterOperand so,Condition cond)375 void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
376 EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
377 }
378
379
mvns(Register rd,ShifterOperand so,Condition cond)380 void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
381 EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
382 }
383
384
// Emits CLZ (count leading zeros): rd <- number of leading zero bits in rm.
// Neither register may be PC (unpredictable per the architecture, enforced
// by the CHECKs).  The 0xf fields are the should-be-one filler fields of the
// CLZ encoding.
void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  CHECK_NE(rd, PC);
  CHECK_NE(rm, PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}
397
398
// Emits MOVW: rd <- imm16 (upper halfword zeroed).  The 16-bit immediate is
// split into a 4-bit field at bits [19:16] and a 12-bit field at bits [11:0].
void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}


// Emits MOVT: writes imm16 into the top halfword of rd, leaving the bottom
// halfword unchanged.  Same immediate split as movw; B22 selects the T form.
void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
415
416
// Emits a multiply-class instruction.  |opcode| selects the variant (MUL,
// MLA, MLS, UMULL, ... via B21..B23); B7|B4 is the fixed multiply signature.
// NOTE: the parameter names here are the ENCODING field names (rn lands in
// bits [19:16], rd in [15:12], rs in [11:8], rm in [3:0]); callers remap
// their assembler-level operands into these fields — see mul()/mla()/umull().
void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
                             Register rd, Register rn,
                             Register rm, Register rs) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(rs, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
434
435
mul(Register rd,Register rn,Register rm,Condition cond)436 void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
437 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
438 EmitMulOp(cond, 0, R0, rd, rn, rm);
439 }
440
441
mla(Register rd,Register rn,Register rm,Register ra,Condition cond)442 void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
443 Condition cond) {
444 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
445 EmitMulOp(cond, B21, ra, rd, rn, rm);
446 }
447
448
mls(Register rd,Register rn,Register rm,Register ra,Condition cond)449 void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
450 Condition cond) {
451 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
452 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
453 }
454
455
umull(Register rd_lo,Register rd_hi,Register rn,Register rm,Condition cond)456 void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
457 Register rm, Condition cond) {
458 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
459 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
460 }
461
462
ldr(Register rd,Address ad,Condition cond)463 void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
464 EmitMemOp(cond, true, false, rd, ad);
465 }
466
467
str(Register rd,Address ad,Condition cond)468 void ArmAssembler::str(Register rd, Address ad, Condition cond) {
469 EmitMemOp(cond, false, false, rd, ad);
470 }
471
472
ldrb(Register rd,Address ad,Condition cond)473 void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
474 EmitMemOp(cond, true, true, rd, ad);
475 }
476
477
strb(Register rd,Address ad,Condition cond)478 void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
479 EmitMemOp(cond, false, true, rd, ad);
480 }
481
482
ldrh(Register rd,Address ad,Condition cond)483 void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
484 EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
485 }
486
487
strh(Register rd,Address ad,Condition cond)488 void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
489 EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
490 }
491
492
ldrsb(Register rd,Address ad,Condition cond)493 void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
494 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
495 }
496
497
ldrsh(Register rd,Address ad,Condition cond)498 void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
499 EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
500 }
501
502
ldrd(Register rd,Address ad,Condition cond)503 void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
504 CHECK_EQ(rd % 2, 0);
505 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
506 }
507
508
strd(Register rd,Address ad,Condition cond)509 void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
510 CHECK_EQ(rd % 2, 0);
511 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
512 }
513
514
ldm(BlockAddressMode am,Register base,RegList regs,Condition cond)515 void ArmAssembler::ldm(BlockAddressMode am,
516 Register base,
517 RegList regs,
518 Condition cond) {
519 EmitMultiMemOp(cond, am, true, base, regs);
520 }
521
522
stm(BlockAddressMode am,Register base,RegList regs,Condition cond)523 void ArmAssembler::stm(BlockAddressMode am,
524 Register base,
525 RegList regs,
526 Condition cond) {
527 EmitMultiMemOp(cond, am, false, base, regs);
528 }
529
530
// Emits LDREX: exclusive load of [rn] into rt, marking the address for a
// subsequent strex.  The B11..B0 pattern is the fixed 0xF9F low halfword of
// the LDREX encoding; rn and rt go in the ldrex-specific field positions.
void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}
544
545
// Emits STREX: exclusive store of rt to [rn]; rd receives the status
// (0 on success, 1 if the exclusive monitor was lost).  The B11..B4 pattern
// is the fixed 0xF9 part of the STREX encoding; rt occupies bits [3:0].
void ArmAssembler::strex(Register rd,
                         Register rt,
                         Register rn,
                         Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
563
564
// Emits CLREX (clear the local exclusive monitor).  CLREX is unconditional:
// it uses the special 0xF condition field, hence kSpecialCondition.
void ArmAssembler::clrex() {
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}


// Emits a (conditional) NOP-equivalent using the architected hint encoding.
void ArmAssembler::nop(Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}
578
579
// Emits VMOV sn <- rt (core register to single-precision register).
// The S register number is split: its upper 4 bits go in the Vn field
// (bits [19:16], via *B16) and its low bit in the N bit (bit 7, via *B7).
// rt may not be SP or PC (unpredictable per the architecture).
void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}


// Emits VMOV rt <- sn (single-precision register to core register).
// Identical encoding to vmovsr except B20, which flips the transfer
// direction.
void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
608
609
// Emits VMOV {sm, sm+1} <- {rt, rt2} (two core registers into a consecutive
// pair of single-precision registers).  sm may not be S31 because sm+1 must
// also exist; the S register number is split into a 4-bit field (bits [3:0])
// and the M bit (bit 5, via *B5).  Neither core register may be SP or PC.
void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


// Emits VMOV {rt, rt2} <- {sm, sm+1} (the reverse transfer; B20 flips the
// direction).  rt and rt2 must additionally be distinct, since both are
// written.
void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
651
652
// Emits VMOV dm <- {rt, rt2} (two core registers into one double-precision
// register).  The D register number is split into a 4-bit field (bits [3:0])
// and the M bit (bit 5); B8 distinguishes the double form from the
// two-singles form.  Neither core register may be SP or PC.
void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


// Emits VMOV {rt, rt2} <- dm (the reverse transfer; B20 flips the
// direction).  rt and rt2 must be distinct, since both are written.
void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
692
693
// Emits VLDR.32 sd <- [address].  The S register's low bit is the D bit
// (bit 22) and its upper bits form the Vd field; |ad| supplies base and
// immediate via its VFP addressing encoding (vencoding).
void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


// Emits VSTR.32 [address] <- sd.  Same shape as vldrs without B20 (the load
// bit).  The base register extracted from |ad| may not be PC.
void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}
717
718
// Emits VLDR.64 dd <- [address].  The D register's high bit goes in bit 22
// and its low 4 bits form the Vd field; B8 selects the double-precision
// form.
void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}


// Emits VSTR.64 [address] <- dd.  Same shape as vldrd without B20 (the load
// bit).  The base register extracted from |ad| may not be PC.
void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
742
743
// Emits a three-operand single-precision VFP data-processing instruction.
// |opcode| carries the variant-specific bits.  Each 5-bit S register number
// is split into a 4-bit Vd/Vn/Vm field plus a single extension bit
// (D at bit 22, N at bit 7, M at bit 5).
void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
                              SRegister sd, SRegister sn, SRegister sm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
760
761
// Emits a three-operand double-precision VFP data-processing instruction
// (B8 set relative to the single-precision form).  Each 5-bit D register
// number is split into a 4-bit Vd/Vn/Vm field plus a single extension bit
// (D at bit 22, N at bit 7, M at bit 5) — note the split is the opposite of
// the S-register case: here the HIGH bit is the extension bit.
void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
                              DRegister dd, DRegister dn, DRegister dm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(dn, kNoDRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
778
779
vmovs(SRegister sd,SRegister sm,Condition cond)780 void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
781 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
782 }
783
784
vmovd(DRegister dd,DRegister dm,Condition cond)785 void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
786 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
787 }
788
789
// Tries to emit VMOV.F32 sd <- #s_imm.  Returns false (emitting nothing) if
// the value is not representable as a VFP 8-bit immediate.  A float is
// representable when its low 19 fraction bits are zero and exponent bits
// [30:25] are all-equal (100000 or 011111); the 8-bit immediate is then
// sign, exponent bit 29, and the top 6 fraction bits, packed into the
// instruction's two 4-bit immediate fields.
bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) -1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}


// Tries to emit VMOV.F64 dd <- #d_imm; returns false if not representable.
// Same scheme as the float case, scaled for doubles: low 48 fraction bits
// must be zero and exponent bits [62:54] all-equal (100000000 or 011111111);
// imm8 is sign, exponent bit 61, and the top 6 fraction bits.
bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) -1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
818
819
vadds(SRegister sd,SRegister sn,SRegister sm,Condition cond)820 void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
821 Condition cond) {
822 EmitVFPsss(cond, B21 | B20, sd, sn, sm);
823 }
824
825
vaddd(DRegister dd,DRegister dn,DRegister dm,Condition cond)826 void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
827 Condition cond) {
828 EmitVFPddd(cond, B21 | B20, dd, dn, dm);
829 }
830
831
vsubs(SRegister sd,SRegister sn,SRegister sm,Condition cond)832 void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
833 Condition cond) {
834 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
835 }
836
837
vsubd(DRegister dd,DRegister dn,DRegister dm,Condition cond)838 void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
839 Condition cond) {
840 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
841 }
842
843
vmuls(SRegister sd,SRegister sn,SRegister sm,Condition cond)844 void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
845 Condition cond) {
846 EmitVFPsss(cond, B21, sd, sn, sm);
847 }
848
849
vmuld(DRegister dd,DRegister dn,DRegister dm,Condition cond)850 void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
851 Condition cond) {
852 EmitVFPddd(cond, B21, dd, dn, dm);
853 }
854
855
vmlas(SRegister sd,SRegister sn,SRegister sm,Condition cond)856 void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
857 Condition cond) {
858 EmitVFPsss(cond, 0, sd, sn, sm);
859 }
860
861
vmlad(DRegister dd,DRegister dn,DRegister dm,Condition cond)862 void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
863 Condition cond) {
864 EmitVFPddd(cond, 0, dd, dn, dm);
865 }
866
867
vmlss(SRegister sd,SRegister sn,SRegister sm,Condition cond)868 void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
869 Condition cond) {
870 EmitVFPsss(cond, B6, sd, sn, sm);
871 }
872
873
vmlsd(DRegister dd,DRegister dn,DRegister dm,Condition cond)874 void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
875 Condition cond) {
876 EmitVFPddd(cond, B6, dd, dn, dm);
877 }
878
879
vdivs(SRegister sd,SRegister sn,SRegister sm,Condition cond)880 void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
881 Condition cond) {
882 EmitVFPsss(cond, B23, sd, sn, sm);
883 }
884
885
vdivd(DRegister dd,DRegister dn,DRegister dm,Condition cond)886 void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
887 Condition cond) {
888 EmitVFPddd(cond, B23, dd, dn, dm);
889 }
890
891
vabss(SRegister sd,SRegister sm,Condition cond)892 void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
893 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
894 }
895
896
vabsd(DRegister dd,DRegister dm,Condition cond)897 void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
898 EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
899 }
900
901
vnegs(SRegister sd,SRegister sm,Condition cond)902 void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
903 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
904 }
905
906
vnegd(DRegister dd,DRegister dm,Condition cond)907 void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
908 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
909 }
910
911
void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  // VSQRT.F32: sd = sqrt(sm).  S0 fills the unused Vn field.
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}
915
void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  // VSQRT.F64: dd = sqrt(dm).  D0 fills the unused Vn field.
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
919
920
// Emits a VFP data-processing instruction with a single-precision destination
// and a double-precision source operand (e.g. VCVT.F32.F64, VCVT from double).
void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
                             SRegister sd, DRegister dm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  // S register numbers are split: low bit -> D (bit 22), high bits -> Vd
  // (bits 12-15).  D register numbers are split: bit 4 -> M (bit 5), low
  // four bits -> Vm (bits 0-3).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
934
935
// Emits a VFP data-processing instruction with a double-precision destination
// and a single-precision source operand (e.g. VCVT.F64.F32, VCVT from int).
void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
                             DRegister dd, SRegister sm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  // D register numbers are split: bit 4 -> D (bit 22), low four bits -> Vd
  // (bits 12-15).  S register numbers are split: low bit -> M (bit 5),
  // high bits -> Vm (bits 0-3).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
949
950
void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  // VCVT.F32.F64: narrow double dm to single sd.
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}
954
955
void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  // VCVT.F64.F32: widen single sm to double dd.
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}
959
960
void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  // VCVT: single-precision float sm to signed 32-bit integer in sd
  // (per the mnemonic; S0 fills the unused Vn field).
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}
964
965
void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  // VCVT: double-precision float dm to signed 32-bit integer in sd.
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}
969
970
void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  // VCVT: signed 32-bit integer in sm to single-precision float in sd.
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}
974
975
void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  // VCVT: signed 32-bit integer in sm to double-precision float in dd.
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}
979
980
void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  // VCVT: single-precision float sm to unsigned 32-bit integer in sd.
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}
984
985
void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  // VCVT: double-precision float dm to unsigned 32-bit integer in sd.
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}
989
990
void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  // VCVT: unsigned 32-bit integer in sm to single-precision float in sd.
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}
994
995
void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  // VCVT: unsigned 32-bit integer in sm to double-precision float in dd.
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}
999
1000
void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  // VCMP.F32: compare sd with sm, setting the FPSCR flags (read them back
  // into APSR with vmstat).
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}
1004
1005
void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  // VCMP.F64: compare dd with dm, setting the FPSCR flags.
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}
1009
1010
void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
  // VCMP.F32 sd, #0.0: compare against zero (B16 selects the zero variant;
  // the register fields are don't-cares filled with S0).
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}
1014
1015
void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
  // VCMP.F64 dd, #0.0: compare against zero.
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
1019
1020
void ArmAssembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
  // Copies the FPSCR condition flags (set by vcmps/vcmpd) into the APSR so
  // ordinary conditional instructions can test them.  Encoding PC in the Rt
  // field selects the APSR_nzcv destination.
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(PC)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
1029
1030
// Supervisor call (software interrupt) with a 24-bit immediate payload.
// Always emitted unconditionally (AL).
void ArmAssembler::svc(uint32_t imm24) {
  CHECK(IsUint(24, imm24)) << imm24;
  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
  Emit(encoding);
}
1036
1037
// Breakpoint instruction.  The 16-bit immediate is split across the encoding:
// the upper 12 bits go in bits 8-19, the low nibble in bits 0-3.
void ArmAssembler::bkpt(uint16_t imm16) {
  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  Emit(encoding);
}
1043
1044
b(Label * label,Condition cond)1045 void ArmAssembler::b(Label* label, Condition cond) {
1046 EmitBranch(cond, label, false);
1047 }
1048
1049
bl(Label * label,Condition cond)1050 void ArmAssembler::bl(Label* label, Condition cond) {
1051 EmitBranch(cond, label, true);
1052 }
1053
1054
// BLX (register): branch with link and exchange to the address in |rm|.
void ArmAssembler::blx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  // The 0xfff field is the SBO (should-be-one) bits of the encoding; B5
  // distinguishes BLX from BX.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
1063
// BX (register): branch and exchange to the address in |rm| (no link).
void ArmAssembler::bx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
1072
// Marks an exception-handler entry: emits a TST marker instruction followed
// by an (always skipped) branch whose target records |label|'s address in
// the instruction stream.
void ArmAssembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
  Label l;
  b(&l);  // Jump over the embedded handler-address word.
  EmitBranch(AL, label, false);
  Bind(&l);
}
1080
1081
// Binds |label| to the current buffer position and back-patches every branch
// that was emitted against it while it was unbound.  Unresolved branches form
// a linked list threaded through their own offset fields: each instruction's
// offset field holds the position of the previous unresolved branch.
void ArmAssembler::Bind(Label* label) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    // 'next' is the raw instruction; its offset field links to the next
    // branch waiting on this label.
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = ArmAssembler::DecodeBranchOffset(next);
  }
  label->BindTo(bound_pc);
}
1094
1095
// Encodes |data| into the instruction stream as a sequence of TST
// instructions carrying one byte each.  The VS/MI conditions act as markers:
// VS-conditioned TSTs carry continuation bytes, the final MI-conditioned TST
// carries the last byte.  The instructions are data carriers, not meant to
// be executed for effect.
void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
  // TODO: Consider using movw ip, <16 bits>.
  while (!IsUint(8, data)) {
    tst(R0, ShifterOperand(data & 0xFF), VS);
    data >>= 8;
  }
  tst(R0, ShifterOperand(data), MI);
}
1104
1105
EncodeBranchOffset(int offset,int32_t inst)1106 int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
1107 // The offset is off by 8 due to the way the ARM CPUs read PC.
1108 offset -= 8;
1109 CHECK_ALIGNED(offset, 4);
1110 CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
1111
1112 // Properly preserve only the bits supported in the instruction.
1113 offset >>= 2;
1114 offset &= kBranchOffsetMask;
1115 return (inst & ~kBranchOffsetMask) | offset;
1116 }
1117
1118
DecodeBranchOffset(int32_t inst)1119 int ArmAssembler::DecodeBranchOffset(int32_t inst) {
1120 // Sign-extend, left-shift by 2, then add 8.
1121 return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
1122 }
1123
void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
  // In-place form: rd := rd + value.
  AddConstant(rd, rd, value, cond);
}
1127
1128
// rd := rn + value.  Materializes the constant with the shortest available
// sequence; falls back to the IP scratch register for constants no shifter
// operand can encode, so rn must not be IP in that case.
void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
                               Condition cond) {
  if (value == 0) {
    // Nothing to add; at most a register move.
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than selecting add for
  // positive values and sub for negatives ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      // The bitwise complement is encodable: load it via mvn, then add.
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Worst case: build the full 32-bit constant with movw/movt.
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1163
1164
// Flag-setting variant of AddConstant: rd := rn + value with the condition
// codes updated (adds/subs).  Uses IP as scratch for unencodable constants,
// so rn must not be IP in that case.  Note: unlike AddConstant, value == 0
// still emits an adds so the flags are set.
void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                       Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adds(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    subs(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      // The bitwise complement is encodable: load it via mvn, then adds.
      mvn(IP, shifter_op, cond);
      adds(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      subs(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Worst case: build the full 32-bit constant with movw/movt.
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adds(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1190
1191
// Loads the 32-bit constant |value| into |rd| using the shortest sequence:
// a single mov/mvn when a shifter operand can encode the value (or its
// complement), otherwise movw plus an optional movt.
void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    mov(rd, shifter_op, cond);
  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
    mvn(rd, shifter_op, cond);
  } else {
    movw(rd, Low16Bits(value), cond);
    uint16_t value_high = High16Bits(value);
    if (value_high != 0) {
      // Only needed when the top half is non-zero; movw zeroes it.
      movt(rd, value_high, cond);
    }
  }
}
1206
1207
// Returns true if |offset| fits in the immediate field of the ARM load
// instruction used for |type|; callers must otherwise materialize the
// address in a register first (see LoadFromOffset).
bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      // VFP immediates are 8 bits scaled by 4, hence 10 bits of byte offset.
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1226
1227
// Returns true if |offset| fits in the immediate field of the ARM store
// instruction used for |type| (mirror of CanHoldLoadOffset).
bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      // VFP immediates are 8 bits scaled by 4, hence 10 bits of byte offset.
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1244
1245
1246 // Implementation note: this method must emit at most one instruction when
1247 // Address::CanHoldLoadOffset.
// Loads a value of |type| from [base + offset] into |reg|.  When the offset
// does not fit the instruction's immediate field, the effective address is
// first computed into IP (so base must not be IP in that case).
void ArmAssembler::LoadFromOffset(LoadOperandType type,
                                  Register reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldLoadOffset(type, offset)) {
    CHECK(base != IP);
    // IP := base + offset, then load with a zero displacement.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(type, offset));
  switch (type) {
    case kLoadSignedByte:
      ldrsb(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedByte:
      ldrb(reg, Address(base, offset), cond);
      break;
    case kLoadSignedHalfword:
      ldrsh(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedHalfword:
      ldrh(reg, Address(base, offset), cond);
      break;
    case kLoadWord:
      ldr(reg, Address(base, offset), cond);
      break;
    case kLoadWordPair:
      ldrd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1284
1285 // Implementation note: this method must emit at most one instruction when
1286 // Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads a single-precision VFP register from [base + offset], computing the
// address into IP first when the offset exceeds the VFP immediate range.
void ArmAssembler::LoadSFromOffset(SRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
  vldrs(reg, Address(base, offset), cond);
}
1301
1302 // Implementation note: this method must emit at most one instruction when
1303 // Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads a double-precision VFP register from [base + offset], computing the
// address into IP first when the offset exceeds the VFP immediate range.
void ArmAssembler::LoadDFromOffset(DRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
  vldrd(reg, Address(base, offset), cond);
}
1318
1319 // Implementation note: this method must emit at most one instruction when
1320 // Address::CanHoldStoreOffset.
// Stores |reg| of |type| to [base + offset].  When the offset does not fit
// the instruction's immediate field, the effective address is computed into
// IP first — so neither reg nor base may be IP in that case.
void ArmAssembler::StoreToOffset(StoreOperandType type,
                                 Register reg,
                                 Register base,
                                 int32_t offset,
                                 Condition cond) {
  if (!Address::CanHoldStoreOffset(type, offset)) {
    CHECK(reg != IP);
    CHECK(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(type, offset));
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1352
1353 // Implementation note: this method must emit at most one instruction when
1354 // Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
// Stores a single-precision VFP register to [base + offset], computing the
// address into IP first when the offset exceeds the VFP immediate range.
void ArmAssembler::StoreSToOffset(SRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
  vstrs(reg, Address(base, offset), cond);
}
1369
1370 // Implementation note: this method must emit at most one instruction when
1371 // Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
// Stores a double-precision VFP register to [base + offset], computing the
// address into IP first when the offset exceeds the VFP immediate range.
void ArmAssembler::StoreDToOffset(DRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
  vstrd(reg, Address(base, offset), cond);
}
1386
Push(Register rd,Condition cond)1387 void ArmAssembler::Push(Register rd, Condition cond) {
1388 str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
1389 }
1390
Pop(Register rd,Condition cond)1391 void ArmAssembler::Pop(Register rd, Condition cond) {
1392 ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
1393 }
1394
void ArmAssembler::PushList(RegList regs, Condition cond) {
  // stmdb sp!, {regs}: store multiple, decrement-before with writeback.
  stm(DB_W, SP, regs, cond);
}
1398
void ArmAssembler::PopList(RegList regs, Condition cond) {
  // ldmia sp!, {regs}: load multiple, increment-after with writeback.
  ldm(IA_W, SP, regs, cond);
}
1402
Mov(Register rd,Register rm,Condition cond)1403 void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
1404 if (rd != rm) {
1405 mov(rd, ShifterOperand(rm), cond);
1406 }
1407 }
1408
// rd := rm << shift_imm (logical shift left by immediate, via mov).
void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
}
1414
// rd := rm >> shift_imm (logical shift right by immediate, via mov).
void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
  // In the encoding a shift amount of 0 denotes LSR #32.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
}
1421
// rd := rm >> shift_imm (arithmetic shift right by immediate, via mov).
void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
  // In the encoding a shift amount of 0 denotes ASR #32.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
}
1428
// rd := rm rotated right by shift_imm (via mov).  A rotate of 0 encodes
// RRX instead — use Rrx() for that.
void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Use Rrx instruction.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}
1434
// rd := rm rotated right by one bit through the carry flag (RRX); encoded
// as ROR with a zero shift amount.
void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, ShifterOperand(rm, ROR, 0), cond);
}
1438
// Emits the managed-code method prologue: pushes LR and the callee-save
// registers, grows the stack to |frame_size|, stores the Method* (in R0) at
// SP, then spills the incoming argument registers above the frame.
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs,
                              const std::vector<ManagedRegister>& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList push_list = 1 << LR;
  size_t pushed_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    push_list |= 1 << reg;
    pushed_values++;
  }
  PushList(push_list);

  // Increase frame to required size.
  CHECK_GT(frame_size, pushed_values * kPointerSize);  // Must be at least space to push Method*
  size_t adjust = frame_size - (pushed_values * kPointerSize);
  IncreaseFrameSize(adjust);

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills: argument registers are stored just above this
  // method's frame (frame_size + kPointerSize skips the return-address slot
  // — TODO confirm against the calling convention).
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
    StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
  }
}
1469
// Emits the managed-code method epilogue: shrinks the stack back to the
// callee-save area, then pops the callee saves with PC in place of LR so
// the pop also returns.
void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  // Compute callee saves to pop and PC
  RegList pop_list = 1 << PC;
  size_t pop_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    pop_list |= 1 << reg;
    pop_values++;
  }

  // Decrease frame to start of callee saves
  CHECK_GT(frame_size, pop_values * kPointerSize);
  size_t adjust = frame_size - (pop_values * kPointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saves and PC
  PopList(pop_list);
}
1490
IncreaseFrameSize(size_t adjust)1491 void ArmAssembler::IncreaseFrameSize(size_t adjust) {
1492 AddConstant(SP, -adjust);
1493 }
1494
DecreaseFrameSize(size_t adjust)1495 void ArmAssembler::DecreaseFrameSize(size_t adjust) {
1496 AddConstant(SP, adjust);
1497 }
1498
// Spills |msrc| into the frame slot at |dest|, choosing the store width by
// the register kind: core register (4 bytes), register pair (8 bytes as two
// word stores), or S/D VFP register.
void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    // Nothing to store; only valid for a zero-sized value.
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}
1518
StoreRef(FrameOffset dest,ManagedRegister msrc)1519 void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
1520 ArmManagedRegister src = msrc.AsArm();
1521 CHECK(src.IsCoreRegister()) << src;
1522 StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
1523 }
1524
StoreRawPtr(FrameOffset dest,ManagedRegister msrc)1525 void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
1526 ArmManagedRegister src = msrc.AsArm();
1527 CHECK(src.IsCoreRegister()) << src;
1528 StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
1529 }
1530
// Stores |msrc| at |dest| and copies the word at |in_off| into the adjacent
// slot at dest + 4, using |mscratch| for the copy.
void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}
1539
// Copies a reference word between two frame slots through |mscratch|.
void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
1546
LoadRef(ManagedRegister mdest,ManagedRegister base,MemberOffset offs)1547 void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
1548 MemberOffset offs) {
1549 ArmManagedRegister dst = mdest.AsArm();
1550 CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
1551 LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
1552 base.AsArm().AsCoreRegister(), offs.Int32Value());
1553 }
1554
LoadRef(ManagedRegister mdest,FrameOffset src)1555 void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
1556 ArmManagedRegister dst = mdest.AsArm();
1557 CHECK(dst.IsCoreRegister()) << dst;
1558 LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
1559 }
1560
LoadRawPtr(ManagedRegister mdest,ManagedRegister base,Offset offs)1561 void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
1562 Offset offs) {
1563 ArmManagedRegister dst = mdest.AsArm();
1564 CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
1565 LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
1566 base.AsArm().AsCoreRegister(), offs.Int32Value());
1567 }
1568
// Stores the 32-bit constant |imm| into the frame slot at |dest|,
// materializing it in |mscratch| first.
void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                         ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
1576
// Stores the 32-bit constant |imm| into the Thread object at |dest|
// (TR-relative), materializing it in |mscratch| first.
void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                          ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}
1584
// Shared helper for the Load() overloads: fills |m_dst| from
// [src_register + src_offset], choosing the load width by the destination
// register kind (mirror of ArmAssembler::Store).
static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
                     Register src_register, int32_t src_offset, size_t size) {
  ArmManagedRegister dst = m_dst.AsArm();
  if (dst.IsNoRegister()) {
    // Nothing to load; only valid for a zero-sized value.
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsCoreRegister()) {
    CHECK_EQ(4u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
  } else if (dst.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
  } else if (dst.IsSRegister()) {
    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
  } else {
    CHECK(dst.IsDRegister()) << dst;
    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
  }
}
1604
Load(ManagedRegister m_dst,FrameOffset src,size_t size)1605 void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
1606 return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
1607 }
1608
Load(ManagedRegister m_dst,ThreadOffset src,size_t size)1609 void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
1610 return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
1611 }
1612
LoadRawPtrFromThread(ManagedRegister m_dst,ThreadOffset offs)1613 void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
1614 ArmManagedRegister dst = m_dst.AsArm();
1615 CHECK(dst.IsCoreRegister()) << dst;
1616 LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
1617 }
1618
// Copies a pointer-sized word from the Thread object (TR + thr_offs) into a
// frame slot (SP + fr_offs) through |mscratch|.
void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                        ThreadOffset thr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}
1629
// Copies a pointer-sized word from a frame slot (SP + fr_offs) into the
// Thread object (TR + thr_offs) through |mscratch|.
void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
                                      FrameOffset fr_offs,
                                      ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}
1640
// Computes the address SP + fr_offs into |mscratch| and stores it into the
// Thread object at |thr_offs|.
void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}
1650
// Stores the current SP into the Thread object at |thr_offs|.
void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}
1654
// Intentionally unimplemented: on ARM, narrow values are kept widened in
// registers, so no explicit sign extension is required.
void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}
1658
// Intentionally unimplemented: on ARM, narrow values are kept widened in
// registers, so no explicit zero extension is required.
void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}
1662
// Register-to-register move between same-kind managed registers (core, D,
// S, or core pair).  A move to self is a no-op.
void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
  ArmManagedRegister dst = m_dst.AsArm();
  ArmManagedRegister src = m_src.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dst.IsDRegister()) {
      CHECK(src.IsDRegister()) << src;
      vmovd(dst.AsDRegister(), src.AsDRegister());
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      vmovs(dst.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(src.IsRegisterPair()) << src;
      // Ensure that the first move doesn't clobber the input of the second
      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        // Overlapping pair: move high first so low's source survives.
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}
1690
// Copies |size| bytes (4 or 8) between two frame slots, one word at a time
// through |mscratch|.
void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
  }
}
1705
Copy(FrameOffset dest,ManagedRegister src_base,Offset src_offset,ManagedRegister mscratch,size_t size)1706 void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
1707 ManagedRegister mscratch, size_t size) {
1708 Register scratch = mscratch.AsArm().AsCoreRegister();
1709 CHECK_EQ(size, 4u);
1710 LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
1711 StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
1712 }
1713
Copy(ManagedRegister dest_base,Offset dest_offset,FrameOffset src,ManagedRegister mscratch,size_t size)1714 void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
1715 ManagedRegister mscratch, size_t size) {
1716 Register scratch = mscratch.AsArm().AsCoreRegister();
1717 CHECK_EQ(size, 4u);
1718 LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
1719 StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
1720 }
1721
// Unimplemented copy variant (frame slot from [frame base + offset]);
// aborts via UNIMPLEMENTED(FATAL) if ever reached.
void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}
1726
Copy(ManagedRegister dest,Offset dest_offset,ManagedRegister src,Offset src_offset,ManagedRegister mscratch,size_t size)1727 void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
1728 ManagedRegister src, Offset src_offset,
1729 ManagedRegister mscratch, size_t size) {
1730 CHECK_EQ(size, 4u);
1731 Register scratch = mscratch.AsArm().AsCoreRegister();
1732 LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
1733 StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
1734 }
1735
// Unimplemented copy variant (offset-from-frame-slot to offset-from-frame-slot);
// aborts via UNIMPLEMENTED(FATAL) if ever reached.
void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
                        ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}
1740
1741
// Emits a full memory barrier. The mechanism is chosen at build time:
//  - DMB instruction where the toolchain advertises __ARM_HAVE_DMB;
//  - else the CP15 c7/c10/5 "data memory barrier" coprocessor write;
//  - else a call to the kernel-provided kuser_memory_barrier helper at the
//    fixed address 0xffff0fa0.
// On non-SMP builds (ANDROID_SMP == 0) no code is emitted at all.
// The scratch register must be R12 because the fallback paths clobber it.
void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
  CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
#if ANDROID_SMP != 0
#if defined(__ARM_HAVE_DMB)
  int32_t encoding = 0xf57ff05f;  // dmb
  Emit(encoding);
#elif defined(__ARM_HAVE_LDREX_STREX)
  LoadImmediate(R12, 0);
  int32_t encoding = 0xee07cfba;  // mcr p15, 0, r12, c7, c10, 5
  Emit(encoding);
#else
  LoadImmediate(R12, 0xffff0fa0);  // kuser_memory_barrier
  blx(R12);
#endif
#endif
}
1758
CreateSirtEntry(ManagedRegister mout_reg,FrameOffset sirt_offset,ManagedRegister min_reg,bool null_allowed)1759 void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
1760 FrameOffset sirt_offset,
1761 ManagedRegister min_reg, bool null_allowed) {
1762 ArmManagedRegister out_reg = mout_reg.AsArm();
1763 ArmManagedRegister in_reg = min_reg.AsArm();
1764 CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
1765 CHECK(out_reg.IsCoreRegister()) << out_reg;
1766 if (null_allowed) {
1767 // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
1768 // the address in the SIRT holding the reference.
1769 // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
1770 if (in_reg.IsNoRegister()) {
1771 LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1772 SP, sirt_offset.Int32Value());
1773 in_reg = out_reg;
1774 }
1775 cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1776 if (!out_reg.Equals(in_reg)) {
1777 LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1778 }
1779 AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
1780 } else {
1781 AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
1782 }
1783 }
1784
CreateSirtEntry(FrameOffset out_off,FrameOffset sirt_offset,ManagedRegister mscratch,bool null_allowed)1785 void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
1786 FrameOffset sirt_offset,
1787 ManagedRegister mscratch,
1788 bool null_allowed) {
1789 ArmManagedRegister scratch = mscratch.AsArm();
1790 CHECK(scratch.IsCoreRegister()) << scratch;
1791 if (null_allowed) {
1792 LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
1793 sirt_offset.Int32Value());
1794 // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
1795 // the address in the SIRT holding the reference.
1796 // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
1797 cmp(scratch.AsCoreRegister(), ShifterOperand(0));
1798 AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
1799 } else {
1800 AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
1801 }
1802 StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
1803 }
1804
LoadReferenceFromSirt(ManagedRegister mout_reg,ManagedRegister min_reg)1805 void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
1806 ManagedRegister min_reg) {
1807 ArmManagedRegister out_reg = mout_reg.AsArm();
1808 ArmManagedRegister in_reg = min_reg.AsArm();
1809 CHECK(out_reg.IsCoreRegister()) << out_reg;
1810 CHECK(in_reg.IsCoreRegister()) << in_reg;
1811 Label null_arg;
1812 if (!out_reg.Equals(in_reg)) {
1813 LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1814 }
1815 cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1816 LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1817 in_reg.AsCoreRegister(), 0, NE);
1818 }
1819
// Intentionally a no-op: reference verification is not implemented for ARM.
void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}
1823
// Intentionally a no-op: reference verification is not implemented for ARM.
void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}
1827
Call(ManagedRegister mbase,Offset offset,ManagedRegister mscratch)1828 void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
1829 ManagedRegister mscratch) {
1830 ArmManagedRegister base = mbase.AsArm();
1831 ArmManagedRegister scratch = mscratch.AsArm();
1832 CHECK(base.IsCoreRegister()) << base;
1833 CHECK(scratch.IsCoreRegister()) << scratch;
1834 LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1835 base.AsCoreRegister(), offset.Int32Value());
1836 blx(scratch.AsCoreRegister());
1837 // TODO: place reference map on call
1838 }
1839
Call(FrameOffset base,Offset offset,ManagedRegister mscratch)1840 void ArmAssembler::Call(FrameOffset base, Offset offset,
1841 ManagedRegister mscratch) {
1842 ArmManagedRegister scratch = mscratch.AsArm();
1843 CHECK(scratch.IsCoreRegister()) << scratch;
1844 // Call *(*(SP + base) + offset)
1845 LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1846 SP, base.Int32Value());
1847 LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1848 scratch.AsCoreRegister(), offset.Int32Value());
1849 blx(scratch.AsCoreRegister());
1850 // TODO: place reference map on call
1851 }
1852
// Unimplemented call variant (call through a thread-local entrypoint offset);
// aborts via UNIMPLEMENTED(FATAL) if ever reached.
void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);
}
1856
// Copies the thread register TR (which holds the current Thread*) into the
// requested core register.
void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
}
1860
// Stores the thread register TR (current Thread*) into the given stack slot.
// No scratch register is needed since TR can be stored directly.
void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister /*scratch*/) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}
1865
ExceptionPoll(ManagedRegister mscratch,size_t stack_adjust)1866 void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
1867 ArmManagedRegister scratch = mscratch.AsArm();
1868 ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
1869 buffer_.EnqueueSlowPath(slow);
1870 LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1871 TR, Thread::ExceptionOffset().Int32Value());
1872 cmp(scratch.AsCoreRegister(), ShifterOperand(0));
1873 b(slow->Entry(), NE);
1874 }
1875
Emit(Assembler * sasm)1876 void ArmExceptionSlowPath::Emit(Assembler* sasm) {
1877 ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
1878 #define __ sp_asm->
1879 __ Bind(&entry_);
1880 if (stack_adjust_ != 0) { // Fix up the frame.
1881 __ DecreaseFrameSize(stack_adjust_);
1882 }
1883 // Pass exception object as argument
1884 // Don't care about preserving R0 as this call won't return
1885 __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
1886 // Set up call to Thread::Current()->pDeliverException
1887 __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
1888 __ blx(R12);
1889 // Call never returns
1890 __ bkpt(0);
1891 #undef __
1892 }
1893
1894 } // namespace arm
1895 } // namespace art
1896