// Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// This is forked from Dart revision df52deea9f25690eb8b66c5995da92b70f7ac1fe
// Please update the (git) revision if we merge changes from Dart.
// https://code.google.com/p/dart/wiki/GettingTheSource

#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_ARM)

#include "vm/assembler.h"
#include "vm/cpu.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"

// An extra check since we are assuming the existence of /proc/cpuinfo below.
#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID)
#error ARM cross-compile only supported on Linux
#endif

namespace dart {

DECLARE_FLAG(bool, allow_absolute_addresses);
DEFINE_FLAG(bool, print_stop_message, true, "Print stop message.");
DECLARE_FLAG(bool, inline_alloc);

#if 0
// Moved to encodeImmRegOffsetEnc3 in IceAssemblerARM32.cpp
uint32_t Address::encoding3() const {
  if (kind_ == Immediate) {
    uint32_t offset = encoding_ & kOffset12Mask;
    ASSERT(offset < 256);
    return (encoding_ & ~kOffset12Mask) | B22 |
           ((offset & 0xf0) << 4) | (offset & 0xf);
  }
  ASSERT(kind_ == IndexRegister);
  return encoding_;
}
#endif

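// Encodes this address for a VFP load/store (ARM addressing mode 5):
// Rn stays in bits 19:16, bit 23 is the U (add/subtract) flag, and the
// byte offset is stored as offset / 4 in the imm8 field (bits 7:0).
// Illustrative examples: Address(R1, 8) -> Rn = 1, U = 1, imm8 = 2;
// Address(R1, -8) -> Rn = 1, U = 0, imm8 = 2.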
uint32_t Address::vencoding() const {
  ASSERT(kind_ == Immediate);
  uint32_t offset = encoding_ & kOffset12Mask;
  ASSERT(offset < (1 << 10));          // In the range 0 to +1020.
  ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4.
  int mode = encoding_ & ((8 | 4 | 1) << 21);
  ASSERT((mode == Offset) || (mode == NegOffset));
  uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
  if (mode == Offset) {
    vencoding |= 1 << 23;
  }
  return vencoding;
}

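// Fills [data, data + length) with breakpoint instructions, one 32-bit
// word at a time, so that control flow into uninitialized code traps
// immediately.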
void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
  ASSERT(Utils::IsAligned(data, 4));
  ASSERT(Utils::IsAligned(length, 4));
  const uword end = data + length;
  while (data < end) {
    *reinterpret_cast<int32_t *>(data) = Instr::kBreakPointInstruction;
    data += 4;
  }
}

void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

#if 0
// Moved to ARM32::AssemblerARM32::emitType01()
void Assembler::EmitType01(Condition cond,
                           int type,
                           Opcode opcode,
                           int set_cc,
                           Register rn,
                           Register rd,
                           Operand o) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitType05()
void Assembler::EmitType5(Condition cond, int32_t offset, bool link) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(Assembler::EncodeBranchOffset(offset, encoding));
}

// Moved to ARM32::AssemblerARM32::emitMemOp()
void Assembler::EmitMemOp(Condition cond,
                          bool load,
                          bool byte,
                          Register rd,
                          Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 | (ad.kind() == Address::Immediate ? 0 : B25) |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}

// Moved to AssemblerARM32::emitMemOpEnc3();
void Assembler::EmitMemOpAddressMode3(Condition cond,
                                      int32_t mode,
                                      Register rd,
                                      Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitMultiMemOp()
void Assembler::EmitMultiMemOp(Condition cond,
                               BlockAddressMode am,
                               bool load,
                               Register base,
                               RegList regs) {
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
#endif

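// ARM has no standalone shift opcodes; a shift is a MOV whose Operand2
// carries the shift. EmitShiftImmediate puts the 5-bit shift amount in
// bits 11:7; EmitShiftRegister puts the shift-amount register in bits
// 11:8 and sets bit 4 to select the register-shifted form.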
void Assembler::EmitShiftImmediate(Condition cond, Shift opcode, Register rd,
                                   Register rm, Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 1);
  int32_t encoding =
      static_cast<int32_t>(cond) << kConditionShift |
      static_cast<int32_t>(MOV) << kOpcodeShift |
      static_cast<int32_t>(rd) << kRdShift | o.encoding() << kShiftImmShift |
      static_cast<int32_t>(opcode) << kShiftShift | static_cast<int32_t>(rm);
  Emit(encoding);
}

void Assembler::EmitShiftRegister(Condition cond, Shift opcode, Register rd,
                                  Register rm, Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 0);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift | B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}

#if 0
// Moved to ARM32::AssemblerARM32::and_()
void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), AND, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::eor()
void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::sub()
void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::rsb()
void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::rsb()
void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::add()
void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::add()
void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::sub()
void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::adc()
void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::adc()
void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
}
#endif

void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
}

void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
}

#if 0
// Moved to ARM32::AssemblerARM32::rsc()
void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::tst()
void Assembler::tst(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TST, 1, rn, R0, o);
}
#endif

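// TEQ sets the flags from rn ^ Operand2 without writing a destination;
// the S bit is always 1 and the rd field is ignored (R0 is a filler).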
void Assembler::teq(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
}

#if 0
// Moved to ARM32::AssemblerARM32::cmp()
void Assembler::cmp(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
}

// Moved to ARM32::AssemblerARM32::cmn()
void Assembler::cmn(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
}

// Moved to ARM32::AssemblerARM32::orr()
void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::orr()
void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::mov()
// TODO(kschimpf) other forms of move.
void Assembler::mov(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
}
#endif

void Assembler::movs(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
}

#if 0
// Moved to ARM32::AssemblerARM32::bic()
void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::bic()
void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::mvn()
void Assembler::mvn(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
}

// Moved to ARM32::AssemblerARM32::mvn()
void Assembler::mvns(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
}

// Moved to ARM32::AssemblerARM32::clz()
void Assembler::clz(Register rd, Register rm, Condition cond) {
  ASSERT(rd != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(rd != PC);
  ASSERT(rm != PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::movw()
void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}


// Moved to ARM32::AssemblerARM32::movt()
void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitMulOp()
void Assembler::EmitMulOp(Condition cond, int32_t opcode,
                          Register rd, Register rn,
                          Register rm, Register rs) {
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(rs != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::mul()
void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}
#endif

// Like mul, but sets condition flags.
void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
  EmitMulOp(cond, B20, R0, rd, rn, rm);
}

#if 0
// Moved to ARM32::AssemblerARM32::mla()
void Assembler::mla(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // rd <- ra + rn * rm.
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::mla()
void Assembler::mls(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // rd <- ra - rn * rm.
  if (TargetCPUFeatures::arm_version() == ARMv7) {
    // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
    EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
  } else {
    mul(IP, rn, rm, cond);
    sub(rd, ra, Operand(IP), cond);
  }
}
#endif

void Assembler::smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
                      Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
}

#if 0
// Moved to ARM32::AssemblerARM32::umull()
void Assembler::umull(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}
#endif

void Assembler::umlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
                      Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
}

void Assembler::umaal(Register rd_lo, Register rd_hi, Register rn,
                      Register rm) {
  ASSERT(rd_lo != IP);
  ASSERT(rd_hi != IP);
  ASSERT(rn != IP);
  ASSERT(rm != IP);
  if (TargetCPUFeatures::arm_version() != ARMv5TE) {
    // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
    EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
  } else {
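    // UMAAL (rd_hi:rd_lo = rn * rm + rd_lo + rd_hi) requires ARMv6, so
    // synthesize it here: umlal computes IP:rd_lo = rn * rm + rd_lo,
    // then the adds/adc pair folds in the old rd_hi, propagating the
    // carry into the new rd_hi.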
    mov(IP, Operand(0));
    umlal(rd_lo, IP, rn, rm);
    adds(rd_lo, rd_lo, Operand(rd_hi));
    adc(rd_hi, IP, Operand(0));
  }
}

#if 0
// Moved to ARM32::AssemblerARM32::emitDivOp()
void Assembler::EmitDivOp(Condition cond, int32_t opcode,
                          Register rd, Register rn, Register rm) {
  ASSERT(TargetCPUFeatures::integer_division_supported());
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode |
    (static_cast<int32_t>(cond) << kConditionShift) |
    (static_cast<int32_t>(rn) << kDivRnShift) |
    (static_cast<int32_t>(rd) << kDivRdShift) |
      // TODO(kschimpf): Why not also: B15 | B14 | B13 | B12?
    B26 | B25 | B24 | B20 | B4 |
    (static_cast<int32_t>(rm) << kDivRmShift);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::sdiv()
void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, 0, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::udiv()
void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, B21, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::ldr()
void Assembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}

// Moved to ARM32::AssemblerARM32::str()
void Assembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}

// Moved to ARM32::AssemblerARM32::ldr()
void Assembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}

// Moved to ARM32::AssemblerARM32::str()
void Assembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}
#endif

void Assembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}

void Assembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}

void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}

void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}

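// Loads the register pair rd, rd+1 (rd must be even). On ARMv5TE the
// pair is split into two single-word loads; newer architectures emit a
// single addressing-mode-3 LDRD. strd below is the mirror image.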
void Assembler::ldrd(Register rd, Register rn, int32_t offset, Condition cond) {
  ASSERT((rd % 2) == 0);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    const Register rd2 = static_cast<Register>(static_cast<int32_t>(rd) + 1);
    ldr(rd, Address(rn, offset), cond);
    ldr(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
  }
}

void Assembler::strd(Register rd, Register rn, int32_t offset, Condition cond) {
  ASSERT((rd % 2) == 0);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    const Register rd2 = static_cast<Register>(static_cast<int32_t>(rd) + 1);
    str(rd, Address(rn, offset), cond);
    str(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
  }
}

#if 0
// Folded into ARM32::AssemblerARM32::popList(), since it is its only
// use (and doesn't implement ARM STM instruction).
void Assembler::ldm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, true, base, regs);
}

// Folded into ARM32::AssemblerARM32::pushList(), since it is its only
// use (and doesn't implement ARM STM instruction).
void Assembler::stm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, false, base, regs);
}

// Moved to ARM32::AssemblerARM32::ldrex();
void Assembler::ldrex(Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L   |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::strex();
void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rd != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
#endif

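// CLREX clears the local exclusive monitor set by LDREX. It is always
// unconditional: kSpecialCondition (0b1111) occupies the condition
// field.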
void Assembler::clrex() {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |
                     B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}

#if 0
// Moved to ARM32::AssemblerARM32::nop().
void Assembler::nop(Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovsr().
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovrs().
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
#endif

void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

void Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

#if 0
// Moved to ARM32::AssemblerARM32::vmovdqir().
void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT((i == 0) || (i == 1));
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(dn != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     (i*B21) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) | B4;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovdrr().
void Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovrrd().
void Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vldrs()
void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vstrs()
void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}

void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
#endif

void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) |
                     ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
                     ad.vencoding();
  Emit(encoding);
}

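// VLDM/VSTM over S registers: the start register's low bit goes in the
// D bit, its upper four bits in Vd (bits 15:12), and imm8 (bits 7:0)
// holds the number of registers transferred.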
void Assembler::EmitMultiVSMemOp(Condition cond, BlockAddressMode am, bool load,
                                 Register base, SRegister start,
                                 uint32_t count) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(start != kNoSRegister);
  ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters);

  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B11 | B9 | am | (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     ((static_cast<int32_t>(start) & 0x1) ? D : 0) |
                     ((static_cast<int32_t>(start) >> 1) << 12) | count;
  Emit(encoding);
}

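// VLDM/VSTM over D registers transfer 2 * count words, so imm8 holds
// count << 1. On ARMv5TE the low bit of imm8 is set as well, selecting
// the FLDMX/FSTMX encoding that ARMv5TE-era VFP requires for
// doubleword register lists.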
void Assembler::EmitMultiVDMemOp(Condition cond, BlockAddressMode am, bool load,
                                 Register base, DRegister start,
                                 int32_t count) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(start != kNoDRegister);
  ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters);
  const int armv5te = TargetCPUFeatures::arm_version() == ARMv5TE ? 1 : 0;

  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 |
      B8 | am | (load ? L : 0) | (static_cast<int32_t>(base) << kRnShift) |
      ((static_cast<int32_t>(start) & 0x10) ? D : 0) |
      ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | armv5te;
  Emit(encoding);
}

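// Illustrative usage (with SP as the base register): vstms(DB_W, SP,
// S0, S3, AL) pushes S0..S3 onto the stack, and vldms(IA_W, SP, S0,
// S3, AL) pops them back.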
void Assembler::vldms(BlockAddressMode am, Register base, SRegister first,
                      SRegister last, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(last > first);
  EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1);
}

void Assembler::vstms(BlockAddressMode am, Register base, SRegister first,
                      SRegister last, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(last > first);
  EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1);
}

void Assembler::vldmd(BlockAddressMode am, Register base, DRegister first,
                      intptr_t count, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(count <= 16);
  ASSERT(first + count <= kNumberOfDRegisters);
  EmitMultiVDMemOp(cond, am, true, base, first, count);
}

void Assembler::vstmd(BlockAddressMode am, Register base, DRegister first,
                      intptr_t count, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(count <= 16);
  ASSERT(first + count <= kNumberOfDRegisters);
  EmitMultiVDMemOp(cond, am, false, base, first, count);
}

#if 0
// Moved to ARM32::AssemblerARM32::emitVFPsss
void Assembler::EmitVFPsss(Condition cond, int32_t opcode,
                           SRegister sd, SRegister sn, SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(sn != kNoSRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitVFPddd
void Assembler::EmitVFPddd(Condition cond, int32_t opcode,
                           DRegister dd, DRegister dn, DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(dn != kNoDRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovss()
void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vmovdd()
void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::vmovs()
bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) - 1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}

// Moved to ARM32::AssemblerARM32::vmovd()
bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) - 1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}

// Moved to ARM32::AssemblerARM32::vadds()
void Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vaddd()
void Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vsubs()
void Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vsubd()
void Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmuls()
void Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmuld()
void Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmlas()
void Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmlad()
void Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmlss()
void Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmlsd()
void Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vdivs()
void Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vdivd()
void Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vabss().
void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vabsd().
void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}
#endif

void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}

void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vsqrts().
void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vsqrtd().
void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::emitVFPsd
void Assembler::EmitVFPsd(Condition cond, int32_t opcode,
                          SRegister sd, DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitVFPds
void Assembler::EmitVFPds(Condition cond, int32_t opcode,
                          DRegister dd, SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vcvtsd().
void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}

// Moved to ARM32::AssemblerARM32::vcvtds().
void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtis()
void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}
#endif

void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vcvtsi()
void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtdi()
void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtus().
void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtud().
void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}

// Moved to ARM32::AssemblerARM32::vcvtsu()
void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtdu()
void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcmps().
void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcmpd().
void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::vcmpsz().
void Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}

// Moved to ARM32::AssemblerARM32::vcmpdz().
void Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}

// APSR_nzcv version moved to ARM32::AssemblerARM32::vmrsAPSR_nzcv()
void Assembler::vmrs(Register rd, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(rd)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
#endif

void Assembler::vmstat(Condition cond) { vmrs(APSR, cond); }

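// Maps an operand size to log2 of its width in bytes, as used by the
// NEON size field; kSWord and kDWord return 0 because the float forms
// encode their size elsewhere in the instruction.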
static inline int ShiftOfOperandSize(OperandSize size) {
  switch (size) {
  case kByte:
  case kUnsignedByte:
    return 0;
  case kHalfword:
  case kUnsignedHalfword:
    return 1;
  case kWord:
  case kUnsignedWord:
    return 2;
  case kWordPair:
    return 3;
  case kSWord:
  case kDWord:
    return 0;
  default:
    UNREACHABLE();
    break;
  }

  UNREACHABLE();
  return -1;
}

#if 0
// Moved to ARM32::AssemblerARM32::emitSIMDqqq()
void Assembler::EmitSIMDqqq(int32_t opcode, OperandSize size,
                            QRegister qd, QRegister qn, QRegister qm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) |
      B25 | B6 |
      opcode | ((sz & 0x3) * B20) |
      ((static_cast<int32_t>(qd * 2) >> 4)*B22) |
      ((static_cast<int32_t>(qn * 2) & 0xf)*B16) |
      ((static_cast<int32_t>(qd * 2) & 0xf)*B12) |
      ((static_cast<int32_t>(qn * 2) >> 4)*B7) |
      ((static_cast<int32_t>(qm * 2) >> 4)*B5) |
      (static_cast<int32_t>(qm * 2) & 0xf);
  Emit(encoding);
}
#endif

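// NEON three-register form over D registers: kSpecialCondition (0b1111)
// makes the instruction unconditional, the element size sits in bits
// 21:20, and each register splits into a 4-bit field plus a high bit
// (the D, N, and M bits).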
void Assembler::EmitSIMDddd(int32_t opcode, OperandSize size, DRegister dd,
                            DRegister dn, DRegister dm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 |
      opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

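// A register-to-register VMOV of a Q register is encoded as
// VORR qd, qm, qm; note it uses the same opcode bits (B21 | B8 | B4)
// as vorrq in the disabled block below.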
void Assembler::vmovq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vaddqi().
void Assembler::vaddqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vaddqf().
void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
}
#endif

void Assembler::vsubqi(OperandSize sz, QRegister qd, QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
}

void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vmulqi().
void Assembler::vmulqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vmulqf().
void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vshlqi().
void Assembler::vshlqi(OperandSize sz,
                       QRegister qd, QRegister qm, QRegister qn) {
  EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
}


// Moved to ARM32::AssemblerARM32::vshlqu().
void Assembler::vshlqu(OperandSize sz,
                       QRegister qd, QRegister qm, QRegister qn) {
  EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::veorq()
void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vorrq()
void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
}
#endif

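// VORN: qd = qn | ~qm.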
void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vandq()
void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
}

void Assembler::vmvnq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
}
#endif

void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}

void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vabsq().
void Assembler::vabsqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord,
              qd, Q0, qm);
}

// Moved to ARM32::AssemblerARM32::vnegqs().
void Assembler::vnegqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
              qd, Q0, qm);
}
#endif

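// VRECPE yields a low-precision reciprocal estimate; each VRECPS step
// computes 2 - d * x, the factor of a Newton-Raphson iteration
// (x' = x * (2 - d * x)), roughly doubling the precision. vrsqrte and
// vrsqrts below follow the same estimate-and-refine pattern for
// 1 / sqrt(d).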
void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd,
              Q0, qm);
}

void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}

void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord,
              qd, Q0, qm);
}

void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}

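// VDUP (scalar): the imm4 field encodes element size and index together
// (xxx1 = byte index xxx, xx10 = halfword index xx, x100 = word index
// x). The computed code is passed through the Dn slot of EmitSIMDddd,
// which places it in bits 19:16.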
void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) {
  ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair));
  int code = 0;

  switch (sz) {
  case kByte:
  case kUnsignedByte: {
    ASSERT((idx >= 0) && (idx < 8));
    code = 1 | (idx << 1);
    break;
  }
  case kHalfword:
  case kUnsignedHalfword: {
    ASSERT((idx >= 0) && (idx < 4));
    code = 2 | (idx << 2);
    break;
  }
  case kWord:
  case kUnsignedWord: {
    ASSERT((idx >= 0) && (idx < 2));
    code = 4 | (idx << 3);
    break;
  }
  default: {
    break;
  }
  }

  EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair,
              static_cast<DRegister>(qd * 2),
              static_cast<DRegister>(code & 0xf), dm);
}

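// VTBL: a table lookup over len consecutive D registers starting at dn;
// len - 1 is encoded in bits 9:8 and dm supplies the byte indices.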
vtbl(DRegister dd,DRegister dn,int len,DRegister dm)1353 void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) {
1354   ASSERT((len >= 1) && (len <= 4));
1355   EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
1356 }
1357 
vzipqw(QRegister qd,QRegister qm)1358 void Assembler::vzipqw(QRegister qd, QRegister qm) {
1359   EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
1360 }
1361 
1362 #if 0
1363 // Moved to Arm32::AssemblerARM32::vceqqi().
1364 void Assembler::vceqqi(OperandSize sz,
1365                       QRegister qd, QRegister qn, QRegister qm) {
1366   EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
1367 }
1368 
1369 // Moved to Arm32::AssemblerARM32::vceqqi().
1370 void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
1371   EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
1372 }
1373 
1374 // Moved to Arm32::AssemblerARM32::vcgeqi().
1375 void Assembler::vcgeqi(OperandSize sz,
1376                       QRegister qd, QRegister qn, QRegister qm) {
1377   EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
1378 }
1379 
1380 // Moved to Arm32::AssemblerARM32::vcugeqi().
1381 void Assembler::vcugeqi(OperandSize sz,
1382                       QRegister qd, QRegister qn, QRegister qm) {
1383   EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
1384 }
1385 
1386 // Moved to Arm32::AssemblerARM32::vcgeqs().
1387 void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
1388   EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
1389 }
1390 
1391 // Moved to ARM32::AssemblerARM32::vcgtqi().
1392 void Assembler::vcgtqi(OperandSize sz,
1393                       QRegister qd, QRegister qn, QRegister qm) {
1394   EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
1395 }
1396 
1397 // Moved to ARM32::AssemblerARM32::vcugtqi().
1398 void Assembler::vcugtqi(OperandSize sz,
1399                       QRegister qd, QRegister qn, QRegister qm) {
1400   EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
1401 }
1402 
1403 // Moved to ARM32::AssemblerARM32::vcgtqs().
1404 void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
1405   EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
1406 }
1407 
1408 // Moved to ARM32::AssemblerARM32::bkpt()
1409 void Assembler::bkpt(uint16_t imm16) {
1410   Emit(BkptEncoding(imm16));
1411 }
1412 #endif
1413 
1414 void Assembler::b(Label *label, Condition cond) {
1415   EmitBranch(cond, label, false);
1416 }
1417 
1418 #if 0
1419 // Moved to ARM32::AssemblerARM32::bl()
1420 void Assembler::bl(Label* label, Condition cond) {
1421   EmitBranch(cond, label, true);
1422 }
1423 
1424 // Moved to ARM32::AssemblerARM32::bx()
1425 void Assembler::bx(Register rm, Condition cond) {
1426   ASSERT(rm != kNoRegister);
1427   ASSERT(cond != kNoCondition);
1428   int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
1429                      B24 | B21 | (0xfff << 8) | B4 |
1430                      (static_cast<int32_t>(rm) << kRmShift);
1431   Emit(encoding);
1432 }
1433 
1434 // Moved to ARM32::AssemblerARM32::blx()
1435 void Assembler::blx(Register rm, Condition cond) {
1436   ASSERT(rm != kNoRegister);
1437   ASSERT(cond != kNoCondition);
1438   int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
1439                      B24 | B21 | (0xfff << 8) | B5 | B4 |
1440                      (static_cast<int32_t>(rm) << kRmShift);
1441   Emit(encoding);
1442 }
1443 #endif
1444 
1445 void Assembler::MarkExceptionHandler(Label *label) {
1446   EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
1447   Label l;
1448   b(&l);
1449   EmitBranch(AL, label, false);
1450   Bind(&l);
1451 }
1452 
1453 void Assembler::Drop(intptr_t stack_elements) {
1454   ASSERT(stack_elements >= 0);
1455   if (stack_elements > 0) {
1456     AddImmediate(SP, SP, stack_elements * kWordSize);
1457   }
1458 }
1459 
1460 intptr_t Assembler::FindImmediate(int32_t imm) {
1461   return object_pool_wrapper_.FindImmediate(imm);
1462 }
1463 
1464 // Uses a code sequence that can easily be decoded.
1465 void Assembler::LoadWordFromPoolOffset(Register rd, int32_t offset, Register pp,
1466                                        Condition cond) {
1467   ASSERT((pp != PP) || constant_pool_allowed());
1468   ASSERT(rd != pp);
1469   int32_t offset_mask = 0;
1470   if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) {
1471     ldr(rd, Address(pp, offset), cond);
1472   } else {
1473     int32_t offset_hi = offset & ~offset_mask; // signed
1474     uint32_t offset_lo = offset & offset_mask; // unsigned
1475     // Inline a simplified version of AddImmediate(rd, pp, offset_hi).
1476     Operand o;
1477     if (Operand::CanHold(offset_hi, &o)) {
1478       add(rd, pp, o, cond);
1479     } else {
1480       LoadImmediate(rd, offset_hi, cond);
1481       add(rd, pp, Operand(rd), cond);
1482     }
1483     ldr(rd, Address(rd, offset_lo), cond);
1484   }
1485 }
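
// When the pool offset does not fit the addressing mode, the code above
// splits it into a high part added to pp and a low part folded into the ldr.
// A hedged sketch of that split arithmetic (mask value as produced by
// Address::CanHoldLoadOffset for kWord; illustrative only):
#if 0
#include <cassert>
#include <cstdint>

inline void SplitPoolOffset(int32_t offset, int32_t offset_mask,
                            int32_t *hi, uint32_t *lo) {
  *hi = offset & ~offset_mask; // added to pp first (signed)
  *lo = offset & offset_mask;  // used as the ldr displacement (unsigned)
  assert(*hi + static_cast<int32_t>(*lo) == offset);
}
// E.g. offset 0x1234 with mask 0xfff splits into hi 0x1000 and lo 0x234.
#endif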
1486 
1487 void Assembler::CheckCodePointer() {
1488 #ifdef DEBUG
1489   Label cid_ok, instructions_ok;
1490   Push(R0);
1491   Push(IP);
1492   CompareClassId(CODE_REG, kCodeCid, R0);
1493   b(&cid_ok, EQ);
1494   bkpt(0);
1495   Bind(&cid_ok);
1496 
1497   const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
1498                           Instructions::HeaderSize() - kHeapObjectTag;
1499   mov(R0, Operand(PC));
1500   AddImmediate(R0, R0, -offset);
1501   ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
1502   cmp(R0, Operand(IP));
1503   b(&instructions_ok, EQ);
1504   bkpt(1);
1505   Bind(&instructions_ok);
1506   Pop(IP);
1507   Pop(R0);
1508 #endif
1509 }
1510 
1511 void Assembler::RestoreCodePointer() {
1512   ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize));
1513   CheckCodePointer();
1514 }
1515 
1516 void Assembler::LoadPoolPointer(Register reg) {
1517   // Load new pool pointer.
1518   CheckCodePointer();
1519   ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
1520   set_constant_pool_allowed(reg == PP);
1521 }
1522 
1523 void Assembler::LoadIsolate(Register rd) {
1524   ldr(rd, Address(THR, Thread::isolate_offset()));
1525 }
1526 
1527 bool Assembler::CanLoadFromObjectPool(const Object &object) const {
1528   ASSERT(!Thread::CanLoadFromThread(object));
1529   if (!constant_pool_allowed()) {
1530     return false;
1531   }
1532 
1533   ASSERT(object.IsNotTemporaryScopedHandle());
1534   ASSERT(object.IsOld());
1535   return true;
1536 }
1537 
1538 void Assembler::LoadObjectHelper(Register rd, const Object &object,
1539                                  Condition cond, bool is_unique, Register pp) {
1542   if (Thread::CanLoadFromThread(object)) {
1543     // Load common VM constants from the thread. This works also in places where
1544     // no constant pool is set up (e.g. intrinsic code).
1545     ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond);
1546   } else if (object.IsSmi()) {
1547     // Relocation doesn't apply to Smis.
1548     LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond);
1549   } else if (CanLoadFromObjectPool(object)) {
1550     // Make sure that class CallPattern is able to decode this load from the
1551     // object pool.
1552     const int32_t offset = ObjectPool::element_offset(
1553         is_unique ? object_pool_wrapper_.AddObject(object)
1554                   : object_pool_wrapper_.FindObject(object));
1555     LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
1556   } else {
1557     ASSERT(FLAG_allow_absolute_addresses);
1558     ASSERT(object.IsOld());
1559     // Make sure that class CallPattern is able to decode this load immediate.
1560     const int32_t object_raw = reinterpret_cast<int32_t>(object.raw());
1561     LoadImmediate(rd, object_raw, cond);
1562   }
1563 }
1564 
1565 void Assembler::LoadObject(Register rd, const Object &object, Condition cond) {
1566   LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP);
1567 }
1568 
1569 void Assembler::LoadUniqueObject(Register rd, const Object &object,
1570                                  Condition cond) {
1571   LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP);
1572 }
1573 
1574 void Assembler::LoadFunctionFromCalleePool(Register dst,
1575                                            const Function &function,
1576                                            Register new_pp) {
1577   const int32_t offset =
1578       ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
1579   LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL);
1580 }
1581 
1582 void Assembler::LoadNativeEntry(Register rd, const ExternalLabel *label,
1583                                 Patchability patchable, Condition cond) {
1584   const int32_t offset = ObjectPool::element_offset(
1585       object_pool_wrapper_.FindNativeEntry(label, patchable));
1586   LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
1587 }
1588 
1589 void Assembler::PushObject(const Object &object) {
1590   LoadObject(IP, object);
1591   Push(IP);
1592 }
1593 
1594 void Assembler::CompareObject(Register rn, const Object &object) {
1595   ASSERT(rn != IP);
1596   if (object.IsSmi()) {
1597     CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw()));
1598   } else {
1599     LoadObject(IP, object);
1600     cmp(rn, Operand(IP));
1601   }
1602 }
1603 
1604 // Preserves object and value registers.
1605 void Assembler::StoreIntoObjectFilterNoSmi(Register object, Register value,
1606                                            Label *no_update) {
1607   COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
1608                  (kOldObjectAlignmentOffset == 0));
1609 
1610   // Write-barrier triggers if the value is in the new space (has bit set) and
1611   // the object is in the old space (has bit cleared).
1612   // To check that, we compute value & ~object and skip the write barrier
1613   // if the bit is not set. The object register must stay intact, so IP is used.
1614   bic(IP, value, Operand(object));
1615   tst(IP, Operand(kNewObjectAlignmentOffset));
1616   b(no_update, EQ);
1617 }
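
// New-space objects have the kNewObjectAlignmentOffset bit set in their
// tagged pointers while old-space objects have it clear, so the bic + tst
// pair computes (value & ~object) & bit. A minimal standalone sketch of the
// predicate, assuming kWordSize == 4 (names here are illustrative only):
#if 0
#include <cstdint>

constexpr uint32_t kNewSpaceBit = 4;  // kNewObjectAlignmentOffset == kWordSize

// True when the barrier is needed: value in new space, object in old space.
inline bool NeedsWriteBarrier(uint32_t object, uint32_t value) {
  return ((value & ~object) & kNewSpaceBit) != 0;
}
#endif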
1618 
1619 // Preserves object and value registers.
1620 void Assembler::StoreIntoObjectFilter(Register object, Register value,
1621                                       Label *no_update) {
1622   // For the value we are only interested in the new/old bit and the tag bit.
1623   // AND the new-space bit with the tag bit; the resulting bit is 0 for a Smi.
1624   and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
1625   // And the result with the negated space bit of the object.
1626   bic(IP, IP, Operand(object));
1627   tst(IP, Operand(kNewObjectAlignmentOffset));
1628   b(no_update, EQ);
1629 }
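
// The Smi-aware variant folds the Smi check into the same trick: shifting the
// value left by kObjectAlignmentLog2 - 1 moves its tag bit up into the
// new/old bit position, so the AND clears that bit for any Smi. A hedged
// sketch under the same assumptions as above, plus 8-byte object alignment:
#if 0
#include <cstdint>

constexpr uint32_t kNewSpaceBit2 = 4;     // kNewObjectAlignmentOffset
constexpr int kObjectAlignmentLog2_ = 3;  // assumed: objects 8-byte aligned

inline bool NeedsWriteBarrierMaybeSmi(uint32_t object, uint32_t value) {
  uint32_t ip = value & (value << (kObjectAlignmentLog2_ - 1));  // and_ above
  return ((ip & ~object) & kNewSpaceBit2) != 0;                  // bic + tst
}
#endif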
1630 
1631 Operand Assembler::GetVerifiedMemoryShadow() {
1632   Operand offset;
1633   if (!Operand::CanHold(VerifiedMemory::offset(), &offset)) {
1634     FATAL1("Offset 0x%" Px " not representable", VerifiedMemory::offset());
1635   }
1636   return offset;
1637 }
1638 
1639 void Assembler::WriteShadowedField(Register base, intptr_t offset,
1640                                    Register value, Condition cond) {
1641   if (VerifiedMemory::enabled()) {
1642     ASSERT(base != value);
1643     Operand shadow(GetVerifiedMemoryShadow());
1644     add(base, base, shadow, cond);
1645     str(value, Address(base, offset), cond);
1646     sub(base, base, shadow, cond);
1647   }
1648   str(value, Address(base, offset), cond);
1649 }
1650 
1651 void Assembler::WriteShadowedFieldPair(Register base, intptr_t offset,
1652                                        Register value_even, Register value_odd,
1653                                        Condition cond) {
1654   ASSERT(value_odd == value_even + 1);
1655   if (VerifiedMemory::enabled()) {
1656     ASSERT(base != value_even);
1657     ASSERT(base != value_odd);
1658     Operand shadow(GetVerifiedMemoryShadow());
1659     add(base, base, shadow, cond);
1660     strd(value_even, base, offset, cond);
1661     sub(base, base, shadow, cond);
1662   }
1663   strd(value_even, base, offset, cond);
1664 }
1665 
1666 Register UseRegister(Register reg, RegList *used) {
1667   ASSERT(reg != SP);
1668   ASSERT(reg != PC);
1669   ASSERT((*used & (1 << reg)) == 0);
1670   *used |= (1 << reg);
1671   return reg;
1672 }
1673 
1674 Register AllocateRegister(RegList *used) {
1675   const RegList free = ~*used;
1676   return (free == 0) ? kNoRegister
1677                      : UseRegister(static_cast<Register>(
1678                                        Utils::CountTrailingZeros(free)),
1679                                    used);
1680 }
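
// AllocateRegister picks the lowest-numbered free register by counting the
// trailing zeros of the complemented in-use mask. A hedged sketch of the same
// bit trick on a plain 32-bit mask (the GCC/Clang builtin stands in for
// Utils::CountTrailingZeros):
#if 0
#include <cstdint>

// Returns the lowest clear bit index in 'used', or -1 if none is free.
inline int LowestFreeRegister(uint32_t used) {
  const uint32_t free = ~used;
  return (free == 0) ? -1 : __builtin_ctz(free);
}
#endif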
1681 
1682 void Assembler::VerifiedWrite(const Address &address, Register new_value,
1683                               FieldContent old_content) {
1684 #if defined(DEBUG)
1685   ASSERT(address.mode() == Address::Offset ||
1686          address.mode() == Address::NegOffset);
1687   // Allocate temporary registers (and check for register collisions).
1688   RegList used = 0;
1689   UseRegister(new_value, &used);
1690   Register base = UseRegister(address.rn(), &used);
1691   if (address.rm() != kNoRegister) {
1692     UseRegister(address.rm(), &used);
1693   }
1694   Register old_value = AllocateRegister(&used);
1695   Register temp = AllocateRegister(&used);
1696   PushList(used);
1697   ldr(old_value, address);
1698   // First check that 'old_value' contains 'old_content'.
1699   // Smi test.
1700   tst(old_value, Operand(kHeapObjectTag));
1701   Label ok;
1702   switch (old_content) {
1703   case kOnlySmi:
1704     b(&ok, EQ); // Smi is OK.
1705     Stop("Expected smi.");
1706     break;
1707   case kHeapObjectOrSmi:
1708     b(&ok, EQ); // Smi is OK.
1709     // Non-smi case: Verify object pointer is word-aligned when untagged.
1710     COMPILE_ASSERT(kHeapObjectTag == 1);
1711     tst(old_value, Operand((kWordSize - 1) - kHeapObjectTag));
1712     b(&ok, EQ);
1713     Stop("Expected heap object or Smi");
1714     break;
1715   case kEmptyOrSmiOrNull:
1716     b(&ok, EQ); // Smi is OK.
1717     // Non-smi case: Check for the special zap word or null.
1718     // Note: Cannot use CompareImmediate, since IP may be in use.
1719     LoadImmediate(temp, Heap::kZap32Bits);
1720     cmp(old_value, Operand(temp));
1721     b(&ok, EQ);
1722     LoadObject(temp, Object::null_object());
1723     cmp(old_value, Operand(temp));
1724     b(&ok, EQ);
1725     Stop("Expected zapped, Smi or null");
1726     break;
1727   default:
1728     UNREACHABLE();
1729   }
1730   Bind(&ok);
1731   if (VerifiedMemory::enabled()) {
1732     Operand shadow_offset(GetVerifiedMemoryShadow());
1733     // Adjust the address to shadow.
1734     add(base, base, shadow_offset);
1735     ldr(temp, address);
1736     cmp(old_value, Operand(temp));
1737     Label match;
1738     b(&match, EQ);
1739     Stop("Write barrier verification failed");
1740     Bind(&match);
1741     // Write new value in shadow.
1742     str(new_value, address);
1743     // Restore original address.
1744     sub(base, base, shadow_offset);
1745   }
1746   str(new_value, address);
1747   PopList(used);
1748 #else
1749   str(new_value, address);
1750 #endif // DEBUG
1751 }
1752 
1753 void Assembler::StoreIntoObject(Register object, const Address &dest,
1754                                 Register value, bool can_value_be_smi) {
1755   ASSERT(object != value);
1756   VerifiedWrite(dest, value, kHeapObjectOrSmi);
1757   Label done;
1758   if (can_value_be_smi) {
1759     StoreIntoObjectFilter(object, value, &done);
1760   } else {
1761     StoreIntoObjectFilterNoSmi(object, value, &done);
1762   }
1763   // A store buffer update is required.
1764   RegList regs = (1 << CODE_REG) | (1 << LR);
1765   if (value != R0) {
1766     regs |= (1 << R0); // Preserve R0.
1767   }
1768   PushList(regs);
1769   if (object != R0) {
1770     mov(R0, Operand(object));
1771   }
1772   ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset()));
1773   ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset()));
1774   blx(LR);
1775   PopList(regs);
1776   Bind(&done);
1777 }
1778 
1779 void Assembler::StoreIntoObjectOffset(Register object, int32_t offset,
1780                                       Register value, bool can_value_be_smi) {
1781   int32_t ignored = 0;
1782   if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
1783     StoreIntoObject(object, FieldAddress(object, offset), value,
1784                     can_value_be_smi);
1785   } else {
1786     AddImmediate(IP, object, offset - kHeapObjectTag);
1787     StoreIntoObject(object, Address(IP), value, can_value_be_smi);
1788   }
1789 }
1790 
1791 void Assembler::StoreIntoObjectNoBarrier(Register object, const Address &dest,
1792                                          Register value,
1793                                          FieldContent old_content) {
1794   VerifiedWrite(dest, value, old_content);
1795 #if defined(DEBUG)
1796   Label done;
1797   StoreIntoObjectFilter(object, value, &done);
1798   Stop("Store buffer update is required");
1799   Bind(&done);
1800 #endif // defined(DEBUG)
1801   // No store buffer update.
1802 }
1803 
1804 void Assembler::StoreIntoObjectNoBarrierOffset(Register object, int32_t offset,
1805                                                Register value,
1806                                                FieldContent old_content) {
1807   int32_t ignored = 0;
1808   if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
1809     StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
1810                              old_content);
1811   } else {
1812     AddImmediate(IP, object, offset - kHeapObjectTag);
1813     StoreIntoObjectNoBarrier(object, Address(IP), value, old_content);
1814   }
1815 }
1816 
1817 void Assembler::StoreIntoObjectNoBarrier(Register object, const Address &dest,
1818                                          const Object &value,
1819                                          FieldContent old_content) {
1820   ASSERT(value.IsSmi() || value.InVMHeap() ||
1821          (value.IsOld() && value.IsNotTemporaryScopedHandle()));
1822   // No store buffer update.
1823   LoadObject(IP, value);
1824   VerifiedWrite(dest, IP, old_content);
1825 }
1826 
1827 void Assembler::StoreIntoObjectNoBarrierOffset(Register object, int32_t offset,
1828                                                const Object &value,
1829                                                FieldContent old_content) {
1830   int32_t ignored = 0;
1831   if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
1832     StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
1833                              old_content);
1834   } else {
1835     AddImmediate(IP, object, offset - kHeapObjectTag);
1836     StoreIntoObjectNoBarrier(object, Address(IP), value, old_content);
1837   }
1838 }
1839 
1840 void Assembler::InitializeFieldsNoBarrier(Register object, Register begin,
1841                                           Register end, Register value_even,
1842                                           Register value_odd) {
1843   ASSERT(value_odd == value_even + 1);
1844   Label init_loop;
1845   Bind(&init_loop);
1846   AddImmediate(begin, 2 * kWordSize);
1847   cmp(begin, Operand(end));
1848   WriteShadowedFieldPair(begin, -2 * kWordSize, value_even, value_odd, LS);
1849   b(&init_loop, CC);
1850   WriteShadowedField(begin, -2 * kWordSize, value_even, HI);
1851 #if defined(DEBUG)
1852   Label done;
1853   StoreIntoObjectFilter(object, value_even, &done);
1854   StoreIntoObjectFilter(object, value_odd, &done);
1855   Stop("Store buffer update is required");
1856   Bind(&done);
1857 #endif // defined(DEBUG)
1858   // No store buffer update.
1859 }
1860 
1861 void Assembler::InitializeFieldsNoBarrierUnrolled(
1862     Register object, Register base, intptr_t begin_offset, intptr_t end_offset,
1863     Register value_even, Register value_odd) {
1864   ASSERT(value_odd == value_even + 1);
1865   intptr_t current_offset = begin_offset;
1866   while (current_offset + kWordSize < end_offset) {
1867     WriteShadowedFieldPair(base, current_offset, value_even, value_odd);
1868     current_offset += 2 * kWordSize;
1869   }
1870   while (current_offset < end_offset) {
1871     WriteShadowedField(base, current_offset, value_even);
1872     current_offset += kWordSize;
1873   }
1874 #if defined(DEBUG)
1875   Label done;
1876   StoreIntoObjectFilter(object, value_even, &done);
1877   StoreIntoObjectFilter(object, value_odd, &done);
1878   Stop("Store buffer update is required");
1879   Bind(&done);
1880 #endif // defined(DEBUG)
1881   // No store buffer update.
1882 }
1883 
1884 void Assembler::StoreIntoSmiField(const Address &dest, Register value) {
1885 #if defined(DEBUG)
1886   Label done;
1887   tst(value, Operand(kHeapObjectTag));
1888   b(&done, EQ);
1889   Stop("New value must be Smi.");
1890   Bind(&done);
1891 #endif // defined(DEBUG)
1892   VerifiedWrite(dest, value, kOnlySmi);
1893 }
1894 
1895 void Assembler::LoadClassId(Register result, Register object, Condition cond) {
1896   ASSERT(RawObject::kClassIdTagPos == 16);
1897   ASSERT(RawObject::kClassIdTagSize == 16);
1898   const intptr_t class_id_offset =
1899       Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
1900   ldrh(result, FieldAddress(object, class_id_offset), cond);
1901 }
1902 
1903 void Assembler::LoadClassById(Register result, Register class_id) {
1904   ASSERT(result != class_id);
1905   LoadIsolate(result);
1906   const intptr_t offset =
1907       Isolate::class_table_offset() + ClassTable::table_offset();
1908   LoadFromOffset(kWord, result, result, offset);
1909   ldr(result, Address(result, class_id, LSL, 2));
1910 }
1911 
1912 void Assembler::LoadClass(Register result, Register object, Register scratch) {
1913   ASSERT(scratch != result);
1914   LoadClassId(scratch, object);
1915   LoadClassById(result, scratch);
1916 }
1917 
1918 void Assembler::CompareClassId(Register object, intptr_t class_id,
1919                                Register scratch) {
1920   LoadClassId(scratch, object);
1921   CompareImmediate(scratch, class_id);
1922 }
1923 
1924 void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
1925   tst(object, Operand(kSmiTagMask));
1926   LoadClassId(result, object, NE);
1927   LoadImmediate(result, kSmiCid, EQ);
1928 }
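
// The sequence above avoids a branch through predication: tst sets EQ exactly
// when the Smi tag bit is clear, so one of the two loads executes as a nop.
// A hedged C equivalent, assuming kSmiTagMask == 1 and treating the
// heap-object class id as an opaque input:
#if 0
#include <cstdint>

inline intptr_t ClassIdMayBeSmi(uint32_t tagged, intptr_t smi_cid,
                                intptr_t heap_object_cid) {
  return ((tagged & 1) == 0) ? smi_cid : heap_object_cid;
}
#endif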
1929 
1930 void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
1931   LoadClassIdMayBeSmi(result, object);
1932   SmiTag(result);
1933 }
1934 
1935 void Assembler::ComputeRange(Register result, Register value, Register scratch,
1936                              Label *not_mint) {
1937   const Register hi = TMP;
1938   const Register lo = scratch;
1939 
1940   Label done;
1941   mov(result, Operand(value, LSR, kBitsPerWord - 1));
1942   tst(value, Operand(kSmiTagMask));
1943   b(&done, EQ);
1944   CompareClassId(value, kMintCid, result);
1945   b(not_mint, NE);
1946   ldr(hi, FieldAddress(value, Mint::value_offset() + kWordSize));
1947   ldr(lo, FieldAddress(value, Mint::value_offset()));
1948   rsb(result, hi, Operand(ICData::kInt32RangeBit));
1949   cmp(hi, Operand(lo, ASR, kBitsPerWord - 1));
1950   b(&done, EQ);
1951   LoadImmediate(result, ICData::kUint32RangeBit); // Uint32
1952   tst(hi, Operand(hi));
1953   LoadImmediate(result, ICData::kInt64RangeBit, NE); // Int64
1954   Bind(&done);
1955 }
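
// For a Mint, the checks above classify the 64-bit payload: if the high word
// equals the sign extension of the low word the value fits in 32 bits, a zero
// high word means uint32, and anything else needs int64. A hedged scalar
// model (the ICData::k*RangeBit mapping is left symbolic; the assembly also
// folds hi's sign into the int32 bucket via result = kInt32RangeBit - hi):
#if 0
#include <cstdint>

enum MintRange { kFitsInt32, kFitsUint32, kNeedsInt64 };

inline MintRange ClassifyMint(int64_t v) {
  const int32_t lo = static_cast<int32_t>(v);
  const int32_t hi = static_cast<int32_t>(v >> 32);
  if (hi == (lo >> 31)) return kFitsInt32;  // cmp(hi, lo ASR 31) above
  if (hi == 0) return kFitsUint32;          // tst(hi, hi) above
  return kNeedsInt64;
}
#endif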
1956 
1957 void Assembler::UpdateRangeFeedback(Register value, intptr_t index,
1958                                     Register ic_data, Register scratch1,
1959                                     Register scratch2, Label *miss) {
1960   ASSERT(ICData::IsValidRangeFeedbackIndex(index));
1961   ComputeRange(scratch1, value, scratch2, miss);
1962   ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()));
1963   orr(scratch2, scratch2,
1964       Operand(scratch1, LSL, ICData::RangeFeedbackShift(index)));
1965   str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()));
1966 }
1967 
1968 #if 0
1969 // Moved to ::canEncodeBranchOffset() in IceAssemblerARM32.cpp.
1970 static bool CanEncodeBranchOffset(int32_t offset) {
1971   ASSERT(Utils::IsAligned(offset, 4));
1972   // Note: This check doesn't take advantage of the fact that offset>>2
1973   // is stored (allowing two more bits in address space).
1974   return Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset);
1975 }
1976 
1977 // Moved to ARM32::AssemblerARM32::encodeBranchOffset()
1978 int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
1979   // The offset is off by 8 due to the way the ARM CPUs read PC.
1980   offset -= Instr::kPCReadOffset;
1981 
1982   if (!CanEncodeBranchOffset(offset)) {
1983     ASSERT(!use_far_branches());
1984     Thread::Current()->long_jump_base()->Jump(
1985         1, Object::branch_offset_error());
1986   }
1987 
1988   // Properly preserve only the bits supported in the instruction.
1989   offset >>= 2;
1990   offset &= kBranchOffsetMask;
1991   return (inst & ~kBranchOffsetMask) | offset;
1992 }
1993 
1994 // Moved to ARM32::AssemblerARM32::decodeBranchOffset()
1995 int Assembler::DecodeBranchOffset(int32_t inst) {
1996   // Sign-extend, left-shift by 2, then add 8.
1997   return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
1998 }
1999 #endif
2000 
2001 static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
2002   int32_t offset = 0;
2003   offset |= (movt & 0xf0000) << 12;
2004   offset |= (movt & 0xfff) << 16;
2005   offset |= (movw & 0xf0000) >> 4;
2006   offset |= movw & 0xfff;
2007   return offset;
2008 }
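
// DecodeARMv7LoadImmediate reverses the movw/movt split used for patchable
// immediates: each instruction carries 16 bits as imm4 (bits 19:16) and imm12
// (bits 11:0). A hedged round-trip sketch; the 0xe300c000/0xe340c000
// templates are "movw ip, #lo" / "movt ip, #hi", as in the patching below:
#if 0
#include <cassert>
#include <cstdint>

inline void EncodeMovwMovt(uint32_t value, int32_t *movw, int32_t *movt) {
  const uint32_t lo = value & 0xffff;
  const uint32_t hi = value >> 16;
  *movw = 0xe300c000 | ((lo >> 12) << 16) | (lo & 0xfff);
  *movt = 0xe340c000 | ((hi >> 12) << 16) | (hi & 0xfff);
}
// Round trip: decoding the encoded pair gives the value back, e.g.
//   int32_t movw, movt;
//   EncodeMovwMovt(0x12345678, &movw, &movt);
//   assert(DecodeARMv7LoadImmediate(movt, movw) == 0x12345678);
#endif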
2009 
2010 static int32_t DecodeARMv6LoadImmediate(int32_t mov, int32_t or1, int32_t or2,
2011                                         int32_t or3) {
2012   int32_t offset = 0;
2013   offset |= (mov & 0xff) << 24;
2014   offset |= (or1 & 0xff) << 16;
2015   offset |= (or2 & 0xff) << 8;
2016   offset |= (or3 & 0xff);
2017   return offset;
2018 }
2019 
2020 class PatchFarBranch : public AssemblerFixup {
2021 public:
2022   PatchFarBranch() {}
2023 
2024   void Process(const MemoryRegion &region, intptr_t position) {
2025     const ARMVersion version = TargetCPUFeatures::arm_version();
2026     if ((version == ARMv5TE) || (version == ARMv6)) {
2027       ProcessARMv6(region, position);
2028     } else {
2029       ASSERT(version == ARMv7);
2030       ProcessARMv7(region, position);
2031     }
2032   }
2033 
2034 private:
2035   void ProcessARMv6(const MemoryRegion &region, intptr_t position) {
2036     const int32_t mov = region.Load<int32_t>(position);
2037     const int32_t or1 = region.Load<int32_t>(position + 1 * Instr::kInstrSize);
2038     const int32_t or2 = region.Load<int32_t>(position + 2 * Instr::kInstrSize);
2039     const int32_t or3 = region.Load<int32_t>(position + 3 * Instr::kInstrSize);
2040     const int32_t bx = region.Load<int32_t>(position + 4 * Instr::kInstrSize);
2041 
2042     if (((mov & 0xffffff00) == 0xe3a0c400) && // mov IP, (byte3 rot 4)
2043         ((or1 & 0xffffff00) == 0xe38cc800) && // orr IP, IP, (byte2 rot 8)
2044         ((or2 & 0xffffff00) == 0xe38ccc00) && // orr IP, IP, (byte1 rot 12)
2045         ((or3 & 0xffffff00) == 0xe38cc000)) { // orr IP, IP, byte0
2046       const int32_t offset = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
2047       const int32_t dest = region.start() + offset;
2048       const int32_t dest0 = (dest & 0x000000ff);
2049       const int32_t dest1 = (dest & 0x0000ff00) >> 8;
2050       const int32_t dest2 = (dest & 0x00ff0000) >> 16;
2051       const int32_t dest3 = (dest & 0xff000000) >> 24;
2052       const int32_t patched_mov = 0xe3a0c400 | dest3;
2053       const int32_t patched_or1 = 0xe38cc800 | dest2;
2054       const int32_t patched_or2 = 0xe38ccc00 | dest1;
2055       const int32_t patched_or3 = 0xe38cc000 | dest0;
2056 
2057       region.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov);
2058       region.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1);
2059       region.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2);
2060       region.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3);
2061       return;
2062     }
2063 
2064     // If the offset loading instructions aren't there, we must have replaced
2065     // the far branch with a near one, and so these instructions
2066     // should be NOPs.
2067     ASSERT((or1 == Instr::kNopInstruction) && (or2 == Instr::kNopInstruction) &&
2068            (or3 == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
2069   }
2070 
2071   void ProcessARMv7(const MemoryRegion &region, intptr_t position) {
2072     const int32_t movw = region.Load<int32_t>(position);
2073     const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize);
2074     const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize);
2075 
2076     if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high
2077         ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low
2078       const int32_t offset = DecodeARMv7LoadImmediate(movt, movw);
2079       const int32_t dest = region.start() + offset;
2080       const uint16_t dest_high = Utils::High16Bits(dest);
2081       const uint16_t dest_low = Utils::Low16Bits(dest);
2082       const int32_t patched_movt =
2083           0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2084       const int32_t patched_movw =
2085           0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2086 
2087       region.Store<int32_t>(position, patched_movw);
2088       region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
2089       return;
2090     }
2091 
2092     // If the offset loading instructions aren't there, we must have replaced
2093     // the far branch with a near one, and so these instructions
2094     // should be NOPs.
2095     ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
2096   }
2097 
2098   virtual bool IsPointerOffset() const { return false; }
2099 };
2100 
2101 void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) {
2102   buffer_.EmitFixup(new PatchFarBranch());
2103   LoadPatchableImmediate(IP, offset);
2104   if (link) {
2105     blx(IP, cond);
2106   } else {
2107     bx(IP, cond);
2108   }
2109 }
2110 
2111 void Assembler::EmitBranch(Condition cond, Label *label, bool link) {
2112   if (label->IsBound()) {
2113     const int32_t dest = label->Position() - buffer_.Size();
2114     if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
2115       EmitFarBranch(cond, label->Position(), link);
2116     } else {
2117       EmitType5(cond, dest, link);
2118     }
2119   } else {
2120     const intptr_t position = buffer_.Size();
2121     if (use_far_branches()) {
2122       const int32_t dest = label->position_;
2123       EmitFarBranch(cond, dest, link);
2124     } else {
2125       // Use the offset field of the branch instruction for linking the sites.
2126       EmitType5(cond, label->position_, link);
2127     }
2128     label->LinkTo(position);
2129   }
2130 }
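
// Until a label is bound, its branch sites form a singly linked chain that is
// threaded through the instruction stream itself: each unresolved branch's
// offset field holds the position of the previously linked site, and
// label->position_ holds the head. Bind/BindARMv6 walk that chain, patching
// in real offsets. A hedged, minimal model of the bookkeeping:
#if 0
#include <cstdint>
#include <map>

// slot[pos] stands in for the offset field of the branch emitted at 'pos'.
inline void LinkSite(std::map<int32_t, int32_t> &slot, int32_t &head,
                     int32_t pos) {
  slot[pos] = head;  // stash the previous head inside the new site
  head = pos;
}

inline void BindLabel(std::map<int32_t, int32_t> &slot, int32_t &head,
                      int32_t bound_pc) {
  while (head != -1) {             // -1: empty chain
    const int32_t next = slot[head];
    slot[head] = bound_pc - head;  // patch the real relative offset
    head = next;
  }
}
#endif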
2131 
2132 void Assembler::BindARMv6(Label *label) {
2133   ASSERT(!label->IsBound());
2134   intptr_t bound_pc = buffer_.Size();
2135   while (label->IsLinked()) {
2136     const int32_t position = label->Position();
2137     int32_t dest = bound_pc - position;
2138     if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
2139       // Far branches are enabled and we can't encode the branch offset.
2140 
2141       // Grab instructions that load the offset.
2142       const int32_t mov = buffer_.Load<int32_t>(position);
2143       const int32_t or1 =
2144           buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2145       const int32_t or2 =
2146           buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2147       const int32_t or3 =
2148           buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);
2149 
2150       // Change from relative to the branch to relative to the assembler
2151       // buffer.
2152       dest = buffer_.Size();
2153       const int32_t dest0 = (dest & 0x000000ff);
2154       const int32_t dest1 = (dest & 0x0000ff00) >> 8;
2155       const int32_t dest2 = (dest & 0x00ff0000) >> 16;
2156       const int32_t dest3 = (dest & 0xff000000) >> 24;
2157       const int32_t patched_mov = 0xe3a0c400 | dest3;
2158       const int32_t patched_or1 = 0xe38cc800 | dest2;
2159       const int32_t patched_or2 = 0xe38ccc00 | dest1;
2160       const int32_t patched_or3 = 0xe38cc000 | dest0;
2161 
2162       // Rewrite the instructions.
2163       buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov);
2164       buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1);
2165       buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2);
2166       buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3);
2167       label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
2168     } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
2169       // Grab instructions that load the offset, and the branch.
2170       const int32_t mov = buffer_.Load<int32_t>(position);
2171       const int32_t or1 =
2172           buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2173       const int32_t or2 =
2174           buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2175       const int32_t or3 =
2176           buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);
2177       const int32_t branch =
2178           buffer_.Load<int32_t>(position + 4 * Instr::kInstrSize);
2179 
2180       // Grab the branch condition, and encode the link bit.
2181       const int32_t cond = branch & 0xf0000000;
2182       const int32_t link = (branch & 0x20) << 19;
2183 
2184       // Encode the branch and the offset.
2185       const int32_t new_branch = cond | link | 0x0a000000;
2186       const int32_t encoded = EncodeBranchOffset(dest, new_branch);
2187 
2188       // Write the encoded branch instruction followed by four nops.
2189       buffer_.Store<int32_t>(position, encoded);
2190       buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
2191                              Instr::kNopInstruction);
2192       buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
2193                              Instr::kNopInstruction);
2194       buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize,
2195                              Instr::kNopInstruction);
2196       buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize,
2197                              Instr::kNopInstruction);
2198 
2199       label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
2200     } else {
2201       int32_t next = buffer_.Load<int32_t>(position);
2202       int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
2203       buffer_.Store<int32_t>(position, encoded);
2204       label->position_ = Assembler::DecodeBranchOffset(next);
2205     }
2206   }
2207   label->BindTo(bound_pc);
2208 }
2209 
2210 #if 0
2211 // Moved to ARM32::AssemblerARM32::bind(Label* Label)
2212 // Note: Most of this code isn't needed because instruction selection has
2213 // already been handled.
2214 void Assembler::BindARMv7(Label* label) {
2215   ASSERT(!label->IsBound());
2216   intptr_t bound_pc = buffer_.Size();
2217   while (label->IsLinked()) {
2218     const int32_t position = label->Position();
2219     int32_t dest = bound_pc - position;
2220     if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
2221       // Far branches are enabled and we can't encode the branch offset.
2222 
2223       // Grab instructions that load the offset.
2224       const int32_t movw =
2225           buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2226       const int32_t movt =
2227           buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2228 
2229       // Change from relative to the branch to relative to the assembler
2230       // buffer.
2231       dest = buffer_.Size();
2232       const uint16_t dest_high = Utils::High16Bits(dest);
2233       const uint16_t dest_low = Utils::Low16Bits(dest);
2234       const int32_t patched_movt =
2235           0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2236       const int32_t patched_movw =
2237           0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2238 
2239       // Rewrite the instructions.
2240       buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
2241       buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
2242       label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2243     } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
2244       // Far branches are enabled, but we can encode the branch offset.
2245 
2246       // Grab instructions that load the offset, and the branch.
2247       const int32_t movw =
2248           buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2249       const int32_t movt =
2250           buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2251       const int32_t branch =
2252           buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2253 
2254       // Grab the branch condition, and encode the link bit.
2255       const int32_t cond = branch & 0xf0000000;
2256       const int32_t link = (branch & 0x20) << 19;
2257 
2258       // Encode the branch and the offset.
2259       const int32_t new_branch = cond | link | 0x0a000000;
2260       const int32_t encoded = EncodeBranchOffset(dest, new_branch);
2261 
2262       // Write the encoded branch instruction followed by two nops.
2263       buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
2264           encoded);
2265       buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
2266           Instr::kNopInstruction);
2267       buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
2268           Instr::kNopInstruction);
2269 
2270       label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2271     } else {
2272       int32_t next = buffer_.Load<int32_t>(position);
2273       int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
2274       buffer_.Store<int32_t>(position, encoded);
2275       label->position_ = Assembler::DecodeBranchOffset(next);
2276     }
2277   }
2278   label->BindTo(bound_pc);
2279 }
2280 #endif
2281 
2282 void Assembler::Bind(Label *label) {
2283   const ARMVersion version = TargetCPUFeatures::arm_version();
2284   if ((version == ARMv5TE) || (version == ARMv6)) {
2285     BindARMv6(label);
2286   } else {
2287     ASSERT(version == ARMv7);
2288     BindARMv7(label);
2289   }
2290 }
2291 
2292 OperandSize Address::OperandSizeFor(intptr_t cid) {
2293   switch (cid) {
2294   case kArrayCid:
2295   case kImmutableArrayCid:
2296     return kWord;
2297   case kOneByteStringCid:
2298   case kExternalOneByteStringCid:
2299     return kByte;
2300   case kTwoByteStringCid:
2301   case kExternalTwoByteStringCid:
2302     return kHalfword;
2303   case kTypedDataInt8ArrayCid:
2304     return kByte;
2305   case kTypedDataUint8ArrayCid:
2306   case kTypedDataUint8ClampedArrayCid:
2307   case kExternalTypedDataUint8ArrayCid:
2308   case kExternalTypedDataUint8ClampedArrayCid:
2309     return kUnsignedByte;
2310   case kTypedDataInt16ArrayCid:
2311     return kHalfword;
2312   case kTypedDataUint16ArrayCid:
2313     return kUnsignedHalfword;
2314   case kTypedDataInt32ArrayCid:
2315     return kWord;
2316   case kTypedDataUint32ArrayCid:
2317     return kUnsignedWord;
2318   case kTypedDataInt64ArrayCid:
2319   case kTypedDataUint64ArrayCid:
2320     UNREACHABLE();
2321     return kByte;
2322   case kTypedDataFloat32ArrayCid:
2323     return kSWord;
2324   case kTypedDataFloat64ArrayCid:
2325     return kDWord;
2326   case kTypedDataFloat32x4ArrayCid:
2327   case kTypedDataInt32x4ArrayCid:
2328   case kTypedDataFloat64x2ArrayCid:
2329     return kRegList;
2330   case kTypedDataInt8ArrayViewCid:
2331     UNREACHABLE();
2332     return kByte;
2333   default:
2334     UNREACHABLE();
2335     return kByte;
2336   }
2337 }
2338 
2339 bool Address::CanHoldLoadOffset(OperandSize size, int32_t offset,
2340                                 int32_t *offset_mask) {
2341   switch (size) {
2342   case kByte:
2343   case kHalfword:
2344   case kUnsignedHalfword:
2345   case kWordPair: {
2346     *offset_mask = 0xff;
2347     return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3.
2348   }
2349   case kUnsignedByte:
2350   case kWord:
2351   case kUnsignedWord: {
2352     *offset_mask = 0xfff;
2353     return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2.
2354   }
2355   case kSWord:
2356   case kDWord: {
2357     *offset_mask = 0x3fc; // Multiple of 4.
2358     // VFP addressing mode.
2359     return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4));
2360   }
2361   case kRegList: {
2362     *offset_mask = 0x0;
2363     return offset == 0;
2364   }
2365   default: {
2366     UNREACHABLE();
2367     return false;
2368   }
2369   }
2370 }
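
// Worked examples of the ranges above (hedged; IsAbsUint mimics
// Utils::IsAbsoluteUint, the sign itself being carried by the U bit of the
// instruction rather than the immediate):
#if 0
#include <cassert>
#include <cstdlib>

inline bool IsAbsUint(int bits, int32_t v) {
  return std::abs(v) < (1 << bits);
}

inline void OffsetRangeExamples() {
  assert(IsAbsUint(12, -4095));                    // kWord: addressing mode 2
  assert(!IsAbsUint(8, 256));                      // kHalfword: mode 3 stops at 255
  assert(IsAbsUint(10, 1020) && (1020 % 4 == 0));  // kSWord/kDWord: VFP mode
}
#endif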
2371 
2372 bool Address::CanHoldStoreOffset(OperandSize size, int32_t offset,
2373                                  int32_t *offset_mask) {
2374   switch (size) {
2375   case kHalfword:
2376   case kUnsignedHalfword:
2377   case kWordPair: {
2378     *offset_mask = 0xff;
2379     return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3.
2380   }
2381   case kByte:
2382   case kUnsignedByte:
2383   case kWord:
2384   case kUnsignedWord: {
2385     *offset_mask = 0xfff;
2386     return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2.
2387   }
2388   case kSWord:
2389   case kDWord: {
2390     *offset_mask = 0x3fc; // Multiple of 4.
2391     // VFP addressing mode.
2392     return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4));
2393   }
2394   case kRegList: {
2395     *offset_mask = 0x0;
2396     return offset == 0;
2397   }
2398   default: {
2399     UNREACHABLE();
2400     return false;
2401   }
2402   }
2403 }
2404 
2405 bool Address::CanHoldImmediateOffset(bool is_load, intptr_t cid,
2406                                      int64_t offset) {
2407   int32_t offset_mask = 0;
2408   if (is_load) {
2409     return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask);
2410   } else {
2411     return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask);
2412   }
2413 }
2414 
2415 #if 0
2416 // Moved to ARM32::AssemblerARM32::push().
2417 void Assembler::Push(Register rd, Condition cond) {
2418   str(rd, Address(SP, -kWordSize, Address::PreIndex), cond);
2419 }
2420 
2421 // Moved to ARM32::AssemblerARM32::pop().
2422 void Assembler::Pop(Register rd, Condition cond) {
2423   ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond);
2424 }
2425 
2426 // Moved to ARM32::AssemblerARM32::pushList().
2427 void Assembler::PushList(RegList regs, Condition cond) {
2428   stm(DB_W, SP, regs, cond);
2429 }
2430 
2431 // Moved to ARM32::AssemblerARM32::popList().
2432 void Assembler::PopList(RegList regs, Condition cond) {
2433   ldm(IA_W, SP, regs, cond);
2434 }
2435 #endif
2436 
2437 void Assembler::MoveRegister(Register rd, Register rm, Condition cond) {
2438   if (rd != rm) {
2439     mov(rd, Operand(rm), cond);
2440   }
2441 }
2442 
2443 #if 0
2444 // Moved to ARM32::AssemblerARM32::lsl()
2445 void Assembler::Lsl(Register rd, Register rm, const Operand& shift_imm,
2446                     Condition cond) {
2447   ASSERT(shift_imm.type() == 1);
2448   ASSERT(shift_imm.encoding() != 0);  // Do not use Lsl if no shift is wanted.
2449   mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);
2450 }
2451 
2452 // Moved to ARM32::AssemblerARM32::lsl()
2453 void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
2454   mov(rd, Operand(rm, LSL, rs), cond);
2455 }
2456 
2457 // Moved to ARM32::AssemblerARM32::lsr()
2458 void Assembler::Lsr(Register rd, Register rm, const Operand& shift_imm,
2459                     Condition cond) {
2460   ASSERT(shift_imm.type() == 1);
2461   uint32_t shift = shift_imm.encoding();
2462   ASSERT(shift != 0);  // Do not use Lsr if no shift is wanted.
2463   if (shift == 32) {
2464     shift = 0;  // Comply with UAL syntax.
2465   }
2466   mov(rd, Operand(rm, LSR, shift), cond);
2467 }
2468 
2469 // Moved to ARM32::AssemblerARM32::lsr()
2470 void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
2471   mov(rd, Operand(rm, LSR, rs), cond);
2472 }
2473 
2474 // Moved to ARM32::AssemblerARM32::asr()
2475 void Assembler::Asr(Register rd, Register rm, const Operand& shift_imm,
2476                     Condition cond) {
2477   ASSERT(shift_imm.type() == 1);
2478   uint32_t shift = shift_imm.encoding();
2479   ASSERT(shift != 0);  // Do not use Asr if no shift is wanted.
2480   if (shift == 32) {
2481     shift = 0;  // Comply with UAL syntax.
2482   }
2483   mov(rd, Operand(rm, ASR, shift), cond);
2484 }
2485 #endif
2486 
2487 void Assembler::Asrs(Register rd, Register rm, const Operand &shift_imm,
2488                      Condition cond) {
2489   ASSERT(shift_imm.type() == 1);
2490   uint32_t shift = shift_imm.encoding();
2491   ASSERT(shift != 0); // Do not use Asrs if no shift is wanted.
2492   if (shift == 32) {
2493     shift = 0; // Comply with UAL syntax.
2494   }
2495   movs(rd, Operand(rm, ASR, shift), cond);
2496 }
2497 
2498 #if 0
2499 // Moved to ARM32::AssemblerARM32::asr()
2500 void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
2501   mov(rd, Operand(rm, ASR, rs), cond);
2502 }
2503 #endif
2504 
2505 void Assembler::Ror(Register rd, Register rm, const Operand &shift_imm,
2506                     Condition cond) {
2507   ASSERT(shift_imm.type() == 1);
2508   ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction.
2509   mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);
2510 }
2511 
2512 void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
2513   mov(rd, Operand(rm, ROR, rs), cond);
2514 }
2515 
2516 void Assembler::Rrx(Register rd, Register rm, Condition cond) {
2517   mov(rd, Operand(rm, ROR, 0), cond);
2518 }
2519 
2520 void Assembler::SignFill(Register rd, Register rm, Condition cond) {
2521   Asr(rd, rm, Operand(31), cond);
2522 }
2523 
2524 void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
2525   ASSERT(qm != QTMP);
2526   ASSERT(qd != QTMP);
2527 
2528   // Reciprocal estimate.
2529   vrecpeqs(qd, qm);
2530   // 2 Newton-Raphson steps.
2531   vrecpsqs(QTMP, qm, qd);
2532   vmulqs(qd, qd, QTMP);
2533   vrecpsqs(QTMP, qm, qd);
2534   vmulqs(qd, qd, QTMP);
2535 }
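
// VRECPS(q, x) computes (2 - q*x), so each "vrecpsqs + vmulqs" pair above is
// one Newton-Raphson step x <- x * (2 - q*x) toward 1/q. A hedged scalar
// model of the two steps:
#if 0
inline float ReciprocalTwoSteps(float q, float x /* VRECPE estimate */) {
  x = x * (2.0f - q * x);  // first step
  x = x * (2.0f - q * x);  // second step
  return x;                // ~1/q to single-precision accuracy
}
#endif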
2536 
2537 void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
2538   ASSERT(qm != QTMP);
2539   ASSERT(qd != QTMP);
2540 
2541   // Reciprocal square root estimate.
2542   vrsqrteqs(qd, qm);
2543   // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
2544   // First step.
2545   vmulqs(QTMP, qd, qd);      // QTMP <- xn^2
2546   vrsqrtsqs(QTMP, qm, QTMP); // QTMP <- (3 - Q1*QTMP) / 2.
2547   vmulqs(qd, qd, QTMP);      // xn+1 <- xn * QTMP
2548   // Second step.
2549   vmulqs(QTMP, qd, qd);
2550   vrsqrtsqs(QTMP, qm, QTMP);
2551   vmulqs(qd, qd, QTMP);
2552 }
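
// Likewise VRSQRTS(q, x*x) computes (3 - q*x*x) / 2, giving the reciprocal
// square root iteration spelled out in the comment above. A hedged scalar
// model:
#if 0
inline float ReciprocalSqrtTwoSteps(float q, float x /* VRSQRTE estimate */) {
  x = x * ((3.0f - q * x * x) * 0.5f);  // first step
  x = x * ((3.0f - q * x * x) * 0.5f);  // second step
  return x;                             // ~1/sqrt(q)
}
#endif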
2553 
2554 void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
2555   ASSERT(temp != QTMP);
2556   ASSERT(qm != QTMP);
2557   ASSERT(qd != QTMP);
2558 
2559   if (temp != kNoQRegister) {
2560     vmovq(temp, qm);
2561     qm = temp;
2562   }
2563 
2564   VreciprocalSqrtqs(qd, qm);
2565   vmovq(qm, qd);
2566   Vreciprocalqs(qd, qm);
2567 }
2568 
2569 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2570   ASSERT(qd != QTMP);
2571   ASSERT(qn != QTMP);
2572   ASSERT(qm != QTMP);
2573 
2574   Vreciprocalqs(qd, qm);
2575   vmulqs(qd, qn, qd);
2576 }
2577 
2578 void Assembler::Branch(const StubEntry &stub_entry, Patchability patchable,
2579                        Register pp, Condition cond) {
2580   const Code &target_code = Code::Handle(stub_entry.code());
2581   const int32_t offset = ObjectPool::element_offset(
2582       object_pool_wrapper_.FindObject(target_code, patchable));
2583   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
2584   ldr(IP, FieldAddress(CODE_REG, Code::entry_point_offset()), cond);
2585   bx(IP, cond);
2586 }
2587 
2588 void Assembler::BranchLink(const Code &target, Patchability patchable) {
2589   // Make sure that class CallPattern is able to patch the label referred
2590   // to by this code sequence.
2591   // For added code robustness, use 'blx lr' in a patchable sequence and
2592   // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2593   const int32_t offset = ObjectPool::element_offset(
2594       object_pool_wrapper_.FindObject(target, patchable));
2595   LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
2596   ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset()));
2597   blx(LR); // Use blx instruction so that the return branch prediction works.
2598 }
2599 
2600 void Assembler::BranchLink(const StubEntry &stub_entry,
2601                            Patchability patchable) {
2602   const Code &code = Code::Handle(stub_entry.code());
2603   BranchLink(code, patchable);
2604 }
2605 
2606 void Assembler::BranchLinkPatchable(const Code &target) {
2607   BranchLink(target, kPatchable);
2608 }
2609 
2610 void Assembler::BranchLink(const ExternalLabel *label) {
2611   LoadImmediate(LR, label->address()); // Target address is never patched.
2612   blx(LR); // Use blx instruction so that the return branch prediction works.
2613 }
2614 
2615 void Assembler::BranchLinkPatchable(const StubEntry &stub_entry) {
2616   BranchLinkPatchable(Code::Handle(stub_entry.code()));
2617 }
2618 
2619 void Assembler::BranchLinkOffset(Register base, int32_t offset) {
2620   ASSERT(base != PC);
2621   ASSERT(base != IP);
2622   LoadFromOffset(kWord, IP, base, offset);
2623   blx(IP); // Use blx instruction so that the return branch prediction works.
2624 }
2625 
2626 void Assembler::LoadPatchableImmediate(Register rd, int32_t value,
2627                                        Condition cond) {
2628   const ARMVersion version = TargetCPUFeatures::arm_version();
2629   if ((version == ARMv5TE) || (version == ARMv6)) {
2630     // This sequence is patched in a few places, and should remain fixed.
2631     const uint32_t byte0 = (value & 0x000000ff);
2632     const uint32_t byte1 = (value & 0x0000ff00) >> 8;
2633     const uint32_t byte2 = (value & 0x00ff0000) >> 16;
2634     const uint32_t byte3 = (value & 0xff000000) >> 24;
2635     mov(rd, Operand(4, byte3), cond);
2636     orr(rd, rd, Operand(8, byte2), cond);
2637     orr(rd, rd, Operand(12, byte1), cond);
2638     orr(rd, rd, Operand(byte0), cond);
2639   } else {
2640     ASSERT(version == ARMv7);
2641     const uint16_t value_low = Utils::Low16Bits(value);
2642     const uint16_t value_high = Utils::High16Bits(value);
2643     movw(rd, value_low, cond);
2644     movt(rd, value_high, cond);
2645   }
2646 }
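
// In the ARMv5TE/v6 path above, Operand(rot, imm8) is an ARM "rotated
// immediate": the 8-bit value is rotated right by 2*rot, so rot 4/8/12 place
// byte3/byte2/byte1 at bits 31:24, 23:16, and 15:8. A hedged sketch of the
// rotation arithmetic:
#if 0
#include <cassert>
#include <cstdint>

inline uint32_t RotateRight(uint32_t v, unsigned n) {
  n &= 31;
  return (n == 0) ? v : ((v >> n) | (v << (32 - n)));
}

inline void RotatedImmediateExample() {
  const uint32_t byte3 = 0xab;
  assert(RotateRight(byte3, 2 * 4) == (byte3 << 24));  // Operand(4, byte3)
}
#endif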
2647 
2648 void Assembler::LoadDecodableImmediate(Register rd, int32_t value,
2649                                        Condition cond) {
2650   const ARMVersion version = TargetCPUFeatures::arm_version();
2651   if ((version == ARMv5TE) || (version == ARMv6)) {
2652     if (constant_pool_allowed()) {
2653       const int32_t offset = Array::element_offset(FindImmediate(value));
2654       LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
2655     } else {
2656       LoadPatchableImmediate(rd, value, cond);
2657     }
2658   } else {
2659     ASSERT(version == ARMv7);
2660     movw(rd, Utils::Low16Bits(value), cond);
2661     const uint16_t value_high = Utils::High16Bits(value);
2662     if (value_high != 0) {
2663       movt(rd, value_high, cond);
2664     }
2665   }
2666 }
2667 
2668 void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
2669   Operand o;
2670   if (Operand::CanHold(value, &o)) {
2671     mov(rd, o, cond);
2672   } else if (Operand::CanHold(~value, &o)) {
2673     mvn(rd, o, cond);
2674   } else {
2675     LoadDecodableImmediate(rd, value, cond);
2676   }
2677 }
2678 
2679 void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) {
2680   if (!vmovs(sd, value, cond)) {
2681     const DRegister dd = static_cast<DRegister>(sd >> 1);
2682     const int index = sd & 1;
2683     LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
2684     vmovdr(dd, index, IP, cond);
2685   }
2686 }
2687 
2688 void Assembler::LoadDImmediate(DRegister dd, double value, Register scratch,
2689                                Condition cond) {
2690   ASSERT(scratch != PC);
2691   ASSERT(scratch != IP);
2692   if (!vmovd(dd, value, cond)) {
2693     // A scratch register and IP are needed to load an arbitrary double.
2694     ASSERT(scratch != kNoRegister);
2695     int64_t imm64 = bit_cast<int64_t, double>(value);
2696     LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
2697     LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
2698     vmovdrr(dd, IP, scratch, cond);
2699   }
2700 }
2701 
2702 void Assembler::LoadFromOffset(OperandSize size, Register reg, Register base,
2703                                int32_t offset, Condition cond) {
2704   int32_t offset_mask = 0;
2705   if (!Address::CanHoldLoadOffset(size, offset, &offset_mask)) {
2706     ASSERT(base != IP);
2707     AddImmediate(IP, base, offset & ~offset_mask, cond);
2708     base = IP;
2709     offset = offset & offset_mask;
2710   }
2711   switch (size) {
2712   case kByte:
2713     ldrsb(reg, Address(base, offset), cond);
2714     break;
2715   case kUnsignedByte:
2716     ldrb(reg, Address(base, offset), cond);
2717     break;
2718   case kHalfword:
2719     ldrsh(reg, Address(base, offset), cond);
2720     break;
2721   case kUnsignedHalfword:
2722     ldrh(reg, Address(base, offset), cond);
2723     break;
2724   case kWord:
2725     ldr(reg, Address(base, offset), cond);
2726     break;
2727   case kWordPair:
2728     ldrd(reg, base, offset, cond);
2729     break;
2730   default:
2731     UNREACHABLE();
2732   }
2733 }
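
// When the offset does not fit the addressing mode, its high bits are folded
// into IP first. A sketch for a word load at offset 0x1234, assuming the
// immediate-offset mask reported for ldr is 0xfff:
//   add ip, base, #0x1000
//   ldr reg, [ip, #0x234]
// Note that kByte/kHalfword select the sign-extending loads (ldrsb/ldrsh)
// while the kUnsigned* sizes select the zero-extending ones (ldrb/ldrh).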

void Assembler::StoreToOffset(OperandSize size, Register reg, Register base,
                              int32_t offset, Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
    ASSERT(reg != IP);
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  switch (size) {
  case kByte:
    strb(reg, Address(base, offset), cond);
    break;
  case kHalfword:
    strh(reg, Address(base, offset), cond);
    break;
  case kWord:
    str(reg, Address(base, offset), cond);
    break;
  case kWordPair:
    strd(reg, base, offset, cond);
    break;
  default:
    UNREACHABLE();
  }
}

void Assembler::LoadSFromOffset(SRegister reg, Register base, int32_t offset,
                                Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldLoadOffset(kSWord, offset, &offset_mask)) {
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vldrs(reg, Address(base, offset), cond);
}

void Assembler::StoreSToOffset(SRegister reg, Register base, int32_t offset,
                               Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(kSWord, offset, &offset_mask)) {
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vstrs(reg, Address(base, offset), cond);
}

void Assembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset,
                                Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldLoadOffset(kDWord, offset, &offset_mask)) {
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vldrd(reg, Address(base, offset), cond);
}

void Assembler::StoreDToOffset(DRegister reg, Register base, int32_t offset,
                               Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(kDWord, offset, &offset_mask)) {
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vstrd(reg, Address(base, offset), cond);
}

void Assembler::LoadMultipleDFromOffset(DRegister first, intptr_t count,
                                        Register base, int32_t offset) {
  ASSERT(base != IP);
  AddImmediate(IP, base, offset);
  vldmd(IA, IP, first, count);
}

void Assembler::StoreMultipleDToOffset(DRegister first, intptr_t count,
                                       Register base, int32_t offset) {
  ASSERT(base != IP);
  AddImmediate(IP, base, offset);
  vstmd(IA, IP, first, count);
}

void Assembler::CopyDoubleField(Register dst, Register src, Register tmp1,
                                Register tmp2, DRegister dtmp) {
  if (TargetCPUFeatures::vfp_supported()) {
    LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag);
    StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag);
  } else {
    LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   Double::value_offset() + kWordSize - kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  Double::value_offset() + kWordSize - kHeapObjectTag);
  }
}

void Assembler::CopyFloat32x4Field(Register dst, Register src, Register tmp1,
                                   Register tmp2, DRegister dtmp) {
  if (TargetCPUFeatures::neon_supported()) {
    LoadMultipleDFromOffset(dtmp, 2, src,
                            Float32x4::value_offset() - kHeapObjectTag);
    StoreMultipleDToOffset(dtmp, 2, dst,
                           Float32x4::value_offset() - kHeapObjectTag);
  } else {
    LoadFromOffset(kWord, tmp1, src,
                   (Float32x4::value_offset() + 0 * kWordSize) -
                       kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   (Float32x4::value_offset() + 1 * kWordSize) -
                       kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
                  (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);

    LoadFromOffset(kWord, tmp1, src,
                   (Float32x4::value_offset() + 2 * kWordSize) -
                       kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   (Float32x4::value_offset() + 3 * kWordSize) -
                       kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
                  (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
  }
}

void Assembler::CopyFloat64x2Field(Register dst, Register src, Register tmp1,
                                   Register tmp2, DRegister dtmp) {
  if (TargetCPUFeatures::neon_supported()) {
    LoadMultipleDFromOffset(dtmp, 2, src,
                            Float64x2::value_offset() - kHeapObjectTag);
    StoreMultipleDToOffset(dtmp, 2, dst,
                           Float64x2::value_offset() - kHeapObjectTag);
  } else {
    LoadFromOffset(kWord, tmp1, src,
                   (Float64x2::value_offset() + 0 * kWordSize) -
                       kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   (Float64x2::value_offset() + 1 * kWordSize) -
                       kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
                  (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);

    LoadFromOffset(kWord, tmp1, src,
                   (Float64x2::value_offset() + 2 * kWordSize) -
                       kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   (Float64x2::value_offset() + 3 * kWordSize) -
                       kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
                  (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
  }
}

void Assembler::AddImmediate(Register rd, int32_t value, Condition cond) {
  AddImmediate(rd, rd, value, cond);
}

void Assembler::AddImmediate(Register rd, Register rn, int32_t value,
                             Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, Operand(rn), cond);
    }
    return;
  }
  // We prefer to emit the shorter code sequence rather than always using add
  // for positive values and sub for negative ones, even though the latter
  // would make the generated code slightly more readable for some constants.
  Operand o;
  if (Operand::CanHold(value, &o)) {
    add(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    sub(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      add(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      mvn(IP, o, cond);
      sub(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      add(rd, rn, Operand(IP), cond);
    }
  }
}
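
// A worked example of the mvn fallback, assuming rd == R0 and rn == R1:
// AddImmediate(R0, R1, 257). Neither 257, -257, nor ~257 is encodable as a
// rotated 8-bit immediate, but ~(-257) == 256 is, so the sequence emitted is
//   mvn ip, #256      ; ip = ~256 = -257
//   sub r0, r1, ip    ; r0 = r1 - (-257) = r1 + 257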

void Assembler::AddImmediateSetFlags(Register rd, Register rn, int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    adds(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
    subs(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      adds(rd, rn, Operand(IP), cond);
    }
  }
}

void Assembler::SubImmediateSetFlags(Register rd, Register rn, int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    subs(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
    adds(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      subs(rd, rn, Operand(IP), cond);
    }
  }
}

void Assembler::AndImmediate(Register rd, Register rs, int32_t imm,
                             Condition cond) {
  Operand o;
  if (Operand::CanHold(imm, &o)) {
    and_(rd, rs, Operand(o), cond);
  } else {
    LoadImmediate(TMP, imm, cond);
    and_(rd, rs, Operand(TMP), cond);
  }
}

void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    cmp(rn, o, cond);
  } else {
    ASSERT(rn != IP);
    LoadImmediate(IP, value, cond);
    cmp(rn, Operand(IP), cond);
  }
}

void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) {
  Operand o;
  if (Operand::CanHold(imm, &o)) {
    tst(rn, o, cond);
  } else {
    LoadImmediate(IP, imm, cond);
    tst(rn, Operand(IP), cond);
  }
}

void Assembler::IntegerDivide(Register result, Register left, Register right,
                              DRegister tmpl, DRegister tmpr) {
  ASSERT(tmpl != tmpr);
  if (TargetCPUFeatures::integer_division_supported()) {
    sdiv(result, left, right);
  } else {
    ASSERT(TargetCPUFeatures::vfp_supported());
    SRegister stmpl = static_cast<SRegister>(2 * tmpl);
    SRegister stmpr = static_cast<SRegister>(2 * tmpr);
    vmovsr(stmpl, left);
    vcvtdi(tmpl, stmpl); // left is in tmpl.
    vmovsr(stmpr, right);
    vcvtdi(tmpr, stmpr); // right is in tmpr.
    vdivd(tmpr, tmpl, tmpr);
    vcvtid(stmpr, tmpr);
    vmovrs(result, stmpr);
  }
}
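
// Without hardware sdiv the quotient is computed in double precision and
// converted back with vcvtid, which rounds toward zero, matching sdiv's
// truncating semantics (e.g. 7 / 2 -> 3, -7 / 2 -> -3). Every int32 is
// exactly representable as a double, so no precision is lost converting in.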

static int NumRegsBelowFP(RegList regs) {
  int count = 0;
  for (int i = 0; i < FP; i++) {
    if ((regs & (1 << i)) != 0) {
      count++;
    }
  }
  return count;
}

void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
  if (prologue_offset_ == -1) {
    prologue_offset_ = CodeSize();
  }
  PushList(regs);
  if ((regs & (1 << FP)) != 0) {
    // Set FP to the saved previous FP.
    add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
  }
  AddImmediate(SP, -frame_size);
}

void Assembler::LeaveFrame(RegList regs) {
  ASSERT((regs & (1 << PC)) == 0); // Must not pop PC.
  if ((regs & (1 << FP)) != 0) {
    // Use FP to set SP.
    sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
  }
  PopList(regs);
}
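
// PushList stores the lowest-numbered register at the lowest address, so
// after the push FP's own slot sits 4 * NumRegsBelowFP(regs) bytes above SP.
// A sketch with regs == (1 << R4) | (1 << FP) | (1 << LR) and a 16-byte
// frame:
//   push {r4, fp, lr}
//   add fp, sp, #4    ; one register (r4) lies below fp
//   sub sp, sp, #16
// LeaveFrame recomputes the same distance from FP, so it unwinds the frame
// exactly even if SP has been adjusted in between.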

void Assembler::Ret() { bx(LR); }

void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
  }
}
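
// For example, with an 8-byte activation frame alignment this emits
//   sub sp, sp, #frame_space
//   bic sp, sp, #7
// rounding SP down so the C++ callee sees an aligned frame.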

void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
  // Preserve volatile CPU registers and PP.
  EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);

  // Preserve all volatile FPU registers. A single vstmd can transfer at most
  // 16 D-registers, so split the range in two when it is larger than that.
  if (TargetCPUFeatures::vfp_supported()) {
    DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
    DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
    if ((lastv - firstv + 1) >= 16) {
      DRegister mid = static_cast<DRegister>(firstv + 16);
      vstmd(DB_W, SP, mid, lastv - mid + 1);
      vstmd(DB_W, SP, firstv, 16);
    } else {
      vstmd(DB_W, SP, firstv, lastv - firstv + 1);
    }
  }

  LoadPoolPointer();

  ReserveAlignedFrameSpace(frame_space);
}

void Assembler::LeaveCallRuntimeFrame() {
  // SP might have been modified to reserve space for arguments
  // and ensure proper alignment of the stack frame.
  // We need to restore it before restoring registers.
  const intptr_t kPushedFpuRegisterSize =
      TargetCPUFeatures::vfp_supported()
          ? kDartVolatileFpuRegCount * kFpuRegisterSize
          : 0;

  COMPILE_ASSERT(PP < FP);
  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
  // kDartVolatileCpuRegCount + 1 for PP, - 1 for LR: even though LR is
  // volatile, it is pushed ahead of FP and therefore not below it, so the
  // two corrections cancel out.
  const intptr_t kPushedRegistersSize =
      kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
  AddImmediate(SP, FP, -kPushedRegistersSize);

  // Restore all volatile FPU registers.
  if (TargetCPUFeatures::vfp_supported()) {
    DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
    DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
    if ((lastv - firstv + 1) >= 16) {
      DRegister mid = static_cast<DRegister>(firstv + 16);
      vldmd(IA_W, SP, firstv, 16);
      vldmd(IA_W, SP, mid, lastv - mid + 1);
    } else {
      vldmd(IA_W, SP, firstv, lastv - firstv + 1);
    }
  }

  // Restore volatile CPU registers.
  LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
}

void Assembler::CallRuntime(const RuntimeEntry &entry,
                            intptr_t argument_count) {
  entry.Call(this, argument_count);
}

void Assembler::EnterDartFrame(intptr_t frame_size) {
  ASSERT(!constant_pool_allowed());

  // Registers are pushed in descending order: R9 | R10 | R11 | R14.
  EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);

  // Set up the pool pointer for this Dart function.
  LoadPoolPointer();

  // Reserve space for locals.
  AddImmediate(SP, -frame_size);
}

// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place.  The frame
// pointer is already set up.  The PC marker is not correct for the
// optimized function and there may be extra space for spill slots to
// allocate. We must also set up the pool pointer for the function.
void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!constant_pool_allowed());
  Comment("EnterOsrFrame");
  RestoreCodePointer();
  LoadPoolPointer();

  AddImmediate(SP, -extra_size);
}

void Assembler::LeaveDartFrame(RestorePP restore_pp) {
  if (restore_pp == kRestoreCallerPP) {
    ldr(PP, Address(FP, kSavedCallerPpSlotFromFp * kWordSize));
    set_constant_pool_allowed(false);
  }
  Drop(2); // Drop saved PP, PC marker.
  LeaveFrame((1 << FP) | (1 << LR));
}

void Assembler::EnterStubFrame() { EnterDartFrame(0); }

void Assembler::LeaveStubFrame() { LeaveDartFrame(); }

void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid,
                                           bool inline_isolate) {
  ASSERT(dest != kNoRegister);
  ASSERT(dest != TMP);
  ASSERT(cid > 0);
  const intptr_t class_offset = ClassTable::ClassOffsetFor(cid);
  if (inline_isolate) {
    ASSERT(FLAG_allow_absolute_addresses);
    ClassTable *class_table = Isolate::Current()->class_table();
    ClassHeapStats **table_ptr = class_table->TableAddressFor(cid);
    if (cid < kNumPredefinedCids) {
      LoadImmediate(dest, reinterpret_cast<uword>(*table_ptr) + class_offset);
    } else {
      LoadImmediate(dest, reinterpret_cast<uword>(table_ptr));
      ldr(dest, Address(dest, 0));
      AddImmediate(dest, class_offset);
    }
  } else {
    LoadIsolate(dest);
    intptr_t table_offset =
        Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
    ldr(dest, Address(dest, table_offset));
    AddImmediate(dest, class_offset);
  }
}

void Assembler::MaybeTraceAllocation(intptr_t cid, Register temp_reg,
                                     Label *trace, bool inline_isolate) {
  LoadAllocationStatsAddress(temp_reg, cid, inline_isolate);
  const uword state_offset = ClassHeapStats::state_offset();
  ldr(temp_reg, Address(temp_reg, state_offset));
  tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask()));
  b(trace, NE);
}

void Assembler::IncrementAllocationStats(Register stats_addr_reg, intptr_t cid,
                                         Heap::Space space) {
  ASSERT(stats_addr_reg != kNoRegister);
  ASSERT(stats_addr_reg != TMP);
  ASSERT(cid > 0);
  const uword count_field_offset =
      (space == Heap::kNew)
          ? ClassHeapStats::allocated_since_gc_new_space_offset()
          : ClassHeapStats::allocated_since_gc_old_space_offset();
  const Address &count_address = Address(stats_addr_reg, count_field_offset);
  ldr(TMP, count_address);
  AddImmediate(TMP, 1);
  str(TMP, count_address);
}

void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg,
                                                 Register size_reg,
                                                 Heap::Space space) {
  ASSERT(stats_addr_reg != kNoRegister);
  ASSERT(stats_addr_reg != TMP);
  const uword count_field_offset =
      (space == Heap::kNew)
          ? ClassHeapStats::allocated_since_gc_new_space_offset()
          : ClassHeapStats::allocated_since_gc_old_space_offset();
  const uword size_field_offset =
      (space == Heap::kNew)
          ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
          : ClassHeapStats::allocated_size_since_gc_old_space_offset();
  const Address &count_address = Address(stats_addr_reg, count_field_offset);
  const Address &size_address = Address(stats_addr_reg, size_field_offset);
  ldr(TMP, count_address);
  AddImmediate(TMP, 1);
  str(TMP, count_address);
  ldr(TMP, size_address);
  add(TMP, TMP, Operand(size_reg));
  str(TMP, size_address);
}

void Assembler::TryAllocate(const Class &cls, Label *failure,
                            Register instance_reg, Register temp_reg) {
  ASSERT(failure != NULL);
  if (FLAG_inline_alloc) {
    ASSERT(instance_reg != temp_reg);
    ASSERT(temp_reg != IP);
    const intptr_t instance_size = cls.instance_size();
    ASSERT(instance_size != 0);
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
    MaybeTraceAllocation(cls.id(), temp_reg, failure,
                         /* inline_isolate = */ false);
    Heap::Space space = Heap::SpaceForAllocation(cls.id());
    ldr(temp_reg, Address(THR, Thread::heap_offset()));
    ldr(instance_reg, Address(temp_reg, Heap::TopOffset(space)));
    // TODO(koda): Protect against unsigned overflow here.
    AddImmediateSetFlags(instance_reg, instance_reg, instance_size);

    // instance_reg: potential next object start.
    ldr(IP, Address(temp_reg, Heap::EndOffset(space)));
    cmp(IP, Operand(instance_reg));
    // Fail if the heap end is unsigned less than or equal to instance_reg.
    b(failure, LS);

    // Successfully allocated the object, now update top to point to
    // next object start and store the class in the class field of object.
    str(instance_reg, Address(temp_reg, Heap::TopOffset(space)));

    LoadAllocationStatsAddress(temp_reg, cls.id(),
                               /* inline_isolate = */ false);

    ASSERT(instance_size >= kHeapObjectTag);
    AddImmediate(instance_reg, -instance_size + kHeapObjectTag);

    uword tags = 0;
    tags = RawObject::SizeTag::update(instance_size, tags);
    ASSERT(cls.id() != kIllegalCid);
    tags = RawObject::ClassIdTag::update(cls.id(), tags);
    LoadImmediate(IP, tags);
    str(IP, FieldAddress(instance_reg, Object::tags_offset()));

    IncrementAllocationStats(temp_reg, cls.id(), space);
  } else {
    b(failure);
  }
}
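
// A sketch of the fast path emitted above (eliding the trace check and the
// stats update), for instance_size == 16 and kHeapObjectTag == 1; the
// bracketed field names are shorthand for the Thread/Heap offsets used:
//   ldr temp, [thr, #heap]
//   ldr inst, [temp, #top]
//   adds inst, inst, #16      ; candidate next-object start
//   ldr ip, [temp, #end]
//   cmp ip, inst
//   bls failure               ; out of space: defer to the allocation stub
//   str inst, [temp, #top]    ; commit the bump allocation
//   sub inst, inst, #15       ; back to object start, heap-tagged
//   ...                       ; store the tags word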

void Assembler::TryAllocateArray(intptr_t cid, intptr_t instance_size,
                                 Label *failure, Register instance,
                                 Register end_address, Register temp1,
                                 Register temp2) {
  if (FLAG_inline_alloc) {
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
    MaybeTraceAllocation(cid, temp1, failure, /* inline_isolate = */ false);
    Heap::Space space = Heap::SpaceForAllocation(cid);
    ldr(temp1, Address(THR, Thread::heap_offset()));
    // Potential new object start.
    ldr(instance, Address(temp1, Heap::TopOffset(space)));
    AddImmediateSetFlags(end_address, instance, instance_size);
    b(failure, CS); // Branch if unsigned overflow.

    // Check if the allocation fits into the remaining space.
    // instance: potential new object start.
    // end_address: potential next object start.
    ldr(temp2, Address(temp1, Heap::EndOffset(space)));
    cmp(end_address, Operand(temp2));
    b(failure, CS);

    LoadAllocationStatsAddress(temp2, cid, /* inline_isolate = */ false);

    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    str(end_address, Address(temp1, Heap::TopOffset(space)));
    add(instance, instance, Operand(kHeapObjectTag));

    // Initialize the tags.
    // instance: new object start as a tagged pointer.
    uword tags = 0;
    tags = RawObject::ClassIdTag::update(cid, tags);
    tags = RawObject::SizeTag::update(instance_size, tags);
    LoadImmediate(temp1, tags);
    str(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags.

    LoadImmediate(temp1, instance_size);
    IncrementAllocationStatsWithSize(temp2, temp1, space);
  } else {
    b(failure);
  }
}

void Assembler::Stop(const char *message) {
  if (FLAG_print_stop_message) {
    PushList((1 << R0) | (1 << IP) | (1 << LR)); // Preserve R0, IP, LR.
    LoadImmediate(R0, reinterpret_cast<int32_t>(message));
    // PrintStopMessage() preserves all registers.
    BranchLink(&StubCode::PrintStopMessage_entry()->label());
    PopList((1 << R0) | (1 << IP) | (1 << LR)); // Restore R0, IP, LR.
  }
  // Emit the message address before the breakpoint instruction, so that we
  // can 'unstop' and continue execution in the simulator or jump to the next
  // instruction in gdb.
  Label stop;
  b(&stop);
  Emit(reinterpret_cast<int32_t>(message));
  Bind(&stop);
  bkpt(Instr::kStopMessageCode);
}
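
// The emitted layout is:
//     b stop              ; skip the embedded pointer
//     .word message       ; address of the message C-string
//   stop:
//     bkpt kStopMessageCode
// A debugger or the simulator that halts on the bkpt can read the message
// address from the preceding word and then resume at the next instruction.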

Address Assembler::ElementAddressForIntIndex(bool is_load, bool is_external,
                                             intptr_t cid, intptr_t index_scale,
                                             Register array, intptr_t index,
                                             Register temp) {
  const int64_t offset_base =
      (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
  const int64_t offset =
      offset_base + static_cast<int64_t>(index) * index_scale;
  ASSERT(Utils::IsInt(32, offset));

  if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
    return Address(array, static_cast<int32_t>(offset));
  } else {
    ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base));
    AddImmediate(temp, array, static_cast<int32_t>(offset_base));
    return Address(temp, static_cast<int32_t>(offset - offset_base));
  }
}

Address Assembler::ElementAddressForRegIndex(bool is_load, bool is_external,
                                             intptr_t cid, intptr_t index_scale,
                                             Register array, Register index) {
  // Note that index is expected to be smi-tagged (i.e., LSL 1) for all
  // arrays.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
  int32_t offset =
      is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
  const OperandSize size = Address::OperandSizeFor(cid);
  ASSERT(array != IP);
  ASSERT(index != IP);
  const Register base = is_load ? IP : index;
  if ((offset != 0) || (size == kSWord) || (size == kDWord) ||
      (size == kRegList)) {
    if (shift < 0) {
      ASSERT(shift == -1);
      add(base, array, Operand(index, ASR, 1));
    } else {
      add(base, array, Operand(index, LSL, shift));
    }
  } else {
    if (shift < 0) {
      ASSERT(shift == -1);
      return Address(array, index, ASR, 1);
    } else {
      return Address(array, index, LSL, shift);
    }
  }
  int32_t offset_mask = 0;
  if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) ||
      (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) {
    AddImmediate(base, offset & ~offset_mask);
    offset = offset & offset_mask;
  }
  return Address(base, offset);
}
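
// Because the index register still carries the smi tag, the scale shift is
// reduced by kSmiTagShift. A sketch for an 8-byte element (index_scale == 8,
// kSmiTagShift == 1): shift == 3 - 1 == 2, so the element address is formed
// with
//   add ip, array, index, LSL #2   ; untagged index * 8
// and the constant data offset is folded in afterwards if it does not fit
// the addressing mode. For one-byte elements shift is -1 and the tag is
// stripped with ASR #1 instead.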

static const char *cpu_reg_names[kNumberOfCpuRegisters] = {
    "r0", "r1",  "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "ctx", "pp", "fp", "ip", "sp", "lr", "pc",
};

const char *Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}

static const char *fpu_reg_names[kNumberOfFpuRegisters] = {
    "q0", "q1", "q2",  "q3",  "q4",  "q5",  "q6",  "q7",
#if defined(VFPv3_D32)
    "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
#endif
};

const char *Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}

} // namespace dart

#endif // defined TARGET_ARCH_ARM