// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_ARM

#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

#include "src/arm/macro-assembler-arm.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Unlike TurboAssembler, which can be used off the main thread and may not
    // allocate, macro assembler creates its own copy of the self-reference
    // marker in order to disambiguate between self-references during nested
    // code generation (e.g.: codegen of the current object triggers stub
    // compilation through CodeStub::GetCode()).
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}
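
// Returns the number of bytes that Push/PopCallerSaved would move sp by,
// without emitting any code. Up to three registers may be excluded from the
// caller-saved set.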
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;

  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}
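
// Pushes the caller-saved core registers (plus lr), minus up to three
// exclusions, and optionally all VFP double registers. Returns the number of
// bytes pushed; PopCallerSaved below restores them in reverse order.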
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  stm(db_w, sp, list);

  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    SaveFPRegs(sp, lr);
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    RestoreFPRegs(sp, lr);
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  ldm(ia_w, sp, list);

  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
      Heap::kBuiltinsConstantsTableRootIndex));

  // The ldr call below could end up clobbering ip when the offset does not fit
  // into 12 bits (and thus needs to be loaded from the constant pool). In that
  // case, we need to be extra-careful and temporarily use another register as
  // the target.

  const uint32_t offset =
      FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
  const bool could_clobber_ip = !is_uint12(offset);

  Register reg = destination;
  if (could_clobber_ip) {
    Push(r7);
    reg = r7;
  }

  LoadRoot(reg, Heap::kBuiltinsConstantsTableRootIndex);
  ldr(destination, MemOperand(reg, offset));

  if (could_clobber_ip) {
    DCHECK_EQ(reg, r7);
    Pop(r7);
  }
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  ldr(destination, MemOperand(kRootRegister, offset));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    Move(destination, kRootRegister);
  } else {
    add(destination, kRootRegister, Operand(offset));
  }
}

void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
}
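
// With embedded builtins, a jump to an isolate-independent builtin can avoid
// embedding an isolate-specific code address: either as a pc-relative branch
// within the same code blob, an indirect load of the code object through the
// root register, or an inlined off-heap trampoline through ip. The Call
// overload for Handle<Code> below follows the same strategy.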
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (FLAG_embedded_builtins) {
    int builtin_index = Builtins::kNoBuiltinId;
    bool target_is_isolate_independent_builtin =
        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index);
    if (target_is_isolate_independent_builtin &&
        options().use_pc_relative_calls_and_jumps) {
      int32_t code_target_index = AddCodeTarget(code);
      b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
      return;
    } else if (root_array_available_ && options().isolate_independent_code) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      IndirectLoadConstant(scratch, code);
      add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
      Jump(scratch, cond);
      return;
    } else if (target_is_isolate_independent_builtin &&
               options().inline_offheap_trampolines) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      // Use ip directly instead of using UseScratchRegisterScope, as we do not
      // preserve scratch registers across calls.
      mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
      Jump(ip, cond);
      return;
    }
  }
  // 'code' is always generated ARM code, never THUMB code.
  Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}

void TurboAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  blx(target, cond);
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
                          TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  // Check if we have to emit the constant pool before we block it.
  if (check_constant_pool) MaybeCheckConstPool();
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

  // Use ip directly instead of using UseScratchRegisterScope, as we do not
  // preserve scratch registers across calls.

  // The call sequence on V7 or later may be:
  //  movw ip, #... @ call address low 16
  //  movt ip, #... @ call address high 16
  //  blx ip
  //                @ return address
  // Or, for pre-V7 targets or for values that may be back-patched
  // to avoid ICache flushes:
  //  ldr ip, [pc, #...] @ call address
  //  blx ip
  //                     @ return address

  mov(ip, Operand(target, rmode));
  blx(ip, cond);

  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (FLAG_embedded_builtins) {
    int builtin_index = Builtins::kNoBuiltinId;
    bool target_is_isolate_independent_builtin =
        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index);
    if (target_is_isolate_independent_builtin &&
        options().use_pc_relative_calls_and_jumps) {
      int32_t code_target_index = AddCodeTarget(code);
      bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
      return;
    } else if (root_array_available_ && options().isolate_independent_code) {
      // Use ip directly instead of using UseScratchRegisterScope, as we do not
      // preserve scratch registers across calls.
      IndirectLoadConstant(ip, code);
      add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
      Call(ip, cond);
      return;
    } else if (target_is_isolate_independent_builtin &&
               options().inline_offheap_trampolines) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      // Use ip directly instead of using UseScratchRegisterScope, as we do not
      // preserve scratch registers across calls.
      mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
      Call(ip, cond);
      return;
    }
  }
  // 'code' is always generated ARM code, never THUMB code.
  Call(code.address(), rmode, cond, mode);
}

void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }

void TurboAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}

void TurboAssembler::Drop(Register count, Condition cond) {
  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}

void TurboAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}

void TurboAssembler::Call(Label* target) { bl(target); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(handle));
  push(scratch);
}

void TurboAssembler::Push(Smi* smi) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(smi));
  push(scratch);
}

void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(dst, value);
      return;
    }
  }
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, reference);
      return;
    }
  }
  mov(dst, Operand(reference));
}

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  if (dst != src) {
    mov(dst, src, LeaveCC, cond);
  }
}

void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
  if (dst != src) {
    vmov(dst, src);
  }
}

void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, srcdst0);
  mov(srcdst0, srcdst1);
  mov(srcdst1, scratch);
}

void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  DCHECK(VfpRegisterIsAvailable(srcdst0));
  DCHECK(VfpRegisterIsAvailable(srcdst1));

  if (CpuFeatures::IsSupported(NEON)) {
    vswp(srcdst0, srcdst1);
  } else {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    vmov(scratch, srcdst0);
    vmov(srcdst0, srcdst1);
    vmov(srcdst1, scratch);
  }
}

void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  vswp(srcdst0, srcdst1);
}
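
// Multiply-and-subtract: dst = srcA - src1 * src2. Uses the ARMv7 mls
// instruction when available; otherwise falls back to a mul/sub pair through
// a scratch register.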
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(this, ARMv7);
    mls(dst, src1, src2, srcA, cond);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(srcA != scratch);
    mul(scratch, src1, src2, LeaveCC, cond);
    sub(dst, srcA, scratch, LeaveCC, cond);
  }
}
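
// Like and_, but when src2 is an immediate of the form 2^n - 1 that cannot be
// encoded in a single instruction, extracts the low n bits with ubfx instead
// of materializing the mask.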
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.InstructionsRequired(this) == 1) &&
             !src2.MustOutputRelocInfo(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}
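
// Unsigned bitfield extract: dst receives the width bits of src1 starting at
// lsb, zero-extended. For example, Ubfx(r0, r1, 4, 8) extracts bits 11:4 of
// r1 into r0. Falls back to an and/shift sequence when ubfx is unavailable
// or predictable code size is required.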
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, lsb, width, cond);
  }
}
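
// Signed bitfield extract: like Ubfx above, but the extracted field is
// sign-extended. The fallback masks the field, then shifts it up and
// arithmetically back down to replicate the sign bit.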
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    sbfx(dst, src1, lsb, width, cond);
  }
}
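
// Bitfield clear: zeroes the width bits of src starting at lsb, writing the
// result to dst. Uses bic with an explicit mask when bfc is unavailable.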
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    CpuFeatureScope scope(this, ARMv7);
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}

void MacroAssembler::Load(Register dst, const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}

void MacroAssembler::Store(Register src, const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}

void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand(kPointerSize - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }

  stm(db_w, sp, regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  ldm(ia_w, sp, regs);
}
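
// Calls the RecordWrite builtin, preserving the builtin's allocatable
// registers around the call. The object and slot address are passed through
// the stack so that moving them into the parameter registers cannot clobber
// either value.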
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. we always emit the remembered set and save FP registers in
  // RecordWriteStub. If a large performance regression is observed, we should
  // use these values to avoid unnecessary work.

  Callable const callable =
      Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
  RegList registers = callable.descriptor().allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kObject));
  Register slot_parameter(
      callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register isolate_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kIsolate));
  Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  Call(callable.code(), RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}

// Will clobber 3 registers: object, address, and value. The register 'object'
// contains a heap object pointer. The heap object tag is shifted away.
// A scratch register also needs to be available.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      ldr(scratch, MemOperand(address));
      cmp(scratch, value);
    }
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  {
    UseScratchRegisterScope temps(this);
    IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
                     temps.Acquire(), value);
  }

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}
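
// Pushes the common frame header (optional marker, fp, lr) and leaves fp
// pointing at the saved fp slot. Since stm stores registers in ascending
// code order, the marker can only be included in the stm when its encoding
// is below fp's; otherwise it is pushed separately afterwards.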
void TurboAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    if (marker_reg.code() > fp.code()) {
      stm(db_w, sp, fp.bit() | lr.bit());
      mov(fp, Operand(sp));
      Push(marker_reg);
    } else {
      stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
      add(fp, sp, Operand(kPointerSize));
    }
  } else {
    stm(db_w, sp, fp.bit() | lr.bit());
    mov(fp, sp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
  stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
                    fp.bit() | lr.bit());
  int offset = -StandardFrameConstants::kContextOffset;
  offset += function_reg.is_valid() ? kPointerSize : 0;
  add(fp, sp, Operand(offset));
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0.
  DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}

void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
  // become quiet NaNs. We use vsub rather than vadd because vsub preserves
  // -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  vsub(dst, src, kDoubleRegZero, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const SwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const float src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const SwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const float src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(NeonS32, dst, src, 1);
  }
}

void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(NeonS32, dst, 1, src);
  }
}

void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(NeonS32, dst, src, 0);
  }
}

void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(NeonS32, dst, 0, src);
  }
}
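
// The VmovExtended helpers address the full VFP register file via "extended"
// s-register codes: codes 0..31 are s0..s31, while codes 32..63 denote the
// low (even code) or high (odd code) word of d16..d31, which have no
// s-register aliases.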
void TurboAssembler::VmovExtended(Register dst, int src_code) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
  if (src_code & 0x1) {
    VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
  } else {
    VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
  }
}

void TurboAssembler::VmovExtended(int dst_code, Register src) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
  if (dst_code & 0x1) {
    VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
  } else {
    VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
  }
}

void TurboAssembler::VmovExtended(int dst_code, int src_code) {
  if (src_code == dst_code) return;

  if (src_code < SwVfpRegister::kNumRegisters &&
      dst_code < SwVfpRegister::kNumRegisters) {
    // src and dst are both s-registers.
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(src_code));
    return;
  }
  DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
  DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
  int dst_offset = dst_code & 1;
  int src_offset = src_code & 1;
  if (CpuFeatures::IsSupported(NEON)) {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    // On NEON we can shift and insert from d-registers.
    if (src_offset == dst_offset) {
      // Offsets are the same, use vdup to copy the source to the opposite
      // lane.
      vdup(Neon32, scratch, src_d_reg, src_offset);
      // Here we are extending the lifetime of scratch.
      src_d_reg = scratch;
      src_offset = dst_offset ^ 1;
    }
    if (dst_offset) {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 0);
      } else {
        vsli(Neon64, dst_d_reg, src_d_reg, 32);
      }
    } else {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 1);
      } else {
        vsri(Neon64, dst_d_reg, src_d_reg, 32);
      }
    }
    return;
  }

  // Without NEON, use the scratch registers to move src and/or dst into
  // s-registers.
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister d_scratch = temps.AcquireLowD();
  LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
  int s_scratch_code = d_scratch.low().code();
  int s_scratch_code2 = d_scratch2.low().code();
  if (src_code < SwVfpRegister::kNumRegisters) {
    // src is an s-register, dst is not.
    vmov(d_scratch, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(src_code));
    vmov(dst_d_reg, d_scratch);
  } else if (dst_code < SwVfpRegister::kNumRegisters) {
    // dst is an s-register, src is not.
    vmov(d_scratch, src_d_reg);
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(s_scratch_code + src_offset));
  } else {
    // Neither src nor dst is an s-register. Both scratch double registers are
    // available when there are 32 VFP registers.
    vmov(d_scratch, src_d_reg);
    vmov(d_scratch2, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(s_scratch_code2 + src_offset));
    vmov(dst_d_reg, d_scratch2);
  }
}

void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
  if (dst_code < SwVfpRegister::kNumRegisters) {
    vldr(SwVfpRegister::from_code(dst_code), src);
  } else {
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    // TODO(bbudge) If NEON is supported, use the load-single-lane form of
    // vld1.
    int dst_s_code = scratch.low().code() + (dst_code & 1);
    vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
    vldr(SwVfpRegister::from_code(dst_s_code), src);
    vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
  }
}

void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
  if (src_code < SwVfpRegister::kNumRegisters) {
    vstr(SwVfpRegister::from_code(src_code), dst);
  } else {
    // TODO(bbudge) If NEON is supported, use the store-single-lane form of
    // vst1.
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    int src_s_code = scratch.low().code() + (src_code & 1);
    vmov(scratch, DwVfpRegister::from_code(src_code / 2));
    vstr(SwVfpRegister::from_code(src_s_code), dst);
  }
}
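
// Moves a single lane of a NEON quad register into a core register. The lane
// index is scaled to a byte offset to locate the containing d-register and
// the lane within it.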
void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_source =
      DwVfpRegister::from_code(src.code() * 2 + double_word);
  vmov(dt, dst, double_source, double_lane);
}

void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  vmov(dt, dst, src, double_lane);
}

void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
                                 int lane) {
  int s_code = src.code() * 4 + lane;
  VmovExtended(dst.code(), s_code);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 Register src_lane, NeonDataType dt, int lane) {
  Move(dst, src);
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_dst =
      DwVfpRegister::from_code(dst.code() * 2 + double_word);
  vmov(dt, double_dst, double_lane, src_lane);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 SwVfpRegister src_lane, int lane) {
  Move(dst, src);
  int s_code = dst.code() * 4 + lane;
  VmovExtended(s_code, src_lane.code());
}
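
// The {Lsl,Lsr,Asr}Pair helpers implement 64-bit shifts on a register pair
// {src_high:src_low}. For shift amounts of 32 or more the result comes
// entirely from the other half of the pair; for smaller amounts the vacated
// bits are filled from the adjacent word with a complementary shift.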
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_high, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsl(dst_high, src_low, Operand(scratch));
  mov(dst_low, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsl(dst_high, src_high, Operand(shift));
  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
  lsl(dst_low, src_low, Operand(shift));
  bind(&done);
}

void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 0) {
    Move(dst_high, src_high);
    Move(dst_low, src_low);
  } else if (shift == 32) {
    Move(dst_high, src_low);
    Move(dst_low, Operand(0));
  } else if (shift >= 32) {
    shift &= 0x1F;
    lsl(dst_high, src_low, Operand(shift));
    mov(dst_low, Operand(0));
  } else {
    lsl(dst_high, src_high, Operand(shift));
    orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
    lsl(dst_low, src_low, Operand(shift));
  }
}

void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsr(dst_low, src_high, Operand(scratch));
  mov(dst_high, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  lsr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  if (shift == 32) {
    mov(dst_low, src_high);
    mov(dst_high, Operand(0));
  } else if (shift > 32) {
    shift &= 0x1F;
    lsr(dst_low, src_high, Operand(shift));
    mov(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    lsr(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  asr(dst_low, src_high, Operand(scratch));
  asr(dst_high, src_high, Operand(31));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  asr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  if (shift == 32) {
    mov(dst_low, src_high);
    asr(dst_high, src_high, Operand(31));
  } else if (shift > 32) {
    shift &= 0x1F;
    asr(dst_low, src_high, Operand(shift));
    asr(dst_high, src_high, Operand(31));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    asr(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(scratch);
}

void TurboAssembler::Prologue() { PushStandardFrame(r1); }

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(scratch);
}

int TurboAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  int frame_ends = pc_offset();
  ldm(ia_w, sp, fp.bit() | lr.bit());
  return frame_ends;
}

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(scratch);
  // Reserve room for saved entry sp and code object.
  sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
  if (emit_debug_code()) {
    mov(scratch, Operand::Zero());
    str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  Move(scratch, CodeObject());
  str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                          isolate()));
  str(fp, MemOperand(scratch));
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  str(cp, MemOperand(scratch));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, scratch);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kNumRegisters * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  // Reserve space for the return address and the requested stack space, and
  // align the frame in preparation for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(scratch, sp, Operand(kPointerSize));
  str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else   // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, scratch);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                          isolate()));
  str(r3, MemOperand(scratch));

  // Restore current context from top and clear it in debug mode.
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  ldr(cp, MemOperand(scratch));
#ifdef DEBUG
  mov(r3, Operand(Context::kInvalidContext));
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  str(r3, MemOperand(scratch));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      add(sp, sp, argument_count);
    } else {
      add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    }
  }
}

void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}

// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}
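
// Prepares for a tail call by dropping the current frame: the callee's
// arguments (plus the receiver) are copied over the caller's incoming
// arguments, and fp/lr are restored first, so the callee will return
// directly to the caller's caller.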
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of the destination area where we will put the arguments
  // after we drop the current frame. We add kPointerSize to count the receiver
  // argument, which is not included in the formal parameters count.
  Register dst_reg = scratch0;
  add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
  add(dst_reg, dst_reg,
      Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of the source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
    add(src_reg, src_reg, Operand(kPointerSize));
  } else {
    add(src_reg, sp,
        Operand((callee_args_count.immediate() + 1) * kPointerSize));
  }

  if (FLAG_debug_code) {
    cmp(src_reg, dst_reg);
    Check(lo, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore the caller's frame pointer and return address now, as they will be
  // overwritten by the copying loop.
  ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop, entry;
  b(&entry);
  bind(&loop);
  ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
  str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
  bind(&entry);
  cmp(sp, src_reg);
  b(ne, &loop);

  // Leave the current frame.
  mov(sp, dst_reg);
}
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,bool * definitely_mismatches,InvokeFlag flag)1393 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1394 const ParameterCount& actual, Label* done,
1395 bool* definitely_mismatches,
1396 InvokeFlag flag) {
1397 bool definitely_matches = false;
1398 *definitely_mismatches = false;
1399 Label regular_invoke;
1400
1401 // Check whether the expected and actual arguments count match. If not,
1402 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1403 // r0: actual arguments count
1404 // r1: function (passed through to callee)
1405 // r2: expected arguments count
1406
1407 // The code below is made a lot easier because the calling code already sets
1408 // up actual and expected registers according to the contract if values are
1409 // passed in registers.
1410 DCHECK(actual.is_immediate() || actual.reg() == r0);
1411 DCHECK(expected.is_immediate() || expected.reg() == r2);
1412
1413 if (expected.is_immediate()) {
1414 DCHECK(actual.is_immediate());
1415 mov(r0, Operand(actual.immediate()));
1416 if (expected.immediate() == actual.immediate()) {
1417 definitely_matches = true;
1418 } else {
1419 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1420 if (expected.immediate() == sentinel) {
1421 // Don't worry about adapting arguments for builtins that
1422 // don't want that done. Skip adaption code by making it look
1423 // like we have a match between expected and actual number of
1424 // arguments.
1425 definitely_matches = true;
1426 } else {
1427 *definitely_mismatches = true;
1428 mov(r2, Operand(expected.immediate()));
1429 }
1430 }
1431 } else {
1432 if (actual.is_immediate()) {
1433 mov(r0, Operand(actual.immediate()));
1434 cmp(expected.reg(), Operand(actual.immediate()));
1435 b(eq, ®ular_invoke);
1436 } else {
1437 cmp(expected.reg(), Operand(actual.reg()));
1438 b(eq, ®ular_invoke);
1439 }
1440 }
1441
1442 if (!definitely_matches) {
1443 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1444 if (flag == CALL_FUNCTION) {
1445 Call(adaptor);
1446 if (!*definitely_mismatches) {
1447 b(done);
1448 }
1449 } else {
1450 Jump(adaptor, RelocInfo::CODE_TARGET);
1451 }
1452 bind(®ular_invoke);
1453 }
1454 }
1455
CheckDebugHook(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)1456 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1457 const ParameterCount& expected,
1458 const ParameterCount& actual) {
1459 Label skip_hook;
1460
1461 ExternalReference debug_hook_active =
1462 ExternalReference::debug_hook_on_function_call_address(isolate());
1463 Move(r4, debug_hook_active);
1464 ldrsb(r4, MemOperand(r4));
1465 cmp(r4, Operand(0));
1466 b(eq, &skip_hook);
1467
1468 {
1469 // Load receiver to pass it later to DebugOnFunctionCall hook.
1470 if (actual.is_reg()) {
1471 mov(r4, actual.reg());
1472 } else {
1473 mov(r4, Operand(actual.immediate()));
1474 }
1475 ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2));
1476 FrameScope frame(this,
1477 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1478 if (expected.is_reg()) {
1479 SmiTag(expected.reg());
1480 Push(expected.reg());
1481 }
1482 if (actual.is_reg()) {
1483 SmiTag(actual.reg());
1484 Push(actual.reg());
1485 }
1486 if (new_target.is_valid()) {
1487 Push(new_target);
1488 }
1489 Push(fun);
1490 Push(fun);
1491 Push(r4);
1492 CallRuntime(Runtime::kDebugOnFunctionCall);
1493 Pop(fun);
1494 if (new_target.is_valid()) {
1495 Pop(new_target);
1496 }
1497 if (actual.is_reg()) {
1498 Pop(actual.reg());
1499 SmiUntag(actual.reg());
1500 }
1501 if (expected.is_reg()) {
1502 Pop(expected.reg());
1503 SmiUntag(expected.reg());
1504 }
1505 }
1506 bind(&skip_hook);
1507 }

void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function == r1);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);

  // On function call, call into the debugger if necessary.
  CheckDebugHook(function, new_target, expected, actual);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(r3, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = kJavaScriptCallCodeStartRegister;
    ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
    add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
    if (flag == CALL_FUNCTION) {
      Call(code);
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(fun == r1);

  Register expected_reg = r2;
  Register temp_reg = r4;

  ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldrh(expected_reg,
       FieldMemOperand(temp_reg,
                       SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(fun, new_target, expected, actual, flag);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(function == r1);

  // Get the function and setup the context.
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  InvokeFunctionCode(r1, no_reg, expected, actual, flag);
}

void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Move(r1, restart_fp);
  ldr(r1, MemOperand(r1));
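  // A non-zero restart frame pointer means the debugger has requested that a
  // frame be restarted, so tail-call the frame dropper in that case ('ne').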
  tst(r1, r1);
  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
       ne);
}

void MacroAssembler::PushStackHandler() {
  // Adjust this code if the handler layout pinned down below changes.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  Push(Smi::kZero);  // Padding.
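  // The padding slot is presumably there to keep the handler at two words so
  // that pushing it preserves 8-byte stack alignment (an assumption; the
  // asserts above only pin the handler size and the 'next' offset).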
  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
                                            isolate())));
  ldr(r5, MemOperand(r6));
  push(r5);
  // Set this new handler as the current one.
  str(sp, MemOperand(r6));
}


void MacroAssembler::PopStackHandler() {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(scratch, Operand(ExternalReference::Create(
                   IsolateAddressId::kHandlerAddress, isolate())));
  str(r1, MemOperand(scratch));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  UseScratchRegisterScope temps(this);
  const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;

  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::CompareRoot(Register obj,
                                 Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  DCHECK(obj != scratch);
  LoadRoot(scratch, index);
  cmp(obj, scratch);
}

void MacroAssembler::CallStub(CodeStub* stub,
                              Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS,
       false);
}

void TurboAssembler::CallStubDelayed(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.

  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);

#ifdef DEBUG
  Label start;
  bind(&start);
#endif

  // Call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                 @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                       @ return address

  mov(ip, Operand::EmbeddedCode(stub));
  blx(ip, al);

  DCHECK_EQ(kCallStubSize, SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame() || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DwVfpRegister double_input,
                                           LowDwVfpRegister double_scratch) {
  DCHECK(double_input != double_scratch);
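  // Convert to int32 and back again; if the value survives the round trip
  // unchanged, the VFP comparison below sets 'eq', signalling that the
  // conversion was exact (NaNs come out unordered instead).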
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
}

void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DwVfpRegister double_input,
                                                Label* done) {
  UseScratchRegisterScope temps(this);
  SwVfpRegister single_scratch = SwVfpRegister::no_reg();
  if (temps.CanAcquireVfp<SwVfpRegister>()) {
    single_scratch = temps.AcquireS();
  } else {
    // Re-use the input as a scratch register. However, we can only do this if
    // the input register is d0-d15 as there are no s32+ registers.
    DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
    LowDwVfpRegister double_scratch =
        LowDwVfpRegister::from_code(double_input.code());
    single_scratch = double_scratch.low();
  }
  vcvt_s32_f64(single_scratch, double_input);
  vmov(result, single_scratch);

  Register scratch = temps.Acquire();
  // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
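  // Subtracting 1 maps the two saturated values 0x7FFFFFFF and 0x80000000 to
  // 0x7FFFFFFE and 0x7FFFFFFF; every other result lands strictly below
  // 0x7FFFFFFE in the signed comparison, so 'lt' means no saturation occurred.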
  sub(scratch, result, Operand(1));
  cmp(scratch, Operand(0x7FFFFFFE));
  b(lt, done);
}

void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DwVfpRegister double_input,
                                       StubCallMode stub_mode) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through, the inline version didn't succeed, so call the stub.
  push(lr);
  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  vstr(double_input, MemOperand(sp, 0));

  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  }
  ldr(result, MemOperand(sp, 0));

  add(sp, sp, Operand(kDoubleSize));
  pop(lr);

  bind(&done);
}

void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(f->nargs));
  Move(r1, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, r0, r1));
  add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
  Call(centry);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack. r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  Move(r1, ExternalReference::Create(f));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): Most runtime routines don't need the number of
    // arguments passed in because it is constant. At some point we
    // should remove this need and make the runtime routine entry code
    // smarter.
    mov(r0, Operand(function->nargs));
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
#if defined(__thumb__)
  // Thumb mode builtin.
  DCHECK_EQ(builtin.address() & 1, 1);
#endif
  Move(r1, builtin);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::JumpToInstructionStream(Address entry) {
  mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
  Jump(kOffHeapTrampolineRegister);
}
void MacroAssembler::LoadWeakValue(Register out, Register in,
                                   Label* target_if_cleared) {
  cmp(in, Operand(kClearedWeakHeapObject));
  b(eq, target_if_cleared);

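  // Clear the weak-reference tag bits to turn the weak reference into a
  // strong object pointer.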
  and_(out, in, Operand(~kWeakHeapObjectMask));
}

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Move(scratch2, ExternalReference::Create(counter));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Move(scratch2, ExternalReference::Create(counter));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}

void TurboAssembler::Assert(Condition cond, AbortReason reason) {
  if (emit_debug_code())
    Check(cond, reason);
}

void TurboAssembler::Check(Condition cond, AbortReason reason) {
  Label L;
  b(cond, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}

void TurboAssembler::Abort(AbortReason reason) {
  Label abort_start;
  bind(&abort_start);
  const char* msg = GetAbortReason(reason);
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    stop(msg);
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
    PrepareCallCFunction(1, 0, r1);
    Move(r1, ExternalReference::abort_with_reason());
    // Use Call directly to avoid any unneeded overhead. The function won't
    // return anyway.
    Call(r1);
    return;
  }

  Move(r1, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // will not return here
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  ldr(dst, NativeContextMemOperand());
  ldr(dst, ContextMemOperand(dst, index));
}


void TurboAssembler::InitializeRootRegister() {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(kRootRegister, Operand(roots_array_start));
  add(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}

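// Smi-tagging on 32-bit V8 is a left shift by one (kSmiTagSize == 1 and
// kSmiTag == 0); adding a register to itself implements that shift while
// letting the caller observe overflow through the optional SBit.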
void MacroAssembler::SmiTag(Register reg, SBit s) {
  add(reg, reg, Operand(reg), s);
}

void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
  add(dst, src, Operand(src), s);
}

void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
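  // SmiUntag shifts right by one with the flags set; the shifter carry
  // receives the bit shifted out, i.e. the tag bit, so carry-clear ('cc')
  // means the value was a smi.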
  SmiUntag(dst, src, SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}

void MacroAssembler::SmiTst(Register value) {
  tst(value, Operand(kSmiTagMask));
}

void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
  tst(value, Operand(kSmiTagMask));
  b(eq, smi_label);
}

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  cmp(x, Operand(y));
  b(eq, dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  cmp(x, Operand(y));
  b(lt, dest);
}

void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
  tst(value, Operand(kSmiTagMask));
  b(ne, not_smi_label);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
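  // The second tst is predicated on 'ne', so it only executes when reg1 is
  // not a smi; the final 'eq' branch is therefore taken when either test saw
  // a clear tag bit.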
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, AbortReason::kOperandIsNotASmi);
  }
}

void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
    tst(object, Operand(Map::IsConstructorBit::kMask));
    pop(object);
    Check(ne, AbortReason::kOperandIsNotAConstructor);
  }
}

void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
    push(object);
    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
    push(object);
    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;
  tst(object, Operand(kSmiTagMask));
  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

  // Load map
  Register map = object;
  push(object);
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));

  // Check if JSGeneratorObject
  Label do_check;
  Register instance_type = object;
  CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
  b(eq, &do_check);

  // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
  cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));

  bind(&do_check);
  // Restore generator object to register and perform assertion
  pop(object);
  Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    b(eq, &done_checking);
    ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
    Assert(eq, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void TurboAssembler::CheckFor32DRegs(Register scratch) {
  Move(scratch, ExternalReference::cpu_features());
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}

void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
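  // CheckFor32DRegs leaves 'ne' set when d16-d31 exist: store them in that
  // case, otherwise ('eq') just reserve the equivalent stack space so the
  // frame layout is the same either way.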
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}

void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
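  // Mirror of SaveFPRegs: reload d16-d31 only when they exist ('ne'),
  // otherwise skip over the slots that were reserved for them ('eq').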
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}

template <typename T>
void TurboAssembler::FloatMaxHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so that the out-of-line code can be
  // completely avoided.
  DCHECK(left != right);

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vmaxnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
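    // After the compare, 'mi' means left < right and 'gt' means left > right.
    // If result aliases an input we must not clobber that input before the
    // choice is made, hence the conditional move; with a unique result
    // register we can copy 'right' unconditionally and overwrite it on 'gt'.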
    bool aliased_result_reg = result == left || result == right;
    Move(result, right, aliased_result_reg ? mi : al);
    Move(result, left, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    b(eq, out_of_line);
    // The arguments are equal and not zero, so it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    bind(&done);
  }
}

template <typename T>
void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
  DCHECK(left != right);

  // ARMv8: At least one of left and right is a NaN.
  // Anything else: At least one of left and right is a NaN, or both left and
  // right are zeroes with unknown sign.

  // If left and right are +/-0, select the one with the most positive sign.
  // If left or right are NaN, vadd propagates the appropriate one.
  vadd(result, left, right);
}

template <typename T>
void TurboAssembler::FloatMinHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so that the out-of-line code can be
  // completely avoided.
  DCHECK(left != right);

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vminnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
    bool aliased_result_reg = result == left || result == right;
    Move(result, left, aliased_result_reg ? mi : al);
    Move(result, right, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    // If the arguments are equal and not zero, it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    b(ne, &done);
    // At this point, both left and right are either 0 or -0.
    // We could use a single 'vorr' instruction here if we had NEON support.
    // The algorithm used is -((-L) + (-R)), which is most efficiently
    // expressed as -((-L) - R).
    if (left == result) {
      DCHECK(right != result);
      vneg(result, left);
      vsub(result, result, right);
      vneg(result, result);
    } else {
      DCHECK(left != result);
      vneg(result, right);
      vsub(result, result, left);
      vneg(result, result);
    }
    bind(&done);
  }
}

template <typename T>
void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
  DCHECK(left != right);

  // At least one of left and right is a NaN. Use vadd to propagate the NaN
  // appropriately. +/-0 is handled inline.
  vadd(result, left, right);
}

void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}

static const int kRegisterPassedArguments = 4;

int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
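  // Worked example (soft-float, values illustrative): two integer arguments
  // plus three double arguments occupy 2 + 2 * 3 = 8 register slots, so
  // 8 - 4 = 4 words spill to the stack.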
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}

void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    UseScratchRegisterScope temps(this);
    if (!scratch.is_valid()) scratch = temps.Acquire();
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else if (stack_passed_arguments > 0) {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
  DCHECK(src == d0);
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);
}

void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  DCHECK(src1 == d0);
  DCHECK(src2 == d1);
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src1);
    vmov(r2, r3, src2);
  }
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Move(scratch, function);
  CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met) {
  DCHECK(cc == eq || cc == ne);
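  // Clearing the low kPageSizeBits bits of the object address yields the
  // start of its MemoryChunk, whose flags word can then be loaded and tested.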
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  // Reading pc yields the address of the current instruction plus 8
  // (Instruction::kPcLoadDelta), so subtracting pc_offset() as well gives the
  // start of the code object.
  sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM