1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_ARM
8
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/debug/debug.h"
14 #include "src/register-configuration.h"
15 #include "src/runtime/runtime.h"
16
17 #include "src/arm/macro-assembler-arm.h"
18
19 namespace v8 {
20 namespace internal {
21
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
23 CodeObjectRequired create_code_object)
24 : Assembler(arg_isolate, buffer, size),
25 generating_stub_(false),
26 has_frame_(false) {
27 if (create_code_object == CodeObjectRequired::kYes) {
28 code_object_ =
29 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
30 }
31 }
32
33
34 void MacroAssembler::Jump(Register target, Condition cond) {
35 bx(target, cond);
36 }
37
38
39 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
40 Condition cond) {
41 DCHECK(RelocInfo::IsCodeTarget(rmode));
42 mov(pc, Operand(target, rmode), LeaveCC, cond);
43 }
44
45
46 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
47 Condition cond) {
48 DCHECK(!RelocInfo::IsCodeTarget(rmode));
49 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
50 }
51
52
53 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
54 Condition cond) {
55 DCHECK(RelocInfo::IsCodeTarget(rmode));
56 // 'code' is always generated ARM code, never THUMB code
57 AllowDeferredHandleDereference embedding_raw_address;
58 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
59 }
60
61
62 int MacroAssembler::CallSize(Register target, Condition cond) {
63 return kInstrSize;
64 }
65
66
67 void MacroAssembler::Call(Register target, Condition cond) {
68 // Block constant pool for the call instruction sequence.
69 BlockConstPoolScope block_const_pool(this);
70 Label start;
71 bind(&start);
72 blx(target, cond);
73 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
74 }
75
76
77 int MacroAssembler::CallSize(
78 Address target, RelocInfo::Mode rmode, Condition cond) {
79 Instr mov_instr = cond | MOV | LeaveCC;
80 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
81 return kInstrSize +
82 mov_operand.instructions_required(this, mov_instr) * kInstrSize;
83 }
84
85
86 int MacroAssembler::CallStubSize(
87 CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
88 return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
89 }
90
91
92 void MacroAssembler::Call(Address target,
93 RelocInfo::Mode rmode,
94 Condition cond,
95 TargetAddressStorageMode mode) {
96 // Block constant pool for the call instruction sequence.
97 BlockConstPoolScope block_const_pool(this);
98 Label start;
99 bind(&start);
100
101 bool old_predictable_code_size = predictable_code_size();
102 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
103 set_predictable_code_size(true);
104 }
105
106 #ifdef DEBUG
107 // Check the expected size before generating code to ensure we assume the same
108 // constant pool availability (e.g., whether constant pool is full or not).
109 int expected_size = CallSize(target, rmode, cond);
110 #endif
111
112 // Call sequence on V7 or later may be:
113 // movw ip, #... @ call address low 16
114 // movt ip, #... @ call address high 16
115 // blx ip
116 // @ return address
117 // Or for pre-V7 or values that may be back-patched
118 // to avoid ICache flushes:
119 // ldr ip, [pc, #...] @ call address
120 // blx ip
121 // @ return address
122
123 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
124 blx(ip, cond);
125
126 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
127 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
128 set_predictable_code_size(old_predictable_code_size);
129 }
130 }
131
132
133 int MacroAssembler::CallSize(Handle<Code> code,
134 RelocInfo::Mode rmode,
135 TypeFeedbackId ast_id,
136 Condition cond) {
137 AllowDeferredHandleDereference using_raw_address;
138 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
139 }
140
141
142 void MacroAssembler::Call(Handle<Code> code,
143 RelocInfo::Mode rmode,
144 TypeFeedbackId ast_id,
145 Condition cond,
146 TargetAddressStorageMode mode) {
147 Label start;
148 bind(&start);
149 DCHECK(RelocInfo::IsCodeTarget(rmode));
150 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
151 SetRecordedAstId(ast_id);
152 rmode = RelocInfo::CODE_TARGET_WITH_ID;
153 }
154 // 'code' is always generated ARM code, never THUMB code
155 AllowDeferredHandleDereference embedding_raw_address;
156 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
157 }
158
159 void MacroAssembler::CallDeoptimizer(Address target) {
160 BlockConstPoolScope block_const_pool(this);
161
162 uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
163
164 // We use blx, like a call, but it does not return here. The link register is
165 // used by the deoptimizer to work out what called it.
166 if (CpuFeatures::IsSupported(ARMv7)) {
167 CpuFeatureScope scope(this, ARMv7);
168 movw(ip, target_raw & 0xffff);
169 movt(ip, (target_raw >> 16) & 0xffff);
170 blx(ip);
171 } else {
172 // We need to load a literal, but we can't use the usual constant pool
173 // because we call this from a patcher, and cannot afford the guard
174 // instruction and other administrative overhead.
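// On ARM, reading pc yields the address of the current instruction plus
// kPcLoadDelta (8), so the offset (2 * kInstrSize) - kPcLoadDelta below makes
// the ldr pick up the literal emitted by dd() two instructions further on.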
175 ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
176 blx(ip);
177 dd(target_raw);
178 }
179 }
180
181 int MacroAssembler::CallDeoptimizerSize() {
182 // ARMv7+:
183 // movw ip, ...
184 // movt ip, ...
185 // blx ip @ This never returns.
186 //
187 // ARMv6:
188 // ldr ip, =address
189 // blx ip @ This never returns.
190 // .word address
191 return 3 * kInstrSize;
192 }
193
194 void MacroAssembler::Ret(Condition cond) {
195 bx(lr, cond);
196 }
197
198
199 void MacroAssembler::Drop(int count, Condition cond) {
200 if (count > 0) {
201 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
202 }
203 }
204
205 void MacroAssembler::Drop(Register count, Condition cond) {
206 add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
207 }
208
209 void MacroAssembler::Ret(int drop, Condition cond) {
210 Drop(drop, cond);
211 Ret(cond);
212 }
213
214
215 void MacroAssembler::Swap(Register reg1,
216 Register reg2,
217 Register scratch,
218 Condition cond) {
219 if (scratch.is(no_reg)) {
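// With no scratch register, three exclusive-ors swap in place: after the
// first two, reg2 holds the original reg1 (a ^ b ^ b == a), and the third
// eor recovers the original reg2 into reg1.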
220 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
221 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
222 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
223 } else {
224 mov(scratch, reg1, LeaveCC, cond);
225 mov(reg1, reg2, LeaveCC, cond);
226 mov(reg2, scratch, LeaveCC, cond);
227 }
228 }
229
230
231 void MacroAssembler::Call(Label* target) {
232 bl(target);
233 }
234
235
236 void MacroAssembler::Push(Handle<Object> handle) {
237 mov(ip, Operand(handle));
238 push(ip);
239 }
240
241
242 void MacroAssembler::Move(Register dst, Handle<Object> value) {
243 mov(dst, Operand(value));
244 }
245
246
247 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
248 if (!dst.is(src)) {
249 mov(dst, src, LeaveCC, cond);
250 }
251 }
252
253 void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
254 Condition cond) {
255 if (!dst.is(src)) {
256 vmov(dst, src, cond);
257 }
258 }
259
260 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
261 Condition cond) {
262 if (!dst.is(src)) {
263 vmov(dst, src, cond);
264 }
265 }
266
267 void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
268 if (!dst.is(src)) {
269 vmov(dst, src);
270 }
271 }
272
273 void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
274 if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
275
276 DCHECK(VfpRegisterIsAvailable(srcdst0));
277 DCHECK(VfpRegisterIsAvailable(srcdst1));
278
279 if (CpuFeatures::IsSupported(NEON)) {
280 vswp(srcdst0, srcdst1);
281 } else {
282 DCHECK(!srcdst0.is(kScratchDoubleReg));
283 DCHECK(!srcdst1.is(kScratchDoubleReg));
284 vmov(kScratchDoubleReg, srcdst0);
285 vmov(srcdst0, srcdst1);
286 vmov(srcdst1, kScratchDoubleReg);
287 }
288 }
289
290 void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
291 if (!srcdst0.is(srcdst1)) {
292 vswp(srcdst0, srcdst1);
293 }
294 }
295
296 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
297 Register srcA, Condition cond) {
298 if (CpuFeatures::IsSupported(ARMv7)) {
299 CpuFeatureScope scope(this, ARMv7);
300 mls(dst, src1, src2, srcA, cond);
301 } else {
302 DCHECK(!srcA.is(ip));
303 mul(ip, src1, src2, LeaveCC, cond);
304 sub(dst, srcA, ip, LeaveCC, cond);
305 }
306 }
307
308
309 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
310 Condition cond) {
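// When the immediate is of the form 2^n - 1 but cannot be encoded in a
// single instruction (e.g. 0xffff), ARMv7 can use one ubfx of width n
// instead of materializing the mask.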
311 if (!src2.is_reg() &&
312 !src2.must_output_reloc_info(this) &&
313 src2.immediate() == 0) {
314 mov(dst, Operand::Zero(), LeaveCC, cond);
315 } else if (!(src2.instructions_required(this) == 1) &&
316 !src2.must_output_reloc_info(this) &&
317 CpuFeatures::IsSupported(ARMv7) &&
318 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
319 CpuFeatureScope scope(this, ARMv7);
320 ubfx(dst, src1, 0,
321 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
322 } else {
323 and_(dst, src1, src2, LeaveCC, cond);
324 }
325 }
326
327
328 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
329 Condition cond) {
330 DCHECK(lsb < 32);
331 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
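// Emulate ubfx with a mask and a shift: e.g. lsb == 4, width == 8 gives
// mask == 0xff0, so the and_ keeps bits 4..11 and the LSR moves them down
// to bits 0..7.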
332 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
333 and_(dst, src1, Operand(mask), LeaveCC, cond);
334 if (lsb != 0) {
335 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
336 }
337 } else {
338 CpuFeatureScope scope(this, ARMv7);
339 ubfx(dst, src1, lsb, width, cond);
340 }
341 }
342
343
344 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
345 Condition cond) {
346 DCHECK(lsb < 32);
347 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
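// Without sbfx, isolate the field, shift it up so its top bit becomes bit 31,
// then arithmetic-shift it back down so that bit is replicated into the upper
// bits (sign extension).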
348 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
349 and_(dst, src1, Operand(mask), LeaveCC, cond);
350 int shift_up = 32 - lsb - width;
351 int shift_down = lsb + shift_up;
352 if (shift_up != 0) {
353 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
354 }
355 if (shift_down != 0) {
356 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
357 }
358 } else {
359 CpuFeatureScope scope(this, ARMv7);
360 sbfx(dst, src1, lsb, width, cond);
361 }
362 }
363
364
365 void MacroAssembler::Bfi(Register dst,
366 Register src,
367 Register scratch,
368 int lsb,
369 int width,
370 Condition cond) {
371 DCHECK(0 <= lsb && lsb < 32);
372 DCHECK(0 <= width && width < 32);
373 DCHECK(lsb + width < 32);
374 DCHECK(!scratch.is(dst));
375 if (width == 0) return;
376 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
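// Without bfi, clear the destination field with bic, mask the low 'width'
// bits of src into scratch, shift them into position and or them into dst.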
377 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
378 bic(dst, dst, Operand(mask));
379 and_(scratch, src, Operand((1 << width) - 1));
380 mov(scratch, Operand(scratch, LSL, lsb));
381 orr(dst, dst, scratch);
382 } else {
383 CpuFeatureScope scope(this, ARMv7);
384 bfi(dst, src, lsb, width, cond);
385 }
386 }
387
388
389 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
390 Condition cond) {
391 DCHECK(lsb < 32);
392 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
393 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
394 bic(dst, src, Operand(mask));
395 } else {
396 CpuFeatureScope scope(this, ARMv7);
397 Move(dst, src, cond);
398 bfc(dst, lsb, width, cond);
399 }
400 }
401
402
403 void MacroAssembler::Load(Register dst,
404 const MemOperand& src,
405 Representation r) {
406 DCHECK(!r.IsDouble());
407 if (r.IsInteger8()) {
408 ldrsb(dst, src);
409 } else if (r.IsUInteger8()) {
410 ldrb(dst, src);
411 } else if (r.IsInteger16()) {
412 ldrsh(dst, src);
413 } else if (r.IsUInteger16()) {
414 ldrh(dst, src);
415 } else {
416 ldr(dst, src);
417 }
418 }
419
420
421 void MacroAssembler::Store(Register src,
422 const MemOperand& dst,
423 Representation r) {
424 DCHECK(!r.IsDouble());
425 if (r.IsInteger8() || r.IsUInteger8()) {
426 strb(src, dst);
427 } else if (r.IsInteger16() || r.IsUInteger16()) {
428 strh(src, dst);
429 } else {
430 if (r.IsHeapObject()) {
431 AssertNotSmi(src);
432 } else if (r.IsSmi()) {
433 AssertSmi(src);
434 }
435 str(src, dst);
436 }
437 }
438
439
440 void MacroAssembler::LoadRoot(Register destination,
441 Heap::RootListIndex index,
442 Condition cond) {
443 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
444 }
445
446
447 void MacroAssembler::StoreRoot(Register source,
448 Heap::RootListIndex index,
449 Condition cond) {
450 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
451 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
452 }
453
454
455 void MacroAssembler::InNewSpace(Register object,
456 Register scratch,
457 Condition cond,
458 Label* branch) {
459 DCHECK(cond == eq || cond == ne);
460 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
461 }
462
463
464 void MacroAssembler::RecordWriteField(
465 Register object,
466 int offset,
467 Register value,
468 Register dst,
469 LinkRegisterStatus lr_status,
470 SaveFPRegsMode save_fp,
471 RememberedSetAction remembered_set_action,
472 SmiCheck smi_check,
473 PointersToHereCheck pointers_to_here_check_for_value) {
474 // First, check if a write barrier is even needed. The tests below
475 // catch stores of Smis.
476 Label done;
477
478 // Skip barrier if writing a smi.
479 if (smi_check == INLINE_SMI_CHECK) {
480 JumpIfSmi(value, &done);
481 }
482
483 // Although the object register is tagged, the offset is relative to the start
484 // of the object, so the offset must be a multiple of kPointerSize.
485 DCHECK(IsAligned(offset, kPointerSize));
486
487 add(dst, object, Operand(offset - kHeapObjectTag));
488 if (emit_debug_code()) {
489 Label ok;
490 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
491 b(eq, &ok);
492 stop("Unaligned cell in write barrier");
493 bind(&ok);
494 }
495
496 RecordWrite(object,
497 dst,
498 value,
499 lr_status,
500 save_fp,
501 remembered_set_action,
502 OMIT_SMI_CHECK,
503 pointers_to_here_check_for_value);
504
505 bind(&done);
506
507 // Clobber clobbered input registers when running with the debug-code flag
508 // turned on to provoke errors.
509 if (emit_debug_code()) {
510 mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
511 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
512 }
513 }
514
515
516 // Will clobber 4 registers: object, map, dst, ip. The
517 // register 'object' contains a heap object pointer.
518 void MacroAssembler::RecordWriteForMap(Register object,
519 Register map,
520 Register dst,
521 LinkRegisterStatus lr_status,
522 SaveFPRegsMode fp_mode) {
523 if (emit_debug_code()) {
524 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
525 cmp(dst, Operand(isolate()->factory()->meta_map()));
526 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
527 }
528
529 if (!FLAG_incremental_marking) {
530 return;
531 }
532
533 if (emit_debug_code()) {
534 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
535 cmp(ip, map);
536 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
537 }
538
539 Label done;
540
541 // A single check of the map's page's interesting flag suffices, since it is
542 // only set during incremental collection, and then it's also guaranteed that
543 // the from object's page's interesting flag is also set. This optimization
544 // relies on the fact that maps can never be in new space.
545 CheckPageFlag(map,
546 map, // Used as scratch.
547 MemoryChunk::kPointersToHereAreInterestingMask,
548 eq,
549 &done);
550
551 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
552 if (emit_debug_code()) {
553 Label ok;
554 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
555 b(eq, &ok);
556 stop("Unaligned cell in write barrier");
557 bind(&ok);
558 }
559
560 // Record the actual write.
561 if (lr_status == kLRHasNotBeenSaved) {
562 push(lr);
563 }
564 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
565 fp_mode);
566 CallStub(&stub);
567 if (lr_status == kLRHasNotBeenSaved) {
568 pop(lr);
569 }
570
571 bind(&done);
572
573 // Count number of write barriers in generated code.
574 isolate()->counters()->write_barriers_static()->Increment();
575 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
576
577 // Clobber clobbered registers when running with the debug-code flag
578 // turned on to provoke errors.
579 if (emit_debug_code()) {
580 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
581 mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
582 }
583 }
584
585
586 // Will clobber 4 registers: object, address, scratch, ip. The
587 // register 'object' contains a heap object pointer. The heap object
588 // tag is shifted away.
589 void MacroAssembler::RecordWrite(
590 Register object,
591 Register address,
592 Register value,
593 LinkRegisterStatus lr_status,
594 SaveFPRegsMode fp_mode,
595 RememberedSetAction remembered_set_action,
596 SmiCheck smi_check,
597 PointersToHereCheck pointers_to_here_check_for_value) {
598 DCHECK(!object.is(value));
599 if (emit_debug_code()) {
600 ldr(ip, MemOperand(address));
601 cmp(ip, value);
602 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
603 }
604
605 if (remembered_set_action == OMIT_REMEMBERED_SET &&
606 !FLAG_incremental_marking) {
607 return;
608 }
609
610 // First, check if a write barrier is even needed. The tests below
611 // catch stores of smis and stores into the young generation.
612 Label done;
613
614 if (smi_check == INLINE_SMI_CHECK) {
615 JumpIfSmi(value, &done);
616 }
617
618 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
619 CheckPageFlag(value,
620 value, // Used as scratch.
621 MemoryChunk::kPointersToHereAreInterestingMask,
622 eq,
623 &done);
624 }
625 CheckPageFlag(object,
626 value, // Used as scratch.
627 MemoryChunk::kPointersFromHereAreInterestingMask,
628 eq,
629 &done);
630
631 // Record the actual write.
632 if (lr_status == kLRHasNotBeenSaved) {
633 push(lr);
634 }
635 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
636 fp_mode);
637 CallStub(&stub);
638 if (lr_status == kLRHasNotBeenSaved) {
639 pop(lr);
640 }
641
642 bind(&done);
643
644 // Count number of write barriers in generated code.
645 isolate()->counters()->write_barriers_static()->Increment();
646 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
647 value);
648
649 // Clobber clobbered registers when running with the debug-code flag
650 // turned on to provoke errors.
651 if (emit_debug_code()) {
652 mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
653 mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
654 }
655 }
656
657 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
658 Register code_entry,
659 Register scratch) {
660 const int offset = JSFunction::kCodeEntryOffset;
661
662 // Since a code entry (value) is always in old space, we don't need to update
663 // the remembered set. If incremental marking is off, there is nothing for us to
664 // do.
665 if (!FLAG_incremental_marking) return;
666
667 DCHECK(js_function.is(r1));
668 DCHECK(code_entry.is(r4));
669 DCHECK(scratch.is(r5));
670 AssertNotSmi(js_function);
671
672 if (emit_debug_code()) {
673 add(scratch, js_function, Operand(offset - kHeapObjectTag));
674 ldr(ip, MemOperand(scratch));
675 cmp(ip, code_entry);
676 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
677 }
678
679 // First, check if a write barrier is even needed. The tests below
680 // catch stores of Smis and stores into young gen.
681 Label done;
682
683 CheckPageFlag(code_entry, scratch,
684 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
685 CheckPageFlag(js_function, scratch,
686 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
687
688 const Register dst = scratch;
689 add(dst, js_function, Operand(offset - kHeapObjectTag));
690
691 push(code_entry);
692
693 // Save caller-saved registers, which includes js_function.
694 DCHECK((kCallerSaved & js_function.bit()) != 0);
695 DCHECK_EQ(kCallerSaved & code_entry.bit(), 0u);
696 stm(db_w, sp, (kCallerSaved | lr.bit()));
697
698 int argument_count = 3;
699 PrepareCallCFunction(argument_count, code_entry);
700
701 mov(r0, js_function);
702 mov(r1, dst);
703 mov(r2, Operand(ExternalReference::isolate_address(isolate())));
704
705 {
706 AllowExternalCallThatCantCauseGC scope(this);
707 CallCFunction(
708 ExternalReference::incremental_marking_record_write_code_entry_function(
709 isolate()),
710 argument_count);
711 }
712
713 // Restore caller-saved registers (including js_function and code_entry).
714 ldm(ia_w, sp, (kCallerSaved | lr.bit()));
715
716 pop(code_entry);
717
718 bind(&done);
719 }
720
721 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
722 Register address,
723 Register scratch,
724 SaveFPRegsMode fp_mode,
725 RememberedSetFinalAction and_then) {
726 Label done;
727 if (emit_debug_code()) {
728 Label ok;
729 JumpIfNotInNewSpace(object, scratch, &ok);
730 stop("Remembered set pointer is in new space");
731 bind(&ok);
732 }
733 // Load store buffer top.
734 ExternalReference store_buffer =
735 ExternalReference::store_buffer_top(isolate());
736 mov(ip, Operand(store_buffer));
737 ldr(scratch, MemOperand(ip));
738 // Store pointer to buffer and increment buffer top.
739 str(address, MemOperand(scratch, kPointerSize, PostIndex));
740 // Write back new top of buffer.
741 str(scratch, MemOperand(ip));
742 // Call stub on end of buffer.
743 // Check for end of buffer.
744 tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
745 if (and_then == kFallThroughAtEnd) {
746 b(ne, &done);
747 } else {
748 DCHECK(and_then == kReturnAtEnd);
749 Ret(ne);
750 }
751 push(lr);
752 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
753 CallStub(&store_buffer_overflow);
754 pop(lr);
755 bind(&done);
756 if (and_then == kReturnAtEnd) {
757 Ret();
758 }
759 }
760
761 void MacroAssembler::PushCommonFrame(Register marker_reg) {
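// stm stores lower-numbered registers at lower addresses, so the marker can
// only be folded into a single stm when its register code is below the other
// saved registers; otherwise it is pushed separately after fp is set up.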
762 if (marker_reg.is_valid()) {
763 if (FLAG_enable_embedded_constant_pool) {
764 if (marker_reg.code() > pp.code()) {
765 stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
766 add(fp, sp, Operand(kPointerSize));
767 Push(marker_reg);
768 } else {
769 stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
770 add(fp, sp, Operand(2 * kPointerSize));
771 }
772 } else {
773 if (marker_reg.code() > fp.code()) {
774 stm(db_w, sp, fp.bit() | lr.bit());
775 mov(fp, Operand(sp));
776 Push(marker_reg);
777 } else {
778 stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
779 add(fp, sp, Operand(kPointerSize));
780 }
781 }
782 } else {
783 stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
784 fp.bit() | lr.bit());
785 add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
786 }
787 }
788
789 void MacroAssembler::PopCommonFrame(Register marker_reg) {
790 if (marker_reg.is_valid()) {
791 if (FLAG_enable_embedded_constant_pool) {
792 if (marker_reg.code() > pp.code()) {
793 pop(marker_reg);
794 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
795 } else {
796 ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
797 }
798 } else {
799 if (marker_reg.code() > fp.code()) {
800 pop(marker_reg);
801 ldm(ia_w, sp, fp.bit() | lr.bit());
802 } else {
803 ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
804 }
805 }
806 } else {
807 ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
808 fp.bit() | lr.bit());
809 }
810 }
811
812 void MacroAssembler::PushStandardFrame(Register function_reg) {
813 DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
814 stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
815 (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
816 fp.bit() | lr.bit());
817 int offset = -StandardFrameConstants::kContextOffset;
818 offset += function_reg.is_valid() ? kPointerSize : 0;
819 add(fp, sp, Operand(offset));
820 }
821
822
823 // Push and pop all registers that can hold pointers.
824 void MacroAssembler::PushSafepointRegisters() {
825 // Safepoints expect a block of contiguous register values starting with r0,
826 // except when FLAG_enable_embedded_constant_pool is set, which omits pp.
827 DCHECK(kSafepointSavedRegisters ==
828 (FLAG_enable_embedded_constant_pool
829 ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
830 : (1 << kNumSafepointSavedRegisters) - 1));
831 // Safepoints expect a block of kNumSafepointRegisters values on the
832 // stack, so adjust the stack for unsaved registers.
833 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
834 DCHECK(num_unsaved >= 0);
835 sub(sp, sp, Operand(num_unsaved * kPointerSize));
836 stm(db_w, sp, kSafepointSavedRegisters);
837 }
838
839
840 void MacroAssembler::PopSafepointRegisters() {
841 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
842 ldm(ia_w, sp, kSafepointSavedRegisters);
843 add(sp, sp, Operand(num_unsaved * kPointerSize));
844 }
845
846
847 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
848 str(src, SafepointRegisterSlot(dst));
849 }
850
851
852 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
853 ldr(dst, SafepointRegisterSlot(src));
854 }
855
856
857 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
858 // The registers are pushed starting with the highest encoding,
859 // which means that the lowest encodings are closest to the stack pointer.
860 if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
861 // RegList omits pp.
862 reg_code -= 1;
863 }
864 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
865 return reg_code;
866 }
867
868
869 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
870 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
871 }
872
873
874 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
875 // Number of d-regs not known at snapshot time.
876 DCHECK(!serializer_enabled());
877 // General purpose registers are pushed last on the stack.
878 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
879 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
880 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
881 return MemOperand(sp, doubles_size + register_offset);
882 }
883
884
885 void MacroAssembler::Ldrd(Register dst1, Register dst2,
886 const MemOperand& src, Condition cond) {
887 DCHECK(src.rm().is(no_reg));
888 DCHECK(!dst1.is(lr)); // r14.
889
890 // V8 does not use this addressing mode, so the fallback code
891 // below doesn't support it yet.
892 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
893
894 // Generate two ldr instructions if ldrd is not applicable.
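// ldrd needs an even-numbered dst1 with dst2 the next sequential register.
// The fallback orders the two ldr instructions so that a base register
// aliased by dst1 is read last and is not clobbered early.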
895 if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
896 ldrd(dst1, dst2, src, cond);
897 } else {
898 if ((src.am() == Offset) || (src.am() == NegOffset)) {
899 MemOperand src2(src);
900 src2.set_offset(src2.offset() + 4);
901 if (dst1.is(src.rn())) {
902 ldr(dst2, src2, cond);
903 ldr(dst1, src, cond);
904 } else {
905 ldr(dst1, src, cond);
906 ldr(dst2, src2, cond);
907 }
908 } else { // PostIndex or NegPostIndex.
909 DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
910 if (dst1.is(src.rn())) {
911 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
912 ldr(dst1, src, cond);
913 } else {
914 MemOperand src2(src);
915 src2.set_offset(src2.offset() - 4);
916 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
917 ldr(dst2, src2, cond);
918 }
919 }
920 }
921 }
922
923
924 void MacroAssembler::Strd(Register src1, Register src2,
925 const MemOperand& dst, Condition cond) {
926 DCHECK(dst.rm().is(no_reg));
927 DCHECK(!src1.is(lr)); // r14.
928
929 // V8 does not use this addressing mode, so the fallback code
930 // below doesn't support it yet.
931 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
932
933 // Generate two str instructions if strd is not applicable.
934 if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
935 strd(src1, src2, dst, cond);
936 } else {
937 MemOperand dst2(dst);
938 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
939 dst2.set_offset(dst2.offset() + 4);
940 str(src1, dst, cond);
941 str(src2, dst2, cond);
942 } else { // PostIndex or NegPostIndex.
943 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
944 dst2.set_offset(dst2.offset() - 4);
945 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
946 str(src2, dst2, cond);
947 }
948 }
949 }
950
951 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
952 const DwVfpRegister src,
953 const Condition cond) {
954 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
955 // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
956 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
957 vsub(dst, src, kDoubleRegZero, cond);
958 }
959
960
961 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
962 const SwVfpRegister src2,
963 const Condition cond) {
964 // Compare and move FPSCR flags to the normal condition flags.
965 VFPCompareAndLoadFlags(src1, src2, pc, cond);
966 }
967
968 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
969 const float src2,
970 const Condition cond) {
971 // Compare and move FPSCR flags to the normal condition flags.
972 VFPCompareAndLoadFlags(src1, src2, pc, cond);
973 }
974
975
976 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
977 const DwVfpRegister src2,
978 const Condition cond) {
979 // Compare and move FPSCR flags to the normal condition flags.
980 VFPCompareAndLoadFlags(src1, src2, pc, cond);
981 }
982
983 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
984 const double src2,
985 const Condition cond) {
986 // Compare and move FPSCR flags to the normal condition flags.
987 VFPCompareAndLoadFlags(src1, src2, pc, cond);
988 }
989
990
991 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
992 const SwVfpRegister src2,
993 const Register fpscr_flags,
994 const Condition cond) {
995 // Compare and load FPSCR.
996 vcmp(src1, src2, cond);
997 vmrs(fpscr_flags, cond);
998 }
999
1000 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
1001 const float src2,
1002 const Register fpscr_flags,
1003 const Condition cond) {
1004 // Compare and load FPSCR.
1005 vcmp(src1, src2, cond);
1006 vmrs(fpscr_flags, cond);
1007 }
1008
1009
1010 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
1011 const DwVfpRegister src2,
1012 const Register fpscr_flags,
1013 const Condition cond) {
1014 // Compare and load FPSCR.
1015 vcmp(src1, src2, cond);
1016 vmrs(fpscr_flags, cond);
1017 }
1018
1019 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
1020 const double src2,
1021 const Register fpscr_flags,
1022 const Condition cond) {
1023 // Compare and load FPSCR.
1024 vcmp(src1, src2, cond);
1025 vmrs(fpscr_flags, cond);
1026 }
1027
1028
1029 void MacroAssembler::Vmov(const DwVfpRegister dst,
1030 const double imm,
1031 const Register scratch) {
1032 int64_t imm_bits = bit_cast<int64_t>(imm);
1033 // Handle special values first.
1034 if (imm_bits == bit_cast<int64_t>(0.0)) {
1035 vmov(dst, kDoubleRegZero);
1036 } else if (imm_bits == bit_cast<int64_t>(-0.0)) {
1037 vneg(dst, kDoubleRegZero);
1038 } else {
1039 vmov(dst, imm, scratch);
1040 }
1041 }
1042
1043
1044 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
1045 if (src.code() < 16) {
1046 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1047 vmov(dst, loc.high());
1048 } else {
1049 vmov(dst, VmovIndexHi, src);
1050 }
1051 }
1052
1053
1054 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
1055 if (dst.code() < 16) {
1056 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1057 vmov(loc.high(), src);
1058 } else {
1059 vmov(dst, VmovIndexHi, src);
1060 }
1061 }
1062
1063
1064 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
1065 if (src.code() < 16) {
1066 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1067 vmov(dst, loc.low());
1068 } else {
1069 vmov(dst, VmovIndexLo, src);
1070 }
1071 }
1072
1073
1074 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
1075 if (dst.code() < 16) {
1076 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1077 vmov(loc.low(), src);
1078 } else {
1079 vmov(dst, VmovIndexLo, src);
1080 }
1081 }
1082
1083 void MacroAssembler::VmovExtended(Register dst, int src_code) {
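// 'Extended' s-register codes 32..63 name the halves of d16..d31, which have
// no s-register aliases; go through the containing d-register instead.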
1084 DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
1085 DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
1086 if (src_code & 0x1) {
1087 VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
1088 } else {
1089 VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
1090 }
1091 }
1092
1093 void MacroAssembler::VmovExtended(int dst_code, Register src) {
1094 DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
1095 DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
1096 if (dst_code & 0x1) {
1097 VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
1098 } else {
1099 VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
1100 }
1101 }
1102
1103 void MacroAssembler::VmovExtended(int dst_code, int src_code,
1104 Register scratch) {
1105 if (src_code < SwVfpRegister::kMaxNumRegisters &&
1106 dst_code < SwVfpRegister::kMaxNumRegisters) {
1107 // src and dst are both s-registers.
1108 vmov(SwVfpRegister::from_code(dst_code),
1109 SwVfpRegister::from_code(src_code));
1110 } else if (src_code < SwVfpRegister::kMaxNumRegisters) {
1111 // src is an s-register.
1112 vmov(scratch, SwVfpRegister::from_code(src_code));
1113 VmovExtended(dst_code, scratch);
1114 } else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
1115 // dst is an s-register.
1116 VmovExtended(scratch, src_code);
1117 vmov(SwVfpRegister::from_code(dst_code), scratch);
1118 } else {
1119 // Neither src nor dst is an s-register.
1120 DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
1121 DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
1122 VmovExtended(scratch, src_code);
1123 VmovExtended(dst_code, scratch);
1124 }
1125 }
1126
1127 void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
1128 Register scratch) {
1129 if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
1130 ldr(scratch, src);
1131 VmovExtended(dst_code, scratch);
1132 } else {
1133 vldr(SwVfpRegister::from_code(dst_code), src);
1134 }
1135 }
1136
1137 void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
1138 Register scratch) {
1139 if (src_code >= SwVfpRegister::kMaxNumRegisters) {
1140 VmovExtended(scratch, src_code);
1141 str(scratch, dst);
1142 } else {
1143 vstr(SwVfpRegister::from_code(src_code), dst);
1144 }
1145 }
1146
1147 void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
1148 NeonDataType dt, int lane) {
1149 int size = NeonSz(dt); // 0, 1, 2
1150 int byte = lane << size;
1151 int double_word = byte >> kDoubleSizeLog2;
1152 int double_byte = byte & (kDoubleSize - 1);
1153 int double_lane = double_byte >> size;
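// Example: dt == NeonS16 (size 1), lane 5 -> byte 10, which lives in the
// upper d-register of the q-register (double_word 1) at 16-bit lane 1.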
1154 DwVfpRegister double_source =
1155 DwVfpRegister::from_code(src.code() * 2 + double_word);
1156 vmov(dt, dst, double_source, double_lane);
1157 }
1158
1159 void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
1160 Register scratch, int lane) {
1161 int s_code = src.code() * 4 + lane;
1162 VmovExtended(dst.code(), s_code, scratch);
1163 }
1164
1165 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1166 Register src_lane, NeonDataType dt, int lane) {
1167 Move(dst, src);
1168 int size = NeonSz(dt); // 0, 1, 2
1169 int byte = lane << size;
1170 int double_word = byte >> kDoubleSizeLog2;
1171 int double_byte = byte & (kDoubleSize - 1);
1172 int double_lane = double_byte >> size;
1173 DwVfpRegister double_dst =
1174 DwVfpRegister::from_code(dst.code() * 2 + double_word);
1175 vmov(dt, double_dst, double_lane, src_lane);
1176 }
1177
1178 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1179 SwVfpRegister src_lane, Register scratch,
1180 int lane) {
1181 Move(dst, src);
1182 int s_code = dst.code() * 4 + lane;
1183 VmovExtended(s_code, src_lane.code(), scratch);
1184 }
1185
1186 void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
1187 Register scratch, NeonSize size, uint32_t lanes) {
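// 'lanes' encodes one source lane per hex nibble, least-significant nibble
// first: destination lane i takes source lane (lanes >> (i * 4)) & 0xF, so
// 0x3210 is the identity and 0x1032 swaps the two halves.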
1188 // TODO(bbudge) Handle Int16x8, Int8x16 vectors.
1189 DCHECK_EQ(Neon32, size);
1190 DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
1191 if (size == Neon32) {
1192 switch (lanes) {
1193 // TODO(bbudge) Handle more special cases.
1194 case 0x3210: // Identity.
1195 Move(dst, src);
1196 return;
1197 case 0x1032: // Swap top and bottom.
1198 vext(dst, src, src, 8);
1199 return;
1200 case 0x2103: // Rotation.
1201 vext(dst, src, src, 12);
1202 return;
1203 case 0x0321: // Rotation.
1204 vext(dst, src, src, 4);
1205 return;
1206 case 0x0000: // Equivalent to vdup.
1207 case 0x1111:
1208 case 0x2222:
1209 case 0x3333: {
1210 int lane_code = src.code() * 4 + (lanes & 0xF);
1211 if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
1212 // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
1213 int temp_code = kScratchDoubleReg.code() * 2;
1214 VmovExtended(temp_code, lane_code, scratch);
1215 lane_code = temp_code;
1216 }
1217 vdup(dst, SwVfpRegister::from_code(lane_code));
1218 return;
1219 }
1220 case 0x2301: // Swap lanes 0, 1 and lanes 2, 3.
1221 vrev64(Neon32, dst, src);
1222 return;
1223 default: // Handle all other cases with vmovs.
1224 int src_code = src.code() * 4;
1225 int dst_code = dst.code() * 4;
1226 bool in_place = src.is(dst);
1227 if (in_place) {
1228 vmov(kScratchQuadReg, src);
1229 src_code = kScratchQuadReg.code() * 4;
1230 }
1231 for (int i = 0; i < 4; i++) {
1232 int lane = (lanes >> (i * 4) & 0xF);
1233 VmovExtended(dst_code + i, src_code + lane, scratch);
1234 }
1235 if (in_place) {
1236 // Restore zero reg.
1237 veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
1238 }
1239 return;
1240 }
1241 }
1242 }
1243
1244 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1245 Register src_low, Register src_high,
1246 Register scratch, Register shift) {
1247 DCHECK(!AreAliased(dst_high, src_low));
1248 DCHECK(!AreAliased(dst_high, shift));
1249
1250 Label less_than_32;
1251 Label done;
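// scratch = 32 - shift; the SetCC flags pick the shift < 32 path, and
// scratch doubles as the complementary shift for the bits that cross from
// the low word into the high word.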
1252 rsb(scratch, shift, Operand(32), SetCC);
1253 b(gt, &less_than_32);
1254 // If shift >= 32
1255 and_(scratch, shift, Operand(0x1f));
1256 lsl(dst_high, src_low, Operand(scratch));
1257 mov(dst_low, Operand(0));
1258 jmp(&done);
1259 bind(&less_than_32);
1260 // If shift < 32
1261 lsl(dst_high, src_high, Operand(shift));
1262 orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
1263 lsl(dst_low, src_low, Operand(shift));
1264 bind(&done);
1265 }
1266
1267 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1268 Register src_low, Register src_high,
1269 uint32_t shift) {
1270 DCHECK(!AreAliased(dst_high, src_low));
1271 Label less_than_32;
1272 Label done;
1273 if (shift == 0) {
1274 Move(dst_high, src_high);
1275 Move(dst_low, src_low);
1276 } else if (shift == 32) {
1277 Move(dst_high, src_low);
1278 Move(dst_low, Operand(0));
1279 } else if (shift >= 32) {
1280 shift &= 0x1f;
1281 lsl(dst_high, src_low, Operand(shift));
1282 mov(dst_low, Operand(0));
1283 } else {
1284 lsl(dst_high, src_high, Operand(shift));
1285 orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
1286 lsl(dst_low, src_low, Operand(shift));
1287 }
1288 }
1289
1290 void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1291 Register src_low, Register src_high,
1292 Register scratch, Register shift) {
1293 DCHECK(!AreAliased(dst_low, src_high));
1294 DCHECK(!AreAliased(dst_low, shift));
1295
1296 Label less_than_32;
1297 Label done;
1298 rsb(scratch, shift, Operand(32), SetCC);
1299 b(gt, &less_than_32);
1300 // If shift >= 32
1301 and_(scratch, shift, Operand(0x1f));
1302 lsr(dst_low, src_high, Operand(scratch));
1303 mov(dst_high, Operand(0));
1304 jmp(&done);
1305 bind(&less_than_32);
1306 // If shift < 32
1307
1308 lsr(dst_low, src_low, Operand(shift));
1309 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1310 lsr(dst_high, src_high, Operand(shift));
1311 bind(&done);
1312 }
1313
1314 void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1315 Register src_low, Register src_high,
1316 uint32_t shift) {
1317 DCHECK(!AreAliased(dst_low, src_high));
1318 Label less_than_32;
1319 Label done;
1320 if (shift == 32) {
1321 mov(dst_low, src_high);
1322 mov(dst_high, Operand(0));
1323 } else if (shift > 32) {
1324 shift &= 0x1f;
1325 lsr(dst_low, src_high, Operand(shift));
1326 mov(dst_high, Operand(0));
1327 } else if (shift == 0) {
1328 Move(dst_low, src_low);
1329 Move(dst_high, src_high);
1330 } else {
1331 lsr(dst_low, src_low, Operand(shift));
1332 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1333 lsr(dst_high, src_high, Operand(shift));
1334 }
1335 }
1336
1337 void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1338 Register src_low, Register src_high,
1339 Register scratch, Register shift) {
1340 DCHECK(!AreAliased(dst_low, src_high));
1341 DCHECK(!AreAliased(dst_low, shift));
1342
1343 Label less_than_32;
1344 Label done;
1345 rsb(scratch, shift, Operand(32), SetCC);
1346 b(gt, &less_than_32);
1347 // If shift >= 32
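// The low result word is src_high shifted by (shift & 31); the high word is
// filled with the sign, i.e. src_high arithmetically shifted right by 31.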
1348 and_(scratch, shift, Operand(0x1f));
1349 asr(dst_low, src_high, Operand(scratch));
1350 asr(dst_high, src_high, Operand(31));
1351 jmp(&done);
1352 bind(&less_than_32);
1353 // If shift < 32
1354 lsr(dst_low, src_low, Operand(shift));
1355 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1356 asr(dst_high, src_high, Operand(shift));
1357 bind(&done);
1358 }
1359
1360 void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1361 Register src_low, Register src_high,
1362 uint32_t shift) {
1363 DCHECK(!AreAliased(dst_low, src_high));
1364 Label less_than_32;
1365 Label done;
1366 if (shift == 32) {
1367 mov(dst_low, src_high);
1368 asr(dst_high, src_high, Operand(31));
1369 } else if (shift > 32) {
1370 shift &= 0x1f;
1371 asr(dst_low, src_high, Operand(shift));
1372 asr(dst_high, src_high, Operand(31));
1373 } else if (shift == 0) {
1374 Move(dst_low, src_low);
1375 Move(dst_high, src_high);
1376 } else {
1377 lsr(dst_low, src_low, Operand(shift));
1378 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1379 asr(dst_high, src_high, Operand(shift));
1380 }
1381 }
1382
1383 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1384 Register code_target_address) {
1385 DCHECK(FLAG_enable_embedded_constant_pool);
1386 ldr(pp, MemOperand(code_target_address,
1387 Code::kConstantPoolOffset - Code::kHeaderSize));
1388 add(pp, pp, code_target_address);
1389 }
1390
1391
1392 void MacroAssembler::LoadConstantPoolPointerRegister() {
1393 DCHECK(FLAG_enable_embedded_constant_pool);
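// pc reads as the address of the sub instruction plus kPCReadOffset, so
// subtracting pc_offset() + kPCReadOffset leaves ip pointing at the start of
// this code object's instruction stream.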
1394 int entry_offset = pc_offset() + Instruction::kPCReadOffset;
1395 sub(ip, pc, Operand(entry_offset));
1396 LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
1397 }
1398
1399 void MacroAssembler::StubPrologue(StackFrame::Type type) {
1400 mov(ip, Operand(StackFrame::TypeToMarker(type)));
1401 PushCommonFrame(ip);
1402 if (FLAG_enable_embedded_constant_pool) {
1403 LoadConstantPoolPointerRegister();
1404 set_constant_pool_available(true);
1405 }
1406 }
1407
1408 void MacroAssembler::Prologue(bool code_pre_aging) {
1409 { PredictableCodeSizeScope predictible_code_size_scope(
1410 this, kNoCodeAgeSequenceLength);
1411 // The following three instructions must remain together and unmodified
1412 // for code aging to work properly.
1413 if (code_pre_aging) {
1414 // Pre-age the code.
1415 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
1416 add(r0, pc, Operand(-8));
1417 ldr(pc, MemOperand(pc, -4));
1418 emit_code_stub_address(stub);
1419 } else {
1420 PushStandardFrame(r1);
1421 nop(ip.code());
1422 }
1423 }
1424 if (FLAG_enable_embedded_constant_pool) {
1425 LoadConstantPoolPointerRegister();
1426 set_constant_pool_available(true);
1427 }
1428 }
1429
1430 void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
1431 ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1432 ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
1433 ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
1434 }
1435
1436
1437 void MacroAssembler::EnterFrame(StackFrame::Type type,
1438 bool load_constant_pool_pointer_reg) {
1439 // r0-r3: preserved
1440 mov(ip, Operand(StackFrame::TypeToMarker(type)));
1441 PushCommonFrame(ip);
1442 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
1443 LoadConstantPoolPointerRegister();
1444 }
1445 if (type == StackFrame::INTERNAL) {
1446 mov(ip, Operand(CodeObject()));
1447 push(ip);
1448 }
1449 }
1450
1451
1452 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
1453 // r0: preserved
1454 // r1: preserved
1455 // r2: preserved
1456
1457 // Drop the execution stack down to the frame pointer and restore
1458 // the caller frame pointer, return address and constant pool pointer
1459 // (if FLAG_enable_embedded_constant_pool).
1460 int frame_ends;
1461 if (FLAG_enable_embedded_constant_pool) {
1462 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
1463 frame_ends = pc_offset();
1464 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
1465 } else {
1466 mov(sp, fp);
1467 frame_ends = pc_offset();
1468 ldm(ia_w, sp, fp.bit() | lr.bit());
1469 }
1470 return frame_ends;
1471 }
1472
1473 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
1474 Register argc) {
1475 Push(lr, fp, context, target);
1476 add(fp, sp, Operand(2 * kPointerSize));
1477 Push(argc);
1478 }
1479
1480 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
1481 Register argc) {
1482 Pop(argc);
1483 Pop(lr, fp, context, target);
1484 }
1485
1486 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1487 StackFrame::Type frame_type) {
1488 DCHECK(frame_type == StackFrame::EXIT ||
1489 frame_type == StackFrame::BUILTIN_EXIT);
1490
1491 // Set up the frame structure on the stack.
1492 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1493 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1494 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1495 mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1496 PushCommonFrame(ip);
1497 // Reserve room for saved entry sp and code object.
1498 sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1499 if (emit_debug_code()) {
1500 mov(ip, Operand::Zero());
1501 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1502 }
1503 if (FLAG_enable_embedded_constant_pool) {
1504 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1505 }
1506 mov(ip, Operand(CodeObject()));
1507 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1508
1509 // Save the frame pointer and the context in top.
1510 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1511 str(fp, MemOperand(ip));
1512 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1513 str(cp, MemOperand(ip));
1514
1515 // Optionally save all double registers.
1516 if (save_doubles) {
1517 SaveFPRegs(sp, ip);
1518 // Note that d0 will be accessible at
1519 // fp - ExitFrameConstants::kFrameSize -
1520 // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
1521 // since the sp slot, code slot and constant pool slot (if
1522 // FLAG_enable_embedded_constant_pool) were pushed after the fp.
1523 }
1524
1525 // Reserve space for the return address and the requested stack space, and
1526 // align the frame in preparation for calling the runtime function.
1527 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1528 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1529 if (frame_alignment > 0) {
1530 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
1531 and_(sp, sp, Operand(-frame_alignment));
1532 }
1533
1534 // Set the exit frame sp value to point just before the return address
1535 // location.
1536 add(ip, sp, Operand(kPointerSize));
1537 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1538 }
1539
1540 int MacroAssembler::ActivationFrameAlignment() {
1541 #if V8_HOST_ARCH_ARM
1542 // Running on the real platform. Use the alignment as mandated by the local
1543 // environment.
1544 // Note: This will break if we ever start generating snapshots on one ARM
1545 // platform for another ARM platform with a different alignment.
1546 return base::OS::ActivationFrameAlignment();
1547 #else // V8_HOST_ARCH_ARM
1548 // If we are using the simulator then we should always align to the expected
1549 // alignment. As the simulator is used to generate snapshots we do not know
1550 // if the target platform will need alignment, so this is controlled from a
1551 // flag.
1552 return FLAG_sim_stack_alignment;
1553 #endif // V8_HOST_ARCH_ARM
1554 }
1555
1556
1557 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1558 bool restore_context,
1559 bool argument_count_is_length) {
1560 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1561
1562 // Optionally restore all double registers.
1563 if (save_doubles) {
1564 // Calculate the stack location of the saved doubles and restore them.
1565 const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
1566 sub(r3, fp,
1567 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1568 RestoreFPRegs(r3, ip);
1569 }
1570
1571 // Clear top frame.
1572 mov(r3, Operand::Zero());
1573 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1574 str(r3, MemOperand(ip));
1575
1576 // Restore current context from top and clear it in debug mode.
1577 if (restore_context) {
1578 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1579 ldr(cp, MemOperand(ip));
1580 }
1581 #ifdef DEBUG
1582 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1583 str(r3, MemOperand(ip));
1584 #endif
1585
1586 // Tear down the exit frame, pop the arguments, and return.
1587 if (FLAG_enable_embedded_constant_pool) {
1588 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1589 }
1590 mov(sp, Operand(fp));
1591 ldm(ia_w, sp, fp.bit() | lr.bit());
1592 if (argument_count.is_valid()) {
1593 if (argument_count_is_length) {
1594 add(sp, sp, argument_count);
1595 } else {
1596 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1597 }
1598 }
1599 }
1600
1601
MovFromFloatResult(const DwVfpRegister dst)1602 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1603 if (use_eabi_hardfloat()) {
1604 Move(dst, d0);
1605 } else {
1606 vmov(dst, r0, r1);
1607 }
1608 }
1609
1610
1611 // On ARM this is just a synonym to make the purpose clear.
MovFromFloatParameter(DwVfpRegister dst)1612 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1613 MovFromFloatResult(dst);
1614 }
1615
PrepareForTailCall(const ParameterCount & callee_args_count,Register caller_args_count_reg,Register scratch0,Register scratch1)1616 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1617 Register caller_args_count_reg,
1618 Register scratch0, Register scratch1) {
1619 #if DEBUG
1620 if (callee_args_count.is_reg()) {
1621 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1622 scratch1));
1623 } else {
1624 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1625 }
1626 #endif
1627
1628 // Calculate the end of the destination area where we will put the arguments
1629 // after we drop the current frame. We add kPointerSize to count the receiver
1630 // argument, which is not included in the formal parameter count.
1631 Register dst_reg = scratch0;
1632 add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
1633 add(dst_reg, dst_reg,
1634 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1635
1636 Register src_reg = caller_args_count_reg;
1637 // Calculate the end of source area. +kPointerSize is for the receiver.
1638 if (callee_args_count.is_reg()) {
1639 add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
1640 add(src_reg, src_reg, Operand(kPointerSize));
1641 } else {
1642 add(src_reg, sp,
1643 Operand((callee_args_count.immediate() + 1) * kPointerSize));
1644 }
1645
1646 if (FLAG_debug_code) {
1647 cmp(src_reg, dst_reg);
1648 Check(lo, kStackAccessBelowStackPointer);
1649 }
1650
1651 // Restore caller's frame pointer and return address now as they will be
1652 // overwritten by the copying loop.
1653 ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1654 ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1655
1656 // Now copy the callee arguments to the caller frame going backwards to avoid
1657 // corrupting the callee arguments (the source and destination areas could overlap).
1658
1659 // Both src_reg and dst_reg are pointing to the word after the one to copy,
1660 // so they must be pre-decremented in the loop.
1661 Register tmp_reg = scratch1;
1662 Label loop, entry;
1663 b(&entry);
1664 bind(&loop);
1665 ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
1666 str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
1667 bind(&entry);
1668 cmp(sp, src_reg);
1669 b(ne, &loop);
1670
1671 // Leave current frame.
1672 mov(sp, dst_reg);
1673 }
1674
InvokePrologue(const ParameterCount & expected,const ParameterCount & actual,Label * done,bool * definitely_mismatches,InvokeFlag flag,const CallWrapper & call_wrapper)1675 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1676 const ParameterCount& actual,
1677 Label* done,
1678 bool* definitely_mismatches,
1679 InvokeFlag flag,
1680 const CallWrapper& call_wrapper) {
1681 bool definitely_matches = false;
1682 *definitely_mismatches = false;
1683 Label regular_invoke;
1684
1685 // Check whether the expected and actual arguments count match. If not,
1686 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1687 // r0: actual arguments count
1688 // r1: function (passed through to callee)
1689 // r2: expected arguments count
1690
1691 // The code below is made a lot easier because the calling code already sets
1692 // up actual and expected registers according to the contract if values are
1693 // passed in registers.
1694 DCHECK(actual.is_immediate() || actual.reg().is(r0));
1695 DCHECK(expected.is_immediate() || expected.reg().is(r2));
1696
1697 if (expected.is_immediate()) {
1698 DCHECK(actual.is_immediate());
1699 mov(r0, Operand(actual.immediate()));
1700 if (expected.immediate() == actual.immediate()) {
1701 definitely_matches = true;
1702 } else {
1703 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1704 if (expected.immediate() == sentinel) {
1705 // Don't worry about adapting arguments for builtins that
1706 // don't want that done. Skip the adaptation code by making it look
1707 // like we have a match between expected and actual number of
1708 // arguments.
1709 definitely_matches = true;
1710 } else {
1711 *definitely_mismatches = true;
1712 mov(r2, Operand(expected.immediate()));
1713 }
1714 }
1715 } else {
1716 if (actual.is_immediate()) {
1717 mov(r0, Operand(actual.immediate()));
1718 cmp(expected.reg(), Operand(actual.immediate()));
1719 b(eq, &regular_invoke);
1720 } else {
1721 cmp(expected.reg(), Operand(actual.reg()));
1722 b(eq, &regular_invoke);
1723 }
1724 }
1725
1726 if (!definitely_matches) {
1727 Handle<Code> adaptor =
1728 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1729 if (flag == CALL_FUNCTION) {
1730 call_wrapper.BeforeCall(CallSize(adaptor));
1731 Call(adaptor);
1732 call_wrapper.AfterCall();
1733 if (!*definitely_mismatches) {
1734 b(done);
1735 }
1736 } else {
1737 Jump(adaptor, RelocInfo::CODE_TARGET);
1738 }
1739 bind(&regular_invoke);
1740 }
1741 }
1742
CheckDebugHook(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)1743 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1744 const ParameterCount& expected,
1745 const ParameterCount& actual) {
1746 Label skip_hook;
1747 ExternalReference debug_hook_active =
1748 ExternalReference::debug_hook_on_function_call_address(isolate());
1749 mov(r4, Operand(debug_hook_active));
1750 ldrsb(r4, MemOperand(r4));
1751 cmp(r4, Operand(0));
1752 b(eq, &skip_hook);
1753 {
1754 FrameScope frame(this,
1755 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1756 if (expected.is_reg()) {
1757 SmiTag(expected.reg());
1758 Push(expected.reg());
1759 }
1760 if (actual.is_reg()) {
1761 SmiTag(actual.reg());
1762 Push(actual.reg());
1763 }
1764 if (new_target.is_valid()) {
1765 Push(new_target);
1766 }
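// The function is pushed twice: the top copy is consumed as the argument to
// Runtime::kDebugOnFunctionCall, while the bottom copy is restored by the
// Pop(fun) below.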
1767 Push(fun);
1768 Push(fun);
1769 CallRuntime(Runtime::kDebugOnFunctionCall);
1770 Pop(fun);
1771 if (new_target.is_valid()) {
1772 Pop(new_target);
1773 }
1774 if (actual.is_reg()) {
1775 Pop(actual.reg());
1776 SmiUntag(actual.reg());
1777 }
1778 if (expected.is_reg()) {
1779 Pop(expected.reg());
1780 SmiUntag(expected.reg());
1781 }
1782 }
1783 bind(&skip_hook);
1784 }
1785
1786
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1787 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1788 const ParameterCount& expected,
1789 const ParameterCount& actual,
1790 InvokeFlag flag,
1791 const CallWrapper& call_wrapper) {
1792 // You can't call a function without a valid frame.
1793 DCHECK(flag == JUMP_FUNCTION || has_frame());
1794 DCHECK(function.is(r1));
1795 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
1796
1797 if (call_wrapper.NeedsDebugHookCheck()) {
1798 CheckDebugHook(function, new_target, expected, actual);
1799 }
1800
1801 // Clear the new.target register if not given.
1802 if (!new_target.is_valid()) {
1803 LoadRoot(r3, Heap::kUndefinedValueRootIndex);
1804 }
1805
1806 Label done;
1807 bool definitely_mismatches = false;
1808 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1809 call_wrapper);
1810 if (!definitely_mismatches) {
1811 // We call indirectly through the code field in the function to
1812 // allow recompilation to take effect without changing any of the
1813 // call sites.
1814 Register code = r4;
1815 ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1816 if (flag == CALL_FUNCTION) {
1817 call_wrapper.BeforeCall(CallSize(code));
1818 Call(code);
1819 call_wrapper.AfterCall();
1820 } else {
1821 DCHECK(flag == JUMP_FUNCTION);
1822 Jump(code);
1823 }
1824
1825 // Continue here if InvokePrologue does handle the invocation due to
1826 // mismatched parameter counts.
1827 bind(&done);
1828 }
1829 }
1830
1831
InvokeFunction(Register fun,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1832 void MacroAssembler::InvokeFunction(Register fun,
1833 Register new_target,
1834 const ParameterCount& actual,
1835 InvokeFlag flag,
1836 const CallWrapper& call_wrapper) {
1837 // You can't call a function without a valid frame.
1838 DCHECK(flag == JUMP_FUNCTION || has_frame());
1839
1840 // Contract with called JS functions requires that function is passed in r1.
1841 DCHECK(fun.is(r1));
1842
1843 Register expected_reg = r2;
1844 Register temp_reg = r4;
1845
1846 ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1847 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1848 ldr(expected_reg,
1849 FieldMemOperand(temp_reg,
1850 SharedFunctionInfo::kFormalParameterCountOffset));
1851 SmiUntag(expected_reg);
1852
1853 ParameterCount expected(expected_reg);
1854 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1855 }
1856
1857
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1858 void MacroAssembler::InvokeFunction(Register function,
1859 const ParameterCount& expected,
1860 const ParameterCount& actual,
1861 InvokeFlag flag,
1862 const CallWrapper& call_wrapper) {
1863 // You can't call a function without a valid frame.
1864 DCHECK(flag == JUMP_FUNCTION || has_frame());
1865
1866 // Contract with called JS functions requires that function is passed in r1.
1867 DCHECK(function.is(r1));
1868
1869 // Get the function and setup the context.
1870 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1871
1872 InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
1873 }
1874
1875
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1876 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1877 const ParameterCount& expected,
1878 const ParameterCount& actual,
1879 InvokeFlag flag,
1880 const CallWrapper& call_wrapper) {
1881 Move(r1, function);
1882 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1883 }
1884
1885
IsObjectJSStringType(Register object,Register scratch,Label * fail)1886 void MacroAssembler::IsObjectJSStringType(Register object,
1887 Register scratch,
1888 Label* fail) {
1889 DCHECK(kNotStringTag != 0);
1890
1891 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1892 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1893 tst(scratch, Operand(kIsNotStringMask));
1894 b(ne, fail);
1895 }
1896
1897
IsObjectNameType(Register object,Register scratch,Label * fail)1898 void MacroAssembler::IsObjectNameType(Register object,
1899 Register scratch,
1900 Label* fail) {
1901 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1902 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1903 cmp(scratch, Operand(LAST_NAME_TYPE));
1904 b(hi, fail);
1905 }
1906
MaybeDropFrames()1907 void MacroAssembler::MaybeDropFrames() {
1908 // Check whether we need to drop frames to restart a function on the stack.
1909 ExternalReference restart_fp =
1910 ExternalReference::debug_restart_fp_address(isolate());
1911 mov(r1, Operand(restart_fp));
1912 ldr(r1, MemOperand(r1));
1913 tst(r1, r1);
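// A non-zero restart frame pointer means frames must be dropped; tail-call
// the frame dropper trampoline in that case.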
1914 Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
1915 ne);
1916 }
1917
PushStackHandler()1918 void MacroAssembler::PushStackHandler() {
1919 // Adjust this code if not the case.
1920 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1921 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1922
1923 // Link the current handler as the next handler.
1924 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1925 ldr(r5, MemOperand(r6));
1926 push(r5);
1927
1928 // Set this new handler as the current one.
1929 str(sp, MemOperand(r6));
1930 }
1931
1932
PopStackHandler()1933 void MacroAssembler::PopStackHandler() {
1934 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1935 pop(r1);
1936 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
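// Drop the remaining words of the handler; the next-handler word was already
// popped into r1 above.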
1937 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1938 str(r1, MemOperand(ip));
1939 }
1940
1941
1942 // Compute the hash code from the untagged key. This must be kept in sync with
1943 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1944 // code-stubs-hydrogen.cc
GetNumberHash(Register t0,Register scratch)1945 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1946 // First of all we assign the hash seed to scratch.
1947 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1948 SmiUntag(scratch);
1949
1950 // Xor original key with a seed.
1951 eor(t0, t0, Operand(scratch));
1952
1953 // Compute the hash code from the untagged key. This must be kept in sync
1954 // with ComputeIntegerHash in utils.h.
1955 //
1956 // hash = ~hash + (hash << 15);
1957 mvn(scratch, Operand(t0));
1958 add(t0, scratch, Operand(t0, LSL, 15));
1959 // hash = hash ^ (hash >> 12);
1960 eor(t0, t0, Operand(t0, LSR, 12));
1961 // hash = hash + (hash << 2);
1962 add(t0, t0, Operand(t0, LSL, 2));
1963 // hash = hash ^ (hash >> 4);
1964 eor(t0, t0, Operand(t0, LSR, 4));
1965 // hash = hash * 2057;
1966 mov(scratch, Operand(t0, LSL, 11));
1967 add(t0, t0, Operand(t0, LSL, 3));
1968 add(t0, t0, scratch);
1969 // hash = hash ^ (hash >> 16);
1970 eor(t0, t0, Operand(t0, LSR, 16));
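// Clear the top two bits to keep the hash non-negative and within smi range.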
1971 bic(t0, t0, Operand(0xc0000000u));
1972 }
1973
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)1974 void MacroAssembler::Allocate(int object_size,
1975 Register result,
1976 Register scratch1,
1977 Register scratch2,
1978 Label* gc_required,
1979 AllocationFlags flags) {
1980 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1981 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1982 if (!FLAG_inline_new) {
1983 if (emit_debug_code()) {
1984 // Trash the registers to simulate an allocation failure.
1985 mov(result, Operand(0x7091));
1986 mov(scratch1, Operand(0x7191));
1987 mov(scratch2, Operand(0x7291));
1988 }
1989 jmp(gc_required);
1990 return;
1991 }
1992
1993 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1994
1995 // Make object size into bytes.
1996 if ((flags & SIZE_IN_WORDS) != 0) {
1997 object_size *= kPointerSize;
1998 }
1999 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
2000
2001 // Check relative positions of allocation top and limit addresses.
2002 // The values must be adjacent in memory to allow the use of LDM.
2003 // Also, assert that the registers are numbered such that the values
2004 // are loaded in the correct order.
2005 ExternalReference allocation_top =
2006 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2007 ExternalReference allocation_limit =
2008 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2009
2010 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
2011 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
2012 DCHECK((limit - top) == kPointerSize);
2013 DCHECK(result.code() < ip.code());
2014
2015 // Set up allocation top address register.
2016 Register top_address = scratch1;
2017 // This code stores a temporary value in ip. This is OK, as the code below
2018 // does not need ip for implicit literal generation.
2019 Register alloc_limit = ip;
2020 Register result_end = scratch2;
2021 mov(top_address, Operand(allocation_top));
2022
2023 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2024 // Load allocation top into result and allocation limit into alloc_limit.
2025 ldm(ia, top_address, result.bit() | alloc_limit.bit());
2026 } else {
2027 if (emit_debug_code()) {
2028 // Assert that result actually contains top on entry.
2029 ldr(alloc_limit, MemOperand(top_address));
2030 cmp(result, alloc_limit);
2031 Check(eq, kUnexpectedAllocationTop);
2032 }
2033 // Load allocation limit. Result already contains allocation top.
2034 ldr(alloc_limit, MemOperand(top_address, limit - top));
2035 }
2036
2037 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2038 // Align the next allocation. Storing the filler map without checking top is
2039 // safe in new-space because the limit of the heap is aligned there.
2040 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2041 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2042 Label aligned;
2043 b(eq, &aligned);
2044 if ((flags & PRETENURE) != 0) {
2045 cmp(result, Operand(alloc_limit));
2046 b(hs, gc_required);
2047 }
2048 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2049 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2050 bind(&aligned);
2051 }
2052
2053 // Calculate new top and bail out if new space is exhausted. Use result
2054 // to calculate the new top. We must preserve the ip register at this
2055 // point, so we cannot just use add().
2056 DCHECK(object_size > 0);
2057 Register source = result;
2058 int shift = 0;
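// Add object_size to result in chunks that each encode as a single ARM
// immediate (an 8-bit field at an even bit position), so ip, which holds the
// allocation limit, is never needed for literal loading.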
2059 while (object_size != 0) {
2060 if (((object_size >> shift) & 0x03) == 0) {
2061 shift += 2;
2062 } else {
2063 int bits = object_size & (0xff << shift);
2064 object_size -= bits;
2065 shift += 8;
2066 Operand bits_operand(bits);
2067 DCHECK(bits_operand.instructions_required(this) == 1);
2068 add(result_end, source, bits_operand);
2069 source = result_end;
2070 }
2071 }
2072
2073 cmp(result_end, Operand(alloc_limit));
2074 b(hi, gc_required);
2075
2076 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2077 // The top pointer is not updated for allocation folding dominators.
2078 str(result_end, MemOperand(top_address));
2079 }
2080
2081 // Tag object.
2082 add(result, result, Operand(kHeapObjectTag));
2083 }
2084
2085
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)2086 void MacroAssembler::Allocate(Register object_size, Register result,
2087 Register result_end, Register scratch,
2088 Label* gc_required, AllocationFlags flags) {
2089 DCHECK((flags & ALLOCATION_FOLDED) == 0);
2090 if (!FLAG_inline_new) {
2091 if (emit_debug_code()) {
2092 // Trash the registers to simulate an allocation failure.
2093 mov(result, Operand(0x7091));
2094 mov(scratch, Operand(0x7191));
2095 mov(result_end, Operand(0x7291));
2096 }
2097 jmp(gc_required);
2098 return;
2099 }
2100
2101 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
2102 // is not specified. Other registers must not overlap.
2103 DCHECK(!AreAliased(object_size, result, scratch, ip));
2104 DCHECK(!AreAliased(result_end, result, scratch, ip));
2105 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
2106
2107 // Check relative positions of allocation top and limit addresses.
2108 // The values must be adjacent in memory to allow the use of LDM.
2109 // Also, assert that the registers are numbered such that the values
2110 // are loaded in the correct order.
2111 ExternalReference allocation_top =
2112 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2113 ExternalReference allocation_limit =
2114 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2115 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
2116 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
2117 DCHECK((limit - top) == kPointerSize);
2118 DCHECK(result.code() < ip.code());
2119
2120 // Set up allocation top address and allocation limit registers.
2121 Register top_address = scratch;
2122 // This code stores a temporary value in ip. This is OK, as the code below
2123 // does not need ip for implicit literal generation.
2124 Register alloc_limit = ip;
2125 mov(top_address, Operand(allocation_top));
2126
2127 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2128 // Load allocation top into result and allocation limit into alloc_limit.
2129 ldm(ia, top_address, result.bit() | alloc_limit.bit());
2130 } else {
2131 if (emit_debug_code()) {
2132 // Assert that result actually contains top on entry.
2133 ldr(alloc_limit, MemOperand(top_address));
2134 cmp(result, alloc_limit);
2135 Check(eq, kUnexpectedAllocationTop);
2136 }
2137 // Load allocation limit. Result already contains allocation top.
2138 ldr(alloc_limit, MemOperand(top_address, limit - top));
2139 }
2140
2141 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2142 // Align the next allocation. Storing the filler map without checking top is
2143 // safe in new-space because the limit of the heap is aligned there.
2144 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2145 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2146 Label aligned;
2147 b(eq, &aligned);
2148 if ((flags & PRETENURE) != 0) {
2149 cmp(result, Operand(alloc_limit));
2150 b(hs, gc_required);
2151 }
2152 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2153 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2154 bind(&aligned);
2155 }
2156
2157 // Calculate new top and bail out if new space is exhausted. Use result
2158 // to calculate the new top. Object size may be in words so a shift is
2159 // required to get the number of bytes.
2160 if ((flags & SIZE_IN_WORDS) != 0) {
2161 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2162 } else {
2163 add(result_end, result, Operand(object_size), SetCC);
2164 }
2165
2166 cmp(result_end, Operand(alloc_limit));
2167 b(hi, gc_required);
2168
2169 // Update allocation top. result temporarily holds the new top.
2170 if (emit_debug_code()) {
2171 tst(result_end, Operand(kObjectAlignmentMask));
2172 Check(eq, kUnalignedAllocationInNewSpace);
2173 }
2174 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2175 // The top pointer is not updated for allocation folding dominators.
2176 str(result_end, MemOperand(top_address));
2177 }
2178
2179 // Tag object.
2180 add(result, result, Operand(kHeapObjectTag));
2181 }
2182
FastAllocate(Register object_size,Register result,Register result_end,Register scratch,AllocationFlags flags)2183 void MacroAssembler::FastAllocate(Register object_size, Register result,
2184 Register result_end, Register scratch,
2185 AllocationFlags flags) {
2186 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
2187 // is not specified. Other registers must not overlap.
2188 DCHECK(!AreAliased(object_size, result, scratch, ip));
2189 DCHECK(!AreAliased(result_end, result, scratch, ip));
2190 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
2191
2192 ExternalReference allocation_top =
2193 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2194
2195 Register top_address = scratch;
2196 mov(top_address, Operand(allocation_top));
2197 ldr(result, MemOperand(top_address));
2198
2199 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2200 // Align the next allocation. Storing the filler map without checking top is
2201 // safe in new-space because the limit of the heap is aligned there.
2202 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2203 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2204 Label aligned;
2205 b(eq, &aligned);
2206 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2207 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2208 bind(&aligned);
2209 }
2210
2211 // Calculate new top using result. Object size may be in words so a shift is
2212 // required to get the number of bytes.
2213 if ((flags & SIZE_IN_WORDS) != 0) {
2214 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2215 } else {
2216 add(result_end, result, Operand(object_size), SetCC);
2217 }
2218
2219 // Update allocation top. result temporarily holds the new top.
2220 if (emit_debug_code()) {
2221 tst(result_end, Operand(kObjectAlignmentMask));
2222 Check(eq, kUnalignedAllocationInNewSpace);
2223 }
2224 // The top pointer is not updated for allocation folding dominators.
2225 str(result_end, MemOperand(top_address));
2226
2227 add(result, result, Operand(kHeapObjectTag));
2228 }
2229
FastAllocate(int object_size,Register result,Register scratch1,Register scratch2,AllocationFlags flags)2230 void MacroAssembler::FastAllocate(int object_size, Register result,
2231 Register scratch1, Register scratch2,
2232 AllocationFlags flags) {
2233 DCHECK(object_size <= kMaxRegularHeapObjectSize);
2234 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
2235
2236 // Make object size into bytes.
2237 if ((flags & SIZE_IN_WORDS) != 0) {
2238 object_size *= kPointerSize;
2239 }
2240 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
2241
2242 ExternalReference allocation_top =
2243 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2244
2245 // Set up allocation top address register.
2246 Register top_address = scratch1;
2247 Register result_end = scratch2;
2248 mov(top_address, Operand(allocation_top));
2249 ldr(result, MemOperand(top_address));
2250
2251 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2252 // Align the next allocation. Storing the filler map without checking top is
2253 // safe in new-space because the limit of the heap is aligned there.
2254 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2255 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2256 Label aligned;
2257 b(eq, &aligned);
2258 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2259 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2260 bind(&aligned);
2261 }
2262
2263 // Calculate new top using result. Object size may be in words so a shift is
2264 // required to get the number of bytes. We must preserve the ip register at
2265 // this point, so we cannot just use add().
2266 DCHECK(object_size > 0);
2267 Register source = result;
2268 int shift = 0;
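// As in Allocate above, add the size in single-instruction immediates so
// that ip is not needed for literal loading.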
2269 while (object_size != 0) {
2270 if (((object_size >> shift) & 0x03) == 0) {
2271 shift += 2;
2272 } else {
2273 int bits = object_size & (0xff << shift);
2274 object_size -= bits;
2275 shift += 8;
2276 Operand bits_operand(bits);
2277 DCHECK(bits_operand.instructions_required(this) == 1);
2278 add(result_end, source, bits_operand);
2279 source = result_end;
2280 }
2281 }
2282
2283 // The top pointer is not updated for allocation folding dominators.
2284 str(result_end, MemOperand(top_address));
2285
2286 add(result, result, Operand(kHeapObjectTag));
2287 }
2288
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)2289 void MacroAssembler::CompareObjectType(Register object,
2290 Register map,
2291 Register type_reg,
2292 InstanceType type) {
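// Fall back to ip as the scratch register when no type register is supplied.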
2293 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2294
2295 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2296 CompareInstanceType(map, temp, type);
2297 }
2298
2299
CompareInstanceType(Register map,Register type_reg,InstanceType type)2300 void MacroAssembler::CompareInstanceType(Register map,
2301 Register type_reg,
2302 InstanceType type) {
2303 // Registers map and type_reg can be ip. These two lines assert
2304 // that ip can be used with the two instructions (the constants
2305 // will never need ip).
2306 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2307 STATIC_ASSERT(LAST_TYPE < 256);
2308 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2309 cmp(type_reg, Operand(type));
2310 }
2311
2312
CompareRoot(Register obj,Heap::RootListIndex index)2313 void MacroAssembler::CompareRoot(Register obj,
2314 Heap::RootListIndex index) {
2315 DCHECK(!obj.is(ip));
2316 LoadRoot(ip, index);
2317 cmp(obj, ip);
2318 }
2319
CompareMap(Register obj,Register scratch,Handle<Map> map,Label * early_success)2320 void MacroAssembler::CompareMap(Register obj,
2321 Register scratch,
2322 Handle<Map> map,
2323 Label* early_success) {
2324 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2325 CompareMap(scratch, map, early_success);
2326 }
2327
2328
CompareMap(Register obj_map,Handle<Map> map,Label * early_success)2329 void MacroAssembler::CompareMap(Register obj_map,
2330 Handle<Map> map,
2331 Label* early_success) {
2332 cmp(obj_map, Operand(map));
2333 }
2334
2335
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)2336 void MacroAssembler::CheckMap(Register obj,
2337 Register scratch,
2338 Handle<Map> map,
2339 Label* fail,
2340 SmiCheckType smi_check_type) {
2341 if (smi_check_type == DO_SMI_CHECK) {
2342 JumpIfSmi(obj, fail);
2343 }
2344
2345 Label success;
2346 CompareMap(obj, scratch, map, &success);
2347 b(ne, fail);
2348 bind(&success);
2349 }
2350
2351
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)2352 void MacroAssembler::CheckMap(Register obj,
2353 Register scratch,
2354 Heap::RootListIndex index,
2355 Label* fail,
2356 SmiCheckType smi_check_type) {
2357 if (smi_check_type == DO_SMI_CHECK) {
2358 JumpIfSmi(obj, fail);
2359 }
2360 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2361 LoadRoot(ip, index);
2362 cmp(scratch, ip);
2363 b(ne, fail);
2364 }
2365
2366
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)2367 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2368 Register scratch2, Handle<WeakCell> cell,
2369 Handle<Code> success,
2370 SmiCheckType smi_check_type) {
2371 Label fail;
2372 if (smi_check_type == DO_SMI_CHECK) {
2373 JumpIfSmi(obj, &fail);
2374 }
2375 ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2376 CmpWeakValue(scratch1, cell, scratch2);
2377 Jump(success, RelocInfo::CODE_TARGET, eq);
2378 bind(&fail);
2379 }
2380
2381
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch)2382 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2383 Register scratch) {
2384 mov(scratch, Operand(cell));
2385 ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2386 cmp(value, scratch);
2387 }
2388
2389
GetWeakValue(Register value,Handle<WeakCell> cell)2390 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2391 mov(value, Operand(cell));
2392 ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2393 }
2394
2395
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)2396 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2397 Label* miss) {
2398 GetWeakValue(value, cell);
2399 JumpIfSmi(value, miss);
2400 }
2401
2402
GetMapConstructor(Register result,Register map,Register temp,Register temp2)2403 void MacroAssembler::GetMapConstructor(Register result, Register map,
2404 Register temp, Register temp2) {
2405 Label done, loop;
2406 ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2407 bind(&loop);
2408 JumpIfSmi(result, &done);
2409 CompareObjectType(result, temp, temp2, MAP_TYPE);
2410 b(ne, &done);
2411 ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2412 b(&loop);
2413 bind(&done);
2414 }
2415
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond)2416 void MacroAssembler::CallStub(CodeStub* stub,
2417 TypeFeedbackId ast_id,
2418 Condition cond) {
2419 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2420 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2421 }
2422
2423
TailCallStub(CodeStub * stub,Condition cond)2424 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2425 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2426 }
2427
2428
AllowThisStubCall(CodeStub * stub)2429 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2430 return has_frame_ || !stub->SometimesSetsUpAFrame();
2431 }
2432
SmiToDouble(LowDwVfpRegister value,Register smi)2433 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2434 if (CpuFeatures::IsSupported(VFPv3)) {
2435 CpuFeatureScope scope(this, VFPv3);
2436 vmov(value.low(), smi);
2437 vcvt_f64_s32(value, 1);
2438 } else {
2439 SmiUntag(ip, smi);
2440 vmov(value.low(), ip);
2441 vcvt_f64_s32(value, value.low());
2442 }
2443 }
2444
2445
TestDoubleIsInt32(DwVfpRegister double_input,LowDwVfpRegister double_scratch)2446 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2447 LowDwVfpRegister double_scratch) {
2448 DCHECK(!double_input.is(double_scratch));
2449 vcvt_s32_f64(double_scratch.low(), double_input);
2450 vcvt_f64_s32(double_scratch, double_scratch.low());
2451 VFPCompareAndSetFlags(double_input, double_scratch);
2452 }
2453
2454
TryDoubleToInt32Exact(Register result,DwVfpRegister double_input,LowDwVfpRegister double_scratch)2455 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2456 DwVfpRegister double_input,
2457 LowDwVfpRegister double_scratch) {
2458 DCHECK(!double_input.is(double_scratch));
2459 vcvt_s32_f64(double_scratch.low(), double_input);
2460 vmov(result, double_scratch.low());
2461 vcvt_f64_s32(double_scratch, double_scratch.low());
2462 VFPCompareAndSetFlags(double_input, double_scratch);
2463 }
2464
2465
TryInt32Floor(Register result,DwVfpRegister double_input,Register input_high,LowDwVfpRegister double_scratch,Label * done,Label * exact)2466 void MacroAssembler::TryInt32Floor(Register result,
2467 DwVfpRegister double_input,
2468 Register input_high,
2469 LowDwVfpRegister double_scratch,
2470 Label* done,
2471 Label* exact) {
2472 DCHECK(!result.is(input_high));
2473 DCHECK(!double_input.is(double_scratch));
2474 Label negative, exception;
2475
2476 VmovHigh(input_high, double_input);
2477
2478 // Test for NaN and infinities.
2479 Sbfx(result, input_high,
2480 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
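// Sbfx sign-extends the exponent field, so an all-ones exponent (NaN or
// an infinity) compares equal to -1.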
2481 cmp(result, Operand(-1));
2482 b(eq, &exception);
2483 // Test for values that can be exactly represented as a
2484 // signed 32-bit integer.
2485 TryDoubleToInt32Exact(result, double_input, double_scratch);
2486 // If exact, return (result already fetched).
2487 b(eq, exact);
2488 cmp(input_high, Operand::Zero());
2489 b(mi, &negative);
2490
2491 // Input is in ]+0, +inf[.
2492 // If result equals 0x7fffffff, the input was out of range or in
2493 // ]0x7fffffff, 0x80000000[. We ignore this last case, even though it
2494 // could fit into an int32; that means we always treat such input as
2495 // out of range and always go to the exception path.
2496 // If result < 0x7fffffff, go to done, result fetched.
2497 cmn(result, Operand(1));
2498 b(mi, &exception);
2499 b(done);
2500
2501 // Input is in ]-inf, -0[.
2502 // If x is a non integer negative number,
2503 // floor(x) <=> round_to_zero(x) - 1.
2504 bind(&negative);
2505 sub(result, result, Operand(1), SetCC);
2506 // If result is still negative, go to done, result fetched.
2507 // Else, we had an overflow and we fall through exception.
2508 b(mi, done);
2509 bind(&exception);
2510 }
2511
TryInlineTruncateDoubleToI(Register result,DwVfpRegister double_input,Label * done)2512 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2513 DwVfpRegister double_input,
2514 Label* done) {
2515 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2516 vcvt_s32_f64(double_scratch.low(), double_input);
2517 vmov(result, double_scratch.low());
2518
2519 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2520 sub(ip, result, Operand(1));
2521 cmp(ip, Operand(0x7ffffffe));
2522 b(lt, done);
2523 }
2524
2525
TruncateDoubleToI(Register result,DwVfpRegister double_input)2526 void MacroAssembler::TruncateDoubleToI(Register result,
2527 DwVfpRegister double_input) {
2528 Label done;
2529
2530 TryInlineTruncateDoubleToI(result, double_input, &done);
2531
2532 // If we fell through then inline version didn't succeed - call stub instead.
2533 push(lr);
2534 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2535 vstr(double_input, MemOperand(sp, 0));
2536
2537 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2538 CallStub(&stub);
2539
2540 add(sp, sp, Operand(kDoubleSize));
2541 pop(lr);
2542
2543 bind(&done);
2544 }
2545
2546
TruncateHeapNumberToI(Register result,Register object)2547 void MacroAssembler::TruncateHeapNumberToI(Register result,
2548 Register object) {
2549 Label done;
2550 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2551 DCHECK(!result.is(object));
2552
2553 vldr(double_scratch,
2554 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2555 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2556
2557 // If we fell through then inline version didn't succeed - call stub instead.
2558 push(lr);
2559 DoubleToIStub stub(isolate(),
2560 object,
2561 result,
2562 HeapNumber::kValueOffset - kHeapObjectTag,
2563 true,
2564 true);
2565 CallStub(&stub);
2566 pop(lr);
2567
2568 bind(&done);
2569 }
2570
2571
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch1,Label * not_number)2572 void MacroAssembler::TruncateNumberToI(Register object,
2573 Register result,
2574 Register heap_number_map,
2575 Register scratch1,
2576 Label* not_number) {
2577 Label done;
2578 DCHECK(!result.is(object));
2579
2580 UntagAndJumpIfSmi(result, object, &done);
2581 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2582 TruncateHeapNumberToI(result, object);
2583
2584 bind(&done);
2585 }
2586
2587
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2588 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2589 Register src,
2590 int num_least_bits) {
2591 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2592 CpuFeatureScope scope(this, ARMv7);
2593 ubfx(dst, src, kSmiTagSize, num_least_bits);
2594 } else {
2595 SmiUntag(dst, src);
2596 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2597 }
2598 }
2599
2600
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2601 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2602 Register src,
2603 int num_least_bits) {
2604 and_(dst, src, Operand((1 << num_least_bits) - 1));
2605 }
2606
2607
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)2608 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2609 int num_arguments,
2610 SaveFPRegsMode save_doubles) {
2611 // All parameters are on the stack. r0 has the return value after the call.
2612
2613 // If the expected number of arguments of the runtime function is
2614 // constant, we check that the actual number of arguments match the
2615 // expectation.
2616 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2617
2618 // TODO(1236192): Most runtime routines don't need the number of
2619 // arguments passed in because it is constant. At some point we
2620 // should remove this need and make the runtime routine entry code
2621 // smarter.
2622 mov(r0, Operand(num_arguments));
2623 mov(r1, Operand(ExternalReference(f, isolate())));
2624 CEntryStub stub(isolate(), 1, save_doubles);
2625 CallStub(&stub);
2626 }
2627
2628
CallExternalReference(const ExternalReference & ext,int num_arguments)2629 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2630 int num_arguments) {
2631 mov(r0, Operand(num_arguments));
2632 mov(r1, Operand(ext));
2633
2634 CEntryStub stub(isolate(), 1);
2635 CallStub(&stub);
2636 }
2637
2638
TailCallRuntime(Runtime::FunctionId fid)2639 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2640 const Runtime::Function* function = Runtime::FunctionForId(fid);
2641 DCHECK_EQ(1, function->result_size);
2642 if (function->nargs >= 0) {
2643 // TODO(1236192): Most runtime routines don't need the number of
2644 // arguments passed in because it is constant. At some point we
2645 // should remove this need and make the runtime routine entry code
2646 // smarter.
2647 mov(r0, Operand(function->nargs));
2648 }
2649 JumpToExternalReference(ExternalReference(fid, isolate()));
2650 }
2651
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)2652 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2653 bool builtin_exit_frame) {
2654 #if defined(__thumb__)
2655 // Thumb mode builtin.
2656 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2657 #endif
2658 mov(r1, Operand(builtin));
2659 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
2660 builtin_exit_frame);
2661 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2662 }
2663
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2664 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2665 Register scratch1, Register scratch2) {
2666 if (FLAG_native_code_counters && counter->Enabled()) {
2667 mov(scratch1, Operand(value));
2668 mov(scratch2, Operand(ExternalReference(counter)));
2669 str(scratch1, MemOperand(scratch2));
2670 }
2671 }
2672
2673
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2674 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2675 Register scratch1, Register scratch2) {
2676 DCHECK(value > 0);
2677 if (FLAG_native_code_counters && counter->Enabled()) {
2678 mov(scratch2, Operand(ExternalReference(counter)));
2679 ldr(scratch1, MemOperand(scratch2));
2680 add(scratch1, scratch1, Operand(value));
2681 str(scratch1, MemOperand(scratch2));
2682 }
2683 }
2684
2685
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2686 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2687 Register scratch1, Register scratch2) {
2688 DCHECK(value > 0);
2689 if (FLAG_native_code_counters && counter->Enabled()) {
2690 mov(scratch2, Operand(ExternalReference(counter)));
2691 ldr(scratch1, MemOperand(scratch2));
2692 sub(scratch1, scratch1, Operand(value));
2693 str(scratch1, MemOperand(scratch2));
2694 }
2695 }
2696
2697
Assert(Condition cond,BailoutReason reason)2698 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2699 if (emit_debug_code())
2700 Check(cond, reason);
2701 }
2702
2703
AssertFastElements(Register elements)2704 void MacroAssembler::AssertFastElements(Register elements) {
2705 if (emit_debug_code()) {
2706 DCHECK(!elements.is(ip));
2707 Label ok;
2708 push(elements);
2709 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2710 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2711 cmp(elements, ip);
2712 b(eq, &ok);
2713 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2714 cmp(elements, ip);
2715 b(eq, &ok);
2716 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2717 cmp(elements, ip);
2718 b(eq, &ok);
2719 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2720 bind(&ok);
2721 pop(elements);
2722 }
2723 }
2724
2725
Check(Condition cond,BailoutReason reason)2726 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2727 Label L;
2728 b(cond, &L);
2729 Abort(reason);
2730 // will not return here
2731 bind(&L);
2732 }
2733
2734
Abort(BailoutReason reason)2735 void MacroAssembler::Abort(BailoutReason reason) {
2736 Label abort_start;
2737 bind(&abort_start);
2738 #ifdef DEBUG
2739 const char* msg = GetBailoutReason(reason);
2740 if (msg != NULL) {
2741 RecordComment("Abort message: ");
2742 RecordComment(msg);
2743 }
2744
2745 if (FLAG_trap_on_abort) {
2746 stop(msg);
2747 return;
2748 }
2749 #endif
2750
2751 // Check if Abort() has already been initialized.
2752 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2753
2754 Move(r1, Smi::FromInt(static_cast<int>(reason)));
2755
2756 // Disable stub call restrictions to always allow calls to abort.
2757 if (!has_frame_) {
2758 // We don't actually want to generate a pile of code for this, so just
2759 // claim there is a stack frame, without generating one.
2760 FrameScope scope(this, StackFrame::NONE);
2761 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2762 } else {
2763 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2764 }
2765 // will not return here
2766 if (is_const_pool_blocked()) {
2767 // If the calling code cares about the exact number of
2768 // instructions generated, we insert padding here to keep the size
2769 // of the Abort macro constant.
2770 static const int kExpectedAbortInstructions = 7;
2771 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2772 DCHECK(abort_instructions <= kExpectedAbortInstructions);
2773 while (abort_instructions++ < kExpectedAbortInstructions) {
2774 nop();
2775 }
2776 }
2777 }
2778
2779
LoadContext(Register dst,int context_chain_length)2780 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2781 if (context_chain_length > 0) {
2782 // Move up the chain of contexts to the context containing the slot.
2783 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2784 for (int i = 1; i < context_chain_length; i++) {
2785 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2786 }
2787 } else {
2788 // Slot is in the current function context. Move it into the
2789 // destination register in case we store into it (the write barrier
2790 // cannot be allowed to destroy the context in cp).
2791 mov(dst, cp);
2792 }
2793 }
2794
LoadNativeContextSlot(int index,Register dst)2795 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2796 ldr(dst, NativeContextMemOperand());
2797 ldr(dst, ContextMemOperand(dst, index));
2798 }
2799
2800
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)2801 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2802 Register map,
2803 Register scratch) {
2804 // Load the initial map. The global functions all have initial maps.
2805 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2806 if (emit_debug_code()) {
2807 Label ok, fail;
2808 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2809 b(&ok);
2810 bind(&fail);
2811 Abort(kGlobalFunctionsMustHaveInitialMap);
2812 bind(&ok);
2813 }
2814 }
2815
2816
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)2817 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2818 Register reg,
2819 Register scratch,
2820 Label* not_power_of_two_or_zero) {
2821 sub(scratch, reg, Operand(1), SetCC);
2822 b(mi, not_power_of_two_or_zero);
2823 tst(scratch, reg);
2824 b(ne, not_power_of_two_or_zero);
2825 }
2826
2827
JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,Register scratch,Label * zero_and_neg,Label * not_power_of_two)2828 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2829 Register reg,
2830 Register scratch,
2831 Label* zero_and_neg,
2832 Label* not_power_of_two) {
2833 sub(scratch, reg, Operand(1), SetCC);
2834 b(mi, zero_and_neg);
2835 tst(scratch, reg);
2836 b(ne, not_power_of_two);
2837 }
2838
2839
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)2840 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2841 Register reg2,
2842 Label* on_not_both_smi) {
2843 STATIC_ASSERT(kSmiTag == 0);
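// The second tst only executes if reg1 has a clear smi tag (eq); ne
// afterwards therefore means at least one of the registers is not a smi.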
2844 tst(reg1, Operand(kSmiTagMask));
2845 tst(reg2, Operand(kSmiTagMask), eq);
2846 b(ne, on_not_both_smi);
2847 }
2848
2849
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)2850 void MacroAssembler::UntagAndJumpIfSmi(
2851 Register dst, Register src, Label* smi_case) {
2852 STATIC_ASSERT(kSmiTag == 0);
2853 SmiUntag(dst, src, SetCC);
2854 b(cc, smi_case); // Shifter carry is not set for a smi.
2855 }
2856
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)2857 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2858 Register reg2,
2859 Label* on_either_smi) {
2860 STATIC_ASSERT(kSmiTag == 0);
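// The second tst only executes if reg1 is not a smi (ne); eq afterwards
// means at least one of the registers is a smi.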
2861 tst(reg1, Operand(kSmiTagMask));
2862 tst(reg2, Operand(kSmiTagMask), ne);
2863 b(eq, on_either_smi);
2864 }
2865
AssertNotNumber(Register object)2866 void MacroAssembler::AssertNotNumber(Register object) {
2867 if (emit_debug_code()) {
2868 STATIC_ASSERT(kSmiTag == 0);
2869 tst(object, Operand(kSmiTagMask));
2870 Check(ne, kOperandIsANumber);
2871 push(object);
2872 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2873 pop(object);
2874 Check(ne, kOperandIsANumber);
2875 }
2876 }
2877
AssertNotSmi(Register object)2878 void MacroAssembler::AssertNotSmi(Register object) {
2879 if (emit_debug_code()) {
2880 STATIC_ASSERT(kSmiTag == 0);
2881 tst(object, Operand(kSmiTagMask));
2882 Check(ne, kOperandIsASmi);
2883 }
2884 }
2885
2886
AssertSmi(Register object)2887 void MacroAssembler::AssertSmi(Register object) {
2888 if (emit_debug_code()) {
2889 STATIC_ASSERT(kSmiTag == 0);
2890 tst(object, Operand(kSmiTagMask));
2891 Check(eq, kOperandIsNotSmi);
2892 }
2893 }
2894
2895
AssertString(Register object)2896 void MacroAssembler::AssertString(Register object) {
2897 if (emit_debug_code()) {
2898 STATIC_ASSERT(kSmiTag == 0);
2899 tst(object, Operand(kSmiTagMask));
2900 Check(ne, kOperandIsASmiAndNotAString);
2901 push(object);
2902 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2903 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2904 pop(object);
2905 Check(lo, kOperandIsNotAString);
2906 }
2907 }
2908
2909
AssertName(Register object)2910 void MacroAssembler::AssertName(Register object) {
2911 if (emit_debug_code()) {
2912 STATIC_ASSERT(kSmiTag == 0);
2913 tst(object, Operand(kSmiTagMask));
2914 Check(ne, kOperandIsASmiAndNotAName);
2915 push(object);
2916 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2917 CompareInstanceType(object, object, LAST_NAME_TYPE);
2918 pop(object);
2919 Check(le, kOperandIsNotAName);
2920 }
2921 }
2922
2923
AssertFunction(Register object)2924 void MacroAssembler::AssertFunction(Register object) {
2925 if (emit_debug_code()) {
2926 STATIC_ASSERT(kSmiTag == 0);
2927 tst(object, Operand(kSmiTagMask));
2928 Check(ne, kOperandIsASmiAndNotAFunction);
2929 push(object);
2930 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2931 pop(object);
2932 Check(eq, kOperandIsNotAFunction);
2933 }
2934 }
2935
2936
AssertBoundFunction(Register object)2937 void MacroAssembler::AssertBoundFunction(Register object) {
2938 if (emit_debug_code()) {
2939 STATIC_ASSERT(kSmiTag == 0);
2940 tst(object, Operand(kSmiTagMask));
2941 Check(ne, kOperandIsASmiAndNotABoundFunction);
2942 push(object);
2943 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2944 pop(object);
2945 Check(eq, kOperandIsNotABoundFunction);
2946 }
2947 }
2948
AssertGeneratorObject(Register object)2949 void MacroAssembler::AssertGeneratorObject(Register object) {
2950 if (emit_debug_code()) {
2951 STATIC_ASSERT(kSmiTag == 0);
2952 tst(object, Operand(kSmiTagMask));
2953 Check(ne, kOperandIsASmiAndNotAGeneratorObject);
2954 push(object);
2955 CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
2956 pop(object);
2957 Check(eq, kOperandIsNotAGeneratorObject);
2958 }
2959 }
2960
AssertReceiver(Register object)2961 void MacroAssembler::AssertReceiver(Register object) {
2962 if (emit_debug_code()) {
2963 STATIC_ASSERT(kSmiTag == 0);
2964 tst(object, Operand(kSmiTagMask));
2965 Check(ne, kOperandIsASmiAndNotAReceiver);
2966 push(object);
2967 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2968 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2969 pop(object);
2970 Check(hs, kOperandIsNotAReceiver);
2971 }
2972 }
2973
2974
AssertUndefinedOrAllocationSite(Register object,Register scratch)2975 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2976 Register scratch) {
2977 if (emit_debug_code()) {
2978 Label done_checking;
2979 AssertNotSmi(object);
2980 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2981 b(eq, &done_checking);
2982 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2983 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2984 Assert(eq, kExpectedUndefinedOrCell);
2985 bind(&done_checking);
2986 }
2987 }
2988
2989
AssertIsRoot(Register reg,Heap::RootListIndex index)2990 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2991 if (emit_debug_code()) {
2992 CompareRoot(reg, index);
2993 Check(eq, kHeapNumberMapRegisterClobbered);
2994 }
2995 }
2996
2997
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)2998 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2999 Register heap_number_map,
3000 Register scratch,
3001 Label* on_not_heap_number) {
3002 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3003 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3004 cmp(scratch, heap_number_map);
3005 b(ne, on_not_heap_number);
3006 }
3007
3008
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)3009 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3010 Register first, Register second, Register scratch1, Register scratch2,
3011 Label* failure) {
3012 // Test that both first and second are sequential one-byte strings.
3013 // Assume that they are non-smis.
3014 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3015 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3016 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3017 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3018
3019 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3020 scratch2, failure);
3021 }
3022
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)3023 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3024 Register second,
3025 Register scratch1,
3026 Register scratch2,
3027 Label* failure) {
3028 // Check that neither is a smi.
3029 and_(scratch1, first, Operand(second));
3030 JumpIfSmi(scratch1, failure);
3031 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3032 scratch2, failure);
3033 }
3034
3035
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)3036 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3037 Label* not_unique_name) {
3038 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3039 Label succeed;
3040 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3041 b(eq, &succeed);
3042 cmp(reg, Operand(SYMBOL_TYPE));
3043 b(ne, not_unique_name);
3044
3045 bind(&succeed);
3046 }
3047
3048
3049 // Allocates a heap number or jumps to the gc_required label if the young
3050 // space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store heap number map in the allocated object.
  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch1,
                                     Register scratch2, Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
  str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
  str(value, FieldMemOperand(result, JSValue::kValueOffset));
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}

void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  Label loop, entry;
  b(&entry);
  bind(&loop);
  str(filler, MemOperand(current_address, kPointerSize, PostIndex));
  bind(&entry);
  cmp(current_address, end_address);
  b(lo, &loop);
}


void MacroAssembler::CheckFor32DRegs(Register scratch) {
  mov(scratch, Operand(ExternalReference::cpu_features()));
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}

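// CheckFor32DRegs above leaves the flags set to ne when d16-d31 are
// available. SaveFPRegs/RestoreFPRegs rely on that result so the upper bank
// is only stored/loaded when present, while the amount of stack consumed is
// the same (32 doubles) either way.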
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}


void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}

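// Shared helpers behind FloatMax/FloatMin for both SwVfpRegister and
// DwVfpRegister operands. The result register receives max(left, right)
// (respectively min). On ARMv8 the helpers use vmaxnm/vminnm and only defer
// the NaN case to out_of_line; on older cores the max helper additionally
// defers the case where both inputs are +/-0, while the min helper resolves
// it inline.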
template <typename T>
void MacroAssembler::FloatMaxHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so that the out-of-line code can be
  // completely avoided.
  DCHECK(!left.is(right));

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vmaxnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
    bool aliased_result_reg = result.is(left) || result.is(right);
    Move(result, right, aliased_result_reg ? mi : al);
    Move(result, left, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    b(eq, out_of_line);
    // The arguments are equal and not zero, so it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    bind(&done);
  }
}

template <typename T>
void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
  DCHECK(!left.is(right));

  // ARMv8: At least one of left and right is a NaN.
  // Anything else: At least one of left and right is a NaN, or both left and
  // right are zeroes with unknown sign.

  // If left and right are +/-0, select the one with the most positive sign.
  // If left or right are NaN, vadd propagates the appropriate one.
  vadd(result, left, right);
}

template <typename T>
void MacroAssembler::FloatMinHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so that the out-of-line code can be
  // completely avoided.
  DCHECK(!left.is(right));

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vminnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
    bool aliased_result_reg = result.is(left) || result.is(right);
    Move(result, left, aliased_result_reg ? mi : al);
    Move(result, right, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    // If the arguments are equal and not zero, it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    b(ne, &done);
    // At this point, both left and right are either 0 or -0.
    // We could use a single 'vorr' instruction here if we had NEON support.
    // The algorithm used is -((-L) + (-R)), which is most efficiently
    // expressed as -((-L) - R).
    if (left.is(result)) {
      DCHECK(!right.is(result));
      vneg(result, left);
      vsub(result, result, right);
      vneg(result, result);
    } else {
      DCHECK(!left.is(result));
      vneg(result, right);
      vsub(result, result, left);
      vneg(result, result);
    }
    bind(&done);
  }
}

template <typename T>
void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
  DCHECK(!left.is(right));

  // At least one of left and right is a NaN. Use vadd to propagate the NaN
  // appropriately. +/-0 is handled inline.
  vadd(result, left, right);
}

void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}

void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch1, first, Operand(kFlatOneByteStringMask));
  and_(scratch2, second, Operand(kFlatOneByteStringMask));
  cmp(scratch1, Operand(kFlatOneByteStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
  b(ne, failure);
}

static const int kRegisterPassedArguments = 4;

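// Returns the number of argument words that do not fit into registers under
// the current calling convention and therefore have to be passed on the
// stack before a C call (see PrepareCallCFunction below).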
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}

void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string);
  Check(ne, kNonObject);

  ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
  ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));

  and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
  cmp(ip, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, index, &index_tag_bad);
  b(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  ldr(ip, FieldMemOperand(string, String::kLengthOffset));
  cmp(index, ip);
  Check(lt, kIndexIsTooLarge);

  cmp(index, Operand(Smi::kZero));
  Check(ge, kIndexIsNegative);

  SmiUntag(index, index);
}

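// Reserves stack space for the stack-passed arguments of an upcoming C call
// and, when the platform requires more than pointer-size alignment, aligns
// sp and saves the original sp just above the reserved area so that
// CallCFunctionHelper can restore it after the call.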
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
  DCHECK(src.is(d0));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);
}


void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  DCHECK(src1.is(d0));
  DCHECK(src2.is(d1));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src1);
    vmov(r2, r3, src2);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort and possibly
      // re-enter this code.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

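// Clears the low kPageSizeBits of the object's address to reach the
// MemoryChunk header of its page, then tests the requested flag bits and
// branches to condition_met if the test matches cc.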
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  DCHECK(cc == eq || cc == ne);
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}

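// Tests the two consecutive mark bits of 'object' against the pattern given
// by first_bit/second_bit and jumps to has_color on a match. The second bit
// may live in the next bitmap cell, which the word_boundary path below
// handles.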
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  // Shift the mask left by 1 by adding it to itself.
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  jmp(&other_color);

  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&other_color);
}

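// Computes, for the object at addr_reg, the address of the bitmap cell
// containing its mark bits (returned in bitmap_reg; the cell itself is read
// at MemoryChunk::kHeaderSize from it) and a single-bit mask for the first
// mark bit (returned in mask_reg). Clobbers ip.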
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}

void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Register load_scratch,
                                 Label* value_is_white) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there, we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(eq, value_is_white);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  usat(output_reg, 8, Operand(input_reg));
}

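// Clamps a double to the [0, 255] range: values >= 255 (including +infinity)
// become 255, while negative values and NaN fall through to vcvt_u32_f64,
// which converts them to 0; everything in between is rounded to nearest.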
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DwVfpRegister input_reg,
                                        LowDwVfpRegister double_scratch) {
  Label done;

  // Handle inputs >= 255 (including +infinity).
  Vmov(double_scratch, 255.0, result_reg);
  mov(result_reg, Operand(255));
  VFPCompareAndSetFlags(input_reg, double_scratch);
  b(ge, &done);

  // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
  // rounding mode will provide the correct result.
  vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
  vmov(result_reg, double_scratch.low());

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ldr(dst, FieldMemOperand(dst, offset));
}

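// Walks the prototype chain of the object in r0 and jumps to call_runtime
// unless every map on the chain has a valid enum cache, every object other
// than the receiver has an empty enum cache, and no object on the chain has
// elements other than the empty fixed array or the empty slow element
// dictionary. Clobbers r1, r2, r3, r5 and r6.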
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Register null_value = r5;
  Register empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(r2, r0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
  b(eq, call_runtime);

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  jmp(&start);

  bind(&next);
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::kZero));
  b(ne, call_runtime);

  bind(&start);

  // Check that there are no elements. Register r2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
  b(ne, call_runtime);

  bind(&no_elements);
  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
  cmp(r2, null_value);
  b(ne, &next);
}

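// Checks whether an AllocationMemento immediately follows the JSArray in
// receiver_reg. Jumps to no_memento_found when no memento can exist (the
// object is not in new space, or reading past the page boundary or the
// allocation top would be unsafe); otherwise falls through with the flags
// set by the final map comparison, so callers can branch on eq when a
// memento is present.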
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top_adr =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoLastWordOffset =
      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  mov(ip, Operand(new_space_allocation_top_adr));
  ldr(ip, MemOperand(ip));
  eor(scratch_reg, scratch_reg, Operand(ip));
  tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
  b(eq, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  eor(scratch_reg, scratch_reg, Operand(receiver_reg));
  tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
  b(ne, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
  mov(ip, Operand(new_space_allocation_top_adr));
  ldr(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  b(ge, no_memento_found);
  // Memento map check.
  bind(&map_check);
  ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
  cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
}

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}

#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that we don't have any pending constant pools.
  DCHECK(masm_.pending_32_bit_constants_.empty());
  DCHECK(masm_.pending_64_bit_constants_.empty());

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}

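// Computes dividend / divisor truncated towards zero using the usual
// multiply-by-magic-number technique for division by a constant: the high 32
// bits of dividend * multiplier are taken (with a correction when the
// multiplier does not fit in a signed 32-bit value), shifted right by
// mag.shift, and finally the sign bit of the dividend is added to round
// quotients of negative dividends towards zero.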
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(ip));
  DCHECK(!result.is(ip));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
  mov(ip, Operand(mag.multiplier));
  bool neg = (mag.multiplier & (1U << 31)) != 0;
  if (divisor > 0 && neg) {
    smmla(result, dividend, ip, dividend);
  } else {
    smmul(result, dividend, ip);
    if (divisor < 0 && !neg && mag.multiplier > 0) {
      sub(result, result, Operand(dividend));
    }
  }
  if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
  add(result, result, Operand(dividend, LSR, 31));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM