// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/arm/macro-assembler-arm.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


void MacroAssembler::Jump(Register target, Condition cond) {
  bx(target, cond);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
  return kInstrSize;
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
  blx(target, cond);
  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}


int MacroAssembler::CallStubSize(
    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
#endif

  // Call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                 @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                 @ return address

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}

void MacroAssembler::CallDeoptimizer(Address target) {
  BlockConstPoolScope block_const_pool(this);

  uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);

  // We use blx, like a call, but it does not return here. The link register is
  // used by the deoptimizer to work out what called it.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(this, ARMv7);
    movw(ip, target_raw & 0xffff);
    movt(ip, (target_raw >> 16) & 0xffff);
    blx(ip);
  } else {
    // We need to load a literal, but we can't use the usual constant pool
    // because we call this from a patcher, and cannot afford the guard
    // instruction and other administrative overhead.
    ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
    blx(ip);
    dd(target_raw);
  }
}

int MacroAssembler::CallDeoptimizerSize() {
  // ARMv7+:
  //  movw  ip, ...
  //  movt  ip, ...
  //  blx   ip  @ This never returns.
  //
  // ARMv6:
  //  ldr   ip, =address
  //  blx   ip  @ This never returns.
  //  .word address
  return 3 * kInstrSize;
}

void MacroAssembler::Ret(Condition cond) {
  bx(lr, cond);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}

void MacroAssembler::Drop(Register count, Condition cond) {
  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}

void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


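// Exchanges the contents of reg1 and reg2. Without a scratch register this
// falls back to the classic three-EOR (XOR) swap, which needs no temporary
// register at the cost of three data-processing instructions.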
void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    mov(dst, Operand(value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}

void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}

void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}

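// Multiply-and-subtract: dst = srcA - (src1 * src2). Uses the ARMv7 mls
// instruction when available, otherwise a mul into ip followed by a sub,
// which is why srcA must not alias ip in the fallback.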
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(this, ARMv7);
    mls(dst, src1, src2, srcA, cond);
  } else {
    DCHECK(!srcA.is(ip));
    mul(ip, src1, src2, LeaveCC, cond);
    sub(dst, srcA, ip, LeaveCC, cond);
  }
}


void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.instructions_required(this) == 1) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


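// The bitfield helpers below (Ubfx, Sbfx, Bfi, Bfc) use the ARMv7 bitfield
// instructions when they are available and the code size does not need to be
// predictable; otherwise they synthesize the same result from masks and
// shifts so the code also runs on pre-ARMv7 cores.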
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  DCHECK(0 <= lsb && lsb < 32);
  DCHECK(0 <= width && width < 32);
  DCHECK(lsb + width < 32);
  DCHECK(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    // The CPU supports fast immediate values, and this root will never
    // change. We will load it as a relocatable immediate value.
    Handle<Object> root = isolate()->heap()->root_handle(index);
    mov(destination, Operand(root), LeaveCC, cond);
    return;
  }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  const int mask =
      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
  CheckPageFlag(object, scratch, mask, cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    cmp(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to
  // update the remembered set. If incremental marking is off, there is
  // nothing for us to do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(r1));
  DCHECK(code_entry.is(r4));
  DCHECK(scratch.is(r5));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    add(scratch, js_function, Operand(offset - kHeapObjectTag));
    ldr(ip, MemOperand(scratch));
    cmp(ip, code_entry);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  add(dst, js_function, Operand(offset - kHeapObjectTag));

  push(code_entry);

  // Save caller-saved registers, which includes js_function.
  DCHECK((kCallerSaved & js_function.bit()) != 0);
  DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
  stm(db_w, sp, (kCallerSaved | lr.bit()));

  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);

  mov(r0, js_function);
  mov(r1, dst);
  mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers (including js_function and code_entry).
  ldm(ia_w, sp, (kCallerSaved | lr.bit()));

  pop(code_entry);

  bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
  if (and_then == kFallThroughAtEnd) {
    b(ne, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(ne);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

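// Builds the common frame prefix (optional marker, [pp,] fp, lr) and points fp
// at the saved fp slot. stm stores register lists in ascending register-code
// order, so a marker register whose code is higher than fp (or pp) cannot be
// folded into the same stm and is pushed separately afterwards.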
void MacroAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    if (FLAG_enable_embedded_constant_pool) {
      if (marker_reg.code() > pp.code()) {
        stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
        add(fp, sp, Operand(kPointerSize));
        Push(marker_reg);
      } else {
        stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
        add(fp, sp, Operand(2 * kPointerSize));
      }
    } else {
      if (marker_reg.code() > fp.code()) {
        stm(db_w, sp, fp.bit() | lr.bit());
        mov(fp, Operand(sp));
        Push(marker_reg);
      } else {
        stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
        add(fp, sp, Operand(kPointerSize));
      }
    }
  } else {
    stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                      fp.bit() | lr.bit());
    add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
  }
}

void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    if (FLAG_enable_embedded_constant_pool) {
      if (marker_reg.code() > pp.code()) {
        pop(marker_reg);
        ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
      } else {
        ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
      }
    } else {
      if (marker_reg.code() > fp.code()) {
        pop(marker_reg);
        ldm(ia_w, sp, fp.bit() | lr.bit());
      } else {
        ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
      }
    }
  } else {
    ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                      fp.bit() | lr.bit());
  }
}

void MacroAssembler::PushStandardFrame(Register function_reg) {
  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
  stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                    fp.bit() | lr.bit());
  int offset = -StandardFrameConstants::kContextOffset;
  offset += function_reg.is_valid() ? kPointerSize : 0;
  add(fp, sp, Operand(offset));
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0,
  // except when FLAG_enable_embedded_constant_pool is set, in which case pp is
  // omitted.
  DCHECK(kSafepointSavedRegisters ==
         (FLAG_enable_embedded_constant_pool
              ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
              : (1 << kNumSafepointSavedRegisters) - 1));
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
    // RegList omits pp.
    reg_code -= 1;
  }
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // Number of d-regs not known at snapshot time.
  DCHECK(!serializer_enabled());
  // General purpose registers are pushed last on the stack.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


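// ldrd/strd can only be used when the register pair is an even-numbered
// register followed by the next odd one (e.g. r0/r1). The helpers below fall
// back to two word accesses otherwise, ordered so that a base register that is
// also the first destination is not clobbered before its last use.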
void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not applicable.
  if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not applicable.
  if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}

void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
  // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
  // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  vsub(dst, src, kDoubleRegZero, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const SwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const float src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const SwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const float src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


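// Loads a double immediate into dst. +0.0 and -0.0 are materialized from
// kDoubleRegZero (using vneg for the negative case), which avoids the
// immediate or constant load that the generic vmov path may otherwise need.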
void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero) {
    vmov(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero) {
    vneg(dst, kDoubleRegZero);
  } else {
    vmov(dst, imm, scratch);
  }
}


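// The VmovHigh/VmovLow helpers move one 32-bit half of a double register.
// d0-d15 overlap the single-precision registers, so their halves can be
// accessed directly as s-registers; d16-d31 have no s-register aliases and
// need the element form of vmov instead.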
void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}
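
// 64-bit shifts on register pairs (low, high). Each helper splits into a
// shift >= 32 case, where the result comes from a single shifted source word,
// and a shift < 32 case, where bits are carried between the two words. In the
// variable-shift versions, rsb computes scratch = 32 - shift and sets the
// flags used to pick the branch; the shift >= 32 path then reuses scratch for
// shift & 31.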
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_high, shift));

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1f));
  lsl(dst_high, src_low, Operand(scratch));
  mov(dst_low, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsl(dst_high, src_high, Operand(shift));
  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
  lsl(dst_low, src_low, Operand(shift));
  bind(&done);
}

void MacroAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  Label less_than_32;
  Label done;
  if (shift == 0) {
    Move(dst_high, src_high);
    Move(dst_low, src_low);
  } else if (shift == 32) {
    Move(dst_high, src_low);
    Move(dst_low, Operand(0));
  } else if (shift >= 32) {
    shift &= 0x1f;
    lsl(dst_high, src_low, Operand(shift));
    mov(dst_low, Operand(0));
  } else {
    lsl(dst_high, src_high, Operand(shift));
    orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
    lsl(dst_low, src_low, Operand(shift));
  }
}

void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1f));
  lsr(dst_low, src_high, Operand(scratch));
  mov(dst_high, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32

  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  lsr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  Label less_than_32;
  Label done;
  if (shift == 32) {
    mov(dst_low, src_high);
    mov(dst_high, Operand(0));
  } else if (shift > 32) {
    shift &= 0x1f;
    lsr(dst_low, src_high, Operand(shift));
    mov(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    lsr(dst_high, src_high, Operand(shift));
  }
}

void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1f));
  asr(dst_low, src_high, Operand(scratch));
  asr(dst_high, src_high, Operand(31));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  asr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  Label less_than_32;
  Label done;
  if (shift == 32) {
    mov(dst_low, src_high);
    asr(dst_high, src_high, Operand(31));
  } else if (shift > 32) {
    shift &= 0x1f;
    asr(dst_low, src_high, Operand(shift));
    asr(dst_high, src_high, Operand(31));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    asr(dst_high, src_high, Operand(shift));
  }
}

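// Reads the constant pool offset stored in the Code object header and adds the
// code's instruction start address, leaving pp pointing at the embedded
// constant pool of that code object.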
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  DCHECK(FLAG_enable_embedded_constant_pool);
  ldr(pp, MemOperand(code_target_address,
                     Code::kConstantPoolOffset - Code::kHeaderSize));
  add(pp, pp, code_target_address);
}


void MacroAssembler::LoadConstantPoolPointerRegister() {
  DCHECK(FLAG_enable_embedded_constant_pool);
  int entry_offset = pc_offset() + Instruction::kPCReadOffset;
  sub(ip, pc, Operand(entry_offset));
  LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}

void MacroAssembler::StubPrologue(StackFrame::Type type) {
  mov(ip, Operand(Smi::FromInt(type)));
  PushCommonFrame(ip);
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}

void MacroAssembler::Prologue(bool code_pre_aging) {
  { PredictableCodeSizeScope predictible_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following three instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      add(r0, pc, Operand(-8));
      ldr(pc, MemOperand(pc, -4));
      emit_code_stub_address(stub);
    } else {
      PushStandardFrame(r1);
      nop(ip.code());
    }
  }
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  mov(ip, Operand(Smi::FromInt(type)));
  PushCommonFrame(ip);
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    LoadConstantPoolPointerRegister();
  }
  if (type == StackFrame::INTERNAL) {
    mov(ip, Operand(CodeObject()));
    push(ip);
  }
}


int MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer
  // (if FLAG_enable_embedded_constant_pool).
  int frame_ends;
  if (FLAG_enable_embedded_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
  } else {
    mov(sp, fp);
    frame_ends = pc_offset();
    ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  return frame_ends;
}


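// Builds an exit frame for calling out to C++: pushes the EXIT frame marker,
// reserves the SP and code slots (plus the constant pool slot when enabled),
// records fp and cp in the isolate's top-of-frame slots, optionally saves the
// double registers, and finally reserves stack_space words plus a return
// address slot and aligns sp.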
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
  PushCommonFrame(ip);
  // Reserve room for saved entry sp and code object.
  sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, ip);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    // since the sp slot, code slot and constant pool slot (if
    // FLAG_enable_embedded_constant_pool) were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
    sub(r3, fp,
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ldr(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  if (FLAG_enable_embedded_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      add(sp, sp, argument_count);
    } else {
      add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    }
  }
}


void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}

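// Prepares a tail call by removing the current frame: the callee's arguments
// (including the receiver) are copied into the caller's argument area, lr and
// fp are restored from the current frame before the copy loop overwrites it,
// and sp is left pointing at the new top of stack.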
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We add kPointerSize to count the receiver
  // argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
  add(dst_reg, dst_reg,
      Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
    add(src_reg, src_reg, Operand(kPointerSize));
  } else {
    add(src_reg, sp,
        Operand((callee_args_count.immediate() + 1) * kPointerSize));
  }

  if (FLAG_debug_code) {
    cmp(src_reg, dst_reg);
    Check(lo, kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop, entry;
  b(&entry);
  bind(&loop);
  ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
  str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
  bind(&entry);
  cmp(sp, src_reg);
  b(ne, &loop);

  // Leave current frame.
  mov(sp, dst_reg);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(r0));
  DCHECK(expected.is_immediate() || expected.reg().is(r2));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r0, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r0, Operand(actual.immediate()));
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


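// Debugger support: if the last step action is StepIn or above, calls
// Runtime::kDebugPrepareStepInIfStepping for the given function. Register-based
// expected/actual argument counts are Smi-tagged and saved across the runtime
// call, as is new_target when it is valid.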
FloodFunctionIfStepping(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)1571 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1572 const ParameterCount& expected,
1573 const ParameterCount& actual) {
1574 Label skip_flooding;
1575 ExternalReference last_step_action =
1576 ExternalReference::debug_last_step_action_address(isolate());
1577 STATIC_ASSERT(StepFrame > StepIn);
1578 mov(r4, Operand(last_step_action));
1579 ldrsb(r4, MemOperand(r4));
1580 cmp(r4, Operand(StepIn));
1581 b(lt, &skip_flooding);
1582 {
1583 FrameScope frame(this,
1584 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1585 if (expected.is_reg()) {
1586 SmiTag(expected.reg());
1587 Push(expected.reg());
1588 }
1589 if (actual.is_reg()) {
1590 SmiTag(actual.reg());
1591 Push(actual.reg());
1592 }
1593 if (new_target.is_valid()) {
1594 Push(new_target);
1595 }
1596 Push(fun);
1597 Push(fun);
1598 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1599 Pop(fun);
1600 if (new_target.is_valid()) {
1601 Pop(new_target);
1602 }
1603 if (actual.is_reg()) {
1604 Pop(actual.reg());
1605 SmiUntag(actual.reg());
1606 }
1607 if (expected.is_reg()) {
1608 Pop(expected.reg());
1609 SmiUntag(expected.reg());
1610 }
1611 }
1612 bind(&skip_flooding);
1613 }
1614
1615
1616 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1617 const ParameterCount& expected,
1618 const ParameterCount& actual,
1619 InvokeFlag flag,
1620 const CallWrapper& call_wrapper) {
1621 // You can't call a function without a valid frame.
1622 DCHECK(flag == JUMP_FUNCTION || has_frame());
1623 DCHECK(function.is(r1));
1624 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
1625
1626 if (call_wrapper.NeedsDebugStepCheck()) {
1627 FloodFunctionIfStepping(function, new_target, expected, actual);
1628 }
1629
1630 // Clear the new.target register if not given.
1631 if (!new_target.is_valid()) {
1632 LoadRoot(r3, Heap::kUndefinedValueRootIndex);
1633 }
1634
1635 Label done;
1636 bool definitely_mismatches = false;
1637 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1638 call_wrapper);
1639 if (!definitely_mismatches) {
1640 // We call indirectly through the code field in the function to
1641 // allow recompilation to take effect without changing any of the
1642 // call sites.
1643 Register code = r4;
1644 ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1645 if (flag == CALL_FUNCTION) {
1646 call_wrapper.BeforeCall(CallSize(code));
1647 Call(code);
1648 call_wrapper.AfterCall();
1649 } else {
1650 DCHECK(flag == JUMP_FUNCTION);
1651 Jump(code);
1652 }
1653
1654 // Continue here if InvokePrologue does handle the invocation due to
1655 // mismatched parameter counts.
1656 bind(&done);
1657 }
1658 }
1659
1660
1661 void MacroAssembler::InvokeFunction(Register fun,
1662 Register new_target,
1663 const ParameterCount& actual,
1664 InvokeFlag flag,
1665 const CallWrapper& call_wrapper) {
1666 // You can't call a function without a valid frame.
1667 DCHECK(flag == JUMP_FUNCTION || has_frame());
1668
1669 // Contract with called JS functions requires that function is passed in r1.
1670 DCHECK(fun.is(r1));
1671
1672 Register expected_reg = r2;
1673 Register temp_reg = r4;
1674
1675 ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1676 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1677 ldr(expected_reg,
1678 FieldMemOperand(temp_reg,
1679 SharedFunctionInfo::kFormalParameterCountOffset));
1680 SmiUntag(expected_reg);
1681
1682 ParameterCount expected(expected_reg);
1683 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1684 }
1685
1686
1687 void MacroAssembler::InvokeFunction(Register function,
1688 const ParameterCount& expected,
1689 const ParameterCount& actual,
1690 InvokeFlag flag,
1691 const CallWrapper& call_wrapper) {
1692 // You can't call a function without a valid frame.
1693 DCHECK(flag == JUMP_FUNCTION || has_frame());
1694
1695 // Contract with called JS functions requires that function is passed in r1.
1696 DCHECK(function.is(r1));
1697
1698 // Get the function and setup the context.
1699 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1700
1701 InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
1702 }
1703
1704
1705 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1706 const ParameterCount& expected,
1707 const ParameterCount& actual,
1708 InvokeFlag flag,
1709 const CallWrapper& call_wrapper) {
1710 Move(r1, function);
1711 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1712 }
1713
1714
1715 void MacroAssembler::IsObjectJSStringType(Register object,
1716 Register scratch,
1717 Label* fail) {
1718 DCHECK(kNotStringTag != 0);
1719
1720 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1721 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1722 tst(scratch, Operand(kIsNotStringMask));
1723 b(ne, fail);
1724 }
1725
1726
1727 void MacroAssembler::IsObjectNameType(Register object,
1728 Register scratch,
1729 Label* fail) {
1730 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1731 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1732 cmp(scratch, Operand(LAST_NAME_TYPE));
1733 b(hi, fail);
1734 }
1735
1736
1737 void MacroAssembler::DebugBreak() {
1738 mov(r0, Operand::Zero());
1739 mov(r1,
1740 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1741 CEntryStub ces(isolate(), 1);
1742 DCHECK(AllowThisStubCall(&ces));
1743 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1744 }
1745
1746
1747 void MacroAssembler::PushStackHandler() {
1748 // Adjust this code if not the case.
1749 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1750 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1751
1752 // Link the current handler as the next handler.
1753 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1754 ldr(r5, MemOperand(r6));
1755 push(r5);
1756
1757 // Set this new handler as the current one.
1758 str(sp, MemOperand(r6));
1759 }
1760
1761
1762 void MacroAssembler::PopStackHandler() {
1763 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1764 pop(r1);
1765 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1766 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1767 str(r1, MemOperand(ip));
1768 }
1769
1770
1771 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1772 Register scratch,
1773 Label* miss) {
1774 Label same_contexts;
1775
1776 DCHECK(!holder_reg.is(scratch));
1777 DCHECK(!holder_reg.is(ip));
1778 DCHECK(!scratch.is(ip));
1779
1780 // Load current lexical context from the active StandardFrame, which
1781 // may require crawling past STUB frames.
1782 Label load_context;
1783 Label has_context;
1784 DCHECK(!ip.is(scratch));
1785 mov(ip, fp);
1786 bind(&load_context);
1787 ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
1788 JumpIfNotSmi(scratch, &has_context);
1789 ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
1790 b(&load_context);
1791 bind(&has_context);
1792
1793 // In debug mode, make sure the lexical context is set.
1794 #ifdef DEBUG
1795 cmp(scratch, Operand::Zero());
1796 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1797 #endif
1798
1799 // Load the native context of the current context.
1800 ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
1801
1802 // Check the context is a native context.
1803 if (emit_debug_code()) {
1804 // Cannot use ip as a temporary in this verification code, because
1805 // ip is clobbered as part of cmp with an object Operand.
1806 push(holder_reg); // Temporarily save holder on the stack.
1807 // Read the first word and compare to the native_context_map.
1808 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1809 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1810 cmp(holder_reg, ip);
1811 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1812 pop(holder_reg); // Restore holder.
1813 }
1814
1815 // Check if both contexts are the same.
1816 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1817 cmp(scratch, Operand(ip));
1818 b(eq, &same_contexts);
1819
1820 // Check the context is a native context.
1821 if (emit_debug_code()) {
1822 // Cannot use ip as a temporary in this verification code, because
1823 // ip is clobbered as part of cmp with an object Operand.
1824 push(holder_reg); // Temporarily save holder on the stack.
1825 mov(holder_reg, ip); // Move ip to its holding place.
1826 LoadRoot(ip, Heap::kNullValueRootIndex);
1827 cmp(holder_reg, ip);
1828 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1829
1830 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1831 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1832 cmp(holder_reg, ip);
1833 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1834 // Restore ip is not needed. ip is reloaded below.
1835 pop(holder_reg); // Restore holder.
1836 // Restore ip to holder's context.
1837 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1838 }
1839
1840 // Check that the security token in the calling global object is
1841 // compatible with the security token in the receiving global
1842 // object.
1843 int token_offset = Context::kHeaderSize +
1844 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1845
1846 ldr(scratch, FieldMemOperand(scratch, token_offset));
1847 ldr(ip, FieldMemOperand(ip, token_offset));
1848 cmp(scratch, Operand(ip));
1849 b(ne, miss);
1850
1851 bind(&same_contexts);
1852 }
1853
1854
1855 // Compute the hash code from the untagged key. This must be kept in sync with
1856 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1857 // code-stubs-hydrogen.cc.
1858 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1859 // First of all we assign the hash seed to scratch.
1860 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1861 SmiUntag(scratch);
1862
1863 // Xor original key with a seed.
1864 eor(t0, t0, Operand(scratch));
1865
1866 // Compute the hash code from the untagged key. This must be kept in sync
1867 // with ComputeIntegerHash in utils.h.
1868 //
1869 // hash = ~hash + (hash << 15);
1870 mvn(scratch, Operand(t0));
1871 add(t0, scratch, Operand(t0, LSL, 15));
1872 // hash = hash ^ (hash >> 12);
1873 eor(t0, t0, Operand(t0, LSR, 12));
1874 // hash = hash + (hash << 2);
1875 add(t0, t0, Operand(t0, LSL, 2));
1876 // hash = hash ^ (hash >> 4);
1877 eor(t0, t0, Operand(t0, LSR, 4));
1878 // hash = hash * 2057;
1879 mov(scratch, Operand(t0, LSL, 11));
1880 add(t0, t0, Operand(t0, LSL, 3));
1881 add(t0, t0, scratch);
1882 // hash = hash ^ (hash >> 16);
1883 eor(t0, t0, Operand(t0, LSR, 16));
1884 bic(t0, t0, Operand(0xc0000000u));
1885 }
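// Illustrative sketch (hypothetical helper, assuming 32-bit unsigned
// arithmetic): a plain C++ rendering of the bit mixing emitted above, kept
// here only to make the instruction sequence easier to follow. It mirrors the
// steps of ComputeIntegerHash referenced in the comment above.
static inline uint32_t IllustrativeComputeNumberHash(uint32_t key,
                                                     uint32_t seed) {
  uint32_t hash = key ^ seed;     // eor t0, t0, scratch
  hash = ~hash + (hash << 15);    // mvn/add pair
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;             // composed above as hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffffu;      // bic with 0xc0000000
}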
1886
1887
1888 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1889 Register elements,
1890 Register key,
1891 Register result,
1892 Register t0,
1893 Register t1,
1894 Register t2) {
1895 // Register use:
1896 //
1897 // elements - holds the slow-case elements of the receiver on entry.
1898 // Unchanged unless 'result' is the same register.
1899 //
1900 // key - holds the smi key on entry.
1901 // Unchanged unless 'result' is the same register.
1902 //
1903 // result - holds the result on exit if the load succeeded.
1904 // Allowed to be the same as 'key' or 'elements'.
1905 // Unchanged on bailout so 'key' or 'elements' can be used
1906 // in further computation.
1907 //
1908 // Scratch registers:
1909 //
1910 // t0 - holds the untagged key on entry and holds the hash once computed.
1911 //
1912 // t1 - used to hold the capacity mask of the dictionary
1913 //
1914 // t2 - used for the index into the dictionary.
1915 Label done;
1916
1917 GetNumberHash(t0, t1);
1918
1919 // Compute the capacity mask.
1920 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1921 SmiUntag(t1);
1922 sub(t1, t1, Operand(1));
1923
1924 // Generate an unrolled loop that performs a few probes before giving up.
1925 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1926 // Use t2 for index calculations and keep the hash intact in t0.
1927 mov(t2, t0);
1928 // Compute the masked index: (hash + i + i * i) & mask.
1929 if (i > 0) {
1930 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1931 }
1932 and_(t2, t2, Operand(t1));
1933
1934 // Scale the index by multiplying by the element size.
1935 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1936 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1937
1938 // Check if the key is identical to the name.
1939 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1940 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1941 cmp(key, Operand(ip));
1942 if (i != kNumberDictionaryProbes - 1) {
1943 b(eq, &done);
1944 } else {
1945 b(ne, miss);
1946 }
1947 }
1948
1949 bind(&done);
1950 // Check that the value is a field property.
1951 // t2: elements + (index * kPointerSize)
1952 const int kDetailsOffset =
1953 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1954 ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1955 DCHECK_EQ(DATA, 0);
1956 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1957 b(ne, miss);
1958
1959 // Get the value at the masked, scaled index and return.
1960 const int kValueOffset =
1961 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1962 ldr(result, FieldMemOperand(t2, kValueOffset));
1963 }
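// Illustrative sketch (hypothetical helper): each iteration of the unrolled
// loop above probes the dictionary at the masked index
// (hash + probe_offset) & capacity_mask and then scales the slot by
// kEntrySize == 3 pointers (key, value, details). probe_offset is assumed to
// be whatever SeededNumberDictionary::GetProbeOffset(i) returns, which the
// loop adds for probes after the first one.
static inline int IllustrativeDictionarySlot(uint32_t hash,
                                             uint32_t probe_offset,
                                             uint32_t capacity_mask) {
  uint32_t index = (hash + probe_offset) & capacity_mask;
  return static_cast<int>(index * 3);  // kEntrySize == 3
}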
1964
1965
1966 void MacroAssembler::Allocate(int object_size,
1967 Register result,
1968 Register scratch1,
1969 Register scratch2,
1970 Label* gc_required,
1971 AllocationFlags flags) {
1972 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1973 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1974 if (!FLAG_inline_new) {
1975 if (emit_debug_code()) {
1976 // Trash the registers to simulate an allocation failure.
1977 mov(result, Operand(0x7091));
1978 mov(scratch1, Operand(0x7191));
1979 mov(scratch2, Operand(0x7291));
1980 }
1981 jmp(gc_required);
1982 return;
1983 }
1984
1985 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1986
1987 // Make object size into bytes.
1988 if ((flags & SIZE_IN_WORDS) != 0) {
1989 object_size *= kPointerSize;
1990 }
1991 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1992
1993 // Check relative positions of allocation top and limit addresses.
1994 // The values must be adjacent in memory to allow the use of LDM.
1995 // Also, assert that the registers are numbered such that the values
1996 // are loaded in the correct order.
1997 ExternalReference allocation_top =
1998 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1999 ExternalReference allocation_limit =
2000 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2001
2002 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
2003 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
2004 DCHECK((limit - top) == kPointerSize);
2005 DCHECK(result.code() < ip.code());
2006
2007 // Set up allocation top address register.
2008 Register top_address = scratch1;
2009 // This code stores a temporary value in ip. This is OK, as the code below
2010 // does not need ip for implicit literal generation.
2011 Register alloc_limit = ip;
2012 Register result_end = scratch2;
2013 mov(top_address, Operand(allocation_top));
2014
2015 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2016 // Load allocation top into result and allocation limit into alloc_limit.
2017 ldm(ia, top_address, result.bit() | alloc_limit.bit());
2018 } else {
2019 if (emit_debug_code()) {
2020 // Assert that result actually contains top on entry.
2021 ldr(alloc_limit, MemOperand(top_address));
2022 cmp(result, alloc_limit);
2023 Check(eq, kUnexpectedAllocationTop);
2024 }
2025 // Load allocation limit. Result already contains allocation top.
2026 ldr(alloc_limit, MemOperand(top_address, limit - top));
2027 }
2028
2029 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2030 // Align the next allocation. Storing the filler map without checking top is
2031 // safe in new-space because the limit of the heap is aligned there.
2032 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2033 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2034 Label aligned;
2035 b(eq, &aligned);
2036 if ((flags & PRETENURE) != 0) {
2037 cmp(result, Operand(alloc_limit));
2038 b(hs, gc_required);
2039 }
2040 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2041 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2042 bind(&aligned);
2043 }
2044
2045 // Calculate new top and bail out if new space is exhausted. Use result
2046 // to calculate the new top. We must preserve the ip register at this
2047 // point, so we cannot just use add().
2048 DCHECK(object_size > 0);
2049 Register source = result;
2050 Condition cond = al;
2051 int shift = 0;
2052 while (object_size != 0) {
2053 if (((object_size >> shift) & 0x03) == 0) {
2054 shift += 2;
2055 } else {
2056 int bits = object_size & (0xff << shift);
2057 object_size -= bits;
2058 shift += 8;
2059 Operand bits_operand(bits);
2060 DCHECK(bits_operand.instructions_required(this) == 1);
2061 add(result_end, source, bits_operand, LeaveCC, cond);
2062 source = result_end;
2063 cond = cc;
2064 }
2065 }
2066
2067 cmp(result_end, Operand(alloc_limit));
2068 b(hi, gc_required);
2069
2070 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2071 // The top pointer is not updated for allocation folding dominators.
2072 str(result_end, MemOperand(top_address));
2073 }
2074
2075 // Tag object.
2076 add(result, result, Operand(kHeapObjectTag));
2077 }
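// Illustrative sketch (hypothetical helper): the while loop above splits the
// compile-time object size into chunks of at most eight contiguous bits at an
// even bit position, because an ARM data-processing immediate is an 8-bit
// value rotated by an even amount. Each chunk then becomes a single add
// instruction, which avoids materializing the constant through ip. The same
// decomposition, counting the adds instead of emitting them:
static inline int IllustrativeCountSizeAddInstructions(int object_size) {
  int count = 0;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // skip a pair of zero bits
    } else {
      object_size -= object_size & (0xff << shift);  // peel one 8-bit chunk
      shift += 8;
      ++count;  // one single-instruction add per chunk
    }
  }
  return count;
}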
2078
2079
2080 void MacroAssembler::Allocate(Register object_size, Register result,
2081 Register result_end, Register scratch,
2082 Label* gc_required, AllocationFlags flags) {
2083 DCHECK((flags & ALLOCATION_FOLDED) == 0);
2084 if (!FLAG_inline_new) {
2085 if (emit_debug_code()) {
2086 // Trash the registers to simulate an allocation failure.
2087 mov(result, Operand(0x7091));
2088 mov(scratch, Operand(0x7191));
2089 mov(result_end, Operand(0x7291));
2090 }
2091 jmp(gc_required);
2092 return;
2093 }
2094
2095 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
2096 // is not specified. Other registers must not overlap.
2097 DCHECK(!AreAliased(object_size, result, scratch, ip));
2098 DCHECK(!AreAliased(result_end, result, scratch, ip));
2099 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
2100
2101 // Check relative positions of allocation top and limit addresses.
2102 // The values must be adjacent in memory to allow the use of LDM.
2103 // Also, assert that the registers are numbered such that the values
2104 // are loaded in the correct order.
2105 ExternalReference allocation_top =
2106 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2107 ExternalReference allocation_limit =
2108 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2109 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
2110 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
2111 DCHECK((limit - top) == kPointerSize);
2112 DCHECK(result.code() < ip.code());
2113
2114 // Set up allocation top address and allocation limit registers.
2115 Register top_address = scratch;
2116 // This code stores a temporary value in ip. This is OK, as the code below
2117 // does not need ip for implicit literal generation.
2118 Register alloc_limit = ip;
2119 mov(top_address, Operand(allocation_top));
2120
2121 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2122 // Load allocation top into result and allocation limit into alloc_limit.
2123 ldm(ia, top_address, result.bit() | alloc_limit.bit());
2124 } else {
2125 if (emit_debug_code()) {
2126 // Assert that result actually contains top on entry.
2127 ldr(alloc_limit, MemOperand(top_address));
2128 cmp(result, alloc_limit);
2129 Check(eq, kUnexpectedAllocationTop);
2130 }
2131 // Load allocation limit. Result already contains allocation top.
2132 ldr(alloc_limit, MemOperand(top_address, limit - top));
2133 }
2134
2135 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2136 // Align the next allocation. Storing the filler map without checking top is
2137 // safe in new-space because the limit of the heap is aligned there.
2138 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2139 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2140 Label aligned;
2141 b(eq, &aligned);
2142 if ((flags & PRETENURE) != 0) {
2143 cmp(result, Operand(alloc_limit));
2144 b(hs, gc_required);
2145 }
2146 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2147 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2148 bind(&aligned);
2149 }
2150
2151 // Calculate new top and bail out if new space is exhausted. Use result
2152 // to calculate the new top. Object size may be in words so a shift is
2153 // required to get the number of bytes.
2154 if ((flags & SIZE_IN_WORDS) != 0) {
2155 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2156 } else {
2157 add(result_end, result, Operand(object_size), SetCC);
2158 }
2159
2160 cmp(result_end, Operand(alloc_limit));
2161 b(hi, gc_required);
2162
2163 // Update allocation top. result temporarily holds the new top.
2164 if (emit_debug_code()) {
2165 tst(result_end, Operand(kObjectAlignmentMask));
2166 Check(eq, kUnalignedAllocationInNewSpace);
2167 }
2168 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2169 // The top pointer is not updated for allocation folding dominators.
2170 str(result_end, MemOperand(top_address));
2171 }
2172
2173 // Tag object.
2174 add(result, result, Operand(kHeapObjectTag));
2175 }
2176
2177 void MacroAssembler::FastAllocate(Register object_size, Register result,
2178 Register result_end, Register scratch,
2179 AllocationFlags flags) {
2180 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
2181 // is not specified. Other registers must not overlap.
2182 DCHECK(!AreAliased(object_size, result, scratch, ip));
2183 DCHECK(!AreAliased(result_end, result, scratch, ip));
2184 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
2185
2186 ExternalReference allocation_top =
2187 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2188
2189 Register top_address = scratch;
2190 mov(top_address, Operand(allocation_top));
2191 ldr(result, MemOperand(top_address));
2192
2193 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2194 // Align the next allocation. Storing the filler map without checking top is
2195 // safe in new-space because the limit of the heap is aligned there.
2196 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2197 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2198 Label aligned;
2199 b(eq, &aligned);
2200 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2201 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2202 bind(&aligned);
2203 }
2204
2205 // Calculate new top using result. Object size may be in words so a shift is
2206 // required to get the number of bytes.
2207 if ((flags & SIZE_IN_WORDS) != 0) {
2208 add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
2209 } else {
2210 add(result_end, result, Operand(object_size), SetCC);
2211 }
2212
2213 // Update allocation top. result temporarily holds the new top.
2214 if (emit_debug_code()) {
2215 tst(result_end, Operand(kObjectAlignmentMask));
2216 Check(eq, kUnalignedAllocationInNewSpace);
2217 }
2218 // The top pointer is not updated for allocation folding dominators.
2219 str(result_end, MemOperand(top_address));
2220
2221 add(result, result, Operand(kHeapObjectTag));
2222 }
2223
2224 void MacroAssembler::FastAllocate(int object_size, Register result,
2225 Register scratch1, Register scratch2,
2226 AllocationFlags flags) {
2227 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2228 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
2229
2230 // Make object size into bytes.
2231 if ((flags & SIZE_IN_WORDS) != 0) {
2232 object_size *= kPointerSize;
2233 }
2234 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
2235
2236 ExternalReference allocation_top =
2237 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2238
2239 // Set up allocation top address register.
2240 Register top_address = scratch1;
2241 Register result_end = scratch2;
2242 mov(top_address, Operand(allocation_top));
2243 ldr(result, MemOperand(top_address));
2244
2245 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2246 // Align the next allocation. Storing the filler map without checking top is
2247 // safe in new-space because the limit of the heap is aligned there.
2248 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2249 and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
2250 Label aligned;
2251 b(eq, &aligned);
2252 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
2253 str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
2254 bind(&aligned);
2255 }
2256
2257 // Calculate new top using result. Object size may be in words so a shift is
2258 // required to get the number of bytes. We must preserve the ip register at
2259 // this point, so we cannot just use add().
2260 DCHECK(object_size > 0);
2261 Register source = result;
2262 Condition cond = al;
2263 int shift = 0;
2264 while (object_size != 0) {
2265 if (((object_size >> shift) & 0x03) == 0) {
2266 shift += 2;
2267 } else {
2268 int bits = object_size & (0xff << shift);
2269 object_size -= bits;
2270 shift += 8;
2271 Operand bits_operand(bits);
2272 DCHECK(bits_operand.instructions_required(this) == 1);
2273 add(result_end, source, bits_operand, LeaveCC, cond);
2274 source = result_end;
2275 cond = cc;
2276 }
2277 }
2278
2279 // The top pointer is not updated for allocation folding dominators.
2280 str(result_end, MemOperand(top_address));
2281
2282 add(result, result, Operand(kHeapObjectTag));
2283 }
2284
2285 void MacroAssembler::AllocateTwoByteString(Register result,
2286 Register length,
2287 Register scratch1,
2288 Register scratch2,
2289 Register scratch3,
2290 Label* gc_required) {
2291 // Calculate the number of bytes needed for the characters in the string while
2292 // observing object alignment.
2293 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2294 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
2295 add(scratch1, scratch1,
2296 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
2297 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2298
2299 // Allocate two-byte string in new space.
2300 Allocate(scratch1, result, scratch2, scratch3, gc_required,
2301 NO_ALLOCATION_FLAGS);
2302
2303 // Set the map, length and hash field.
2304 InitializeNewString(result,
2305 length,
2306 Heap::kStringMapRootIndex,
2307 scratch1,
2308 scratch2);
2309 }
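// Illustrative sketch (hypothetical helper): the mov/add/and_ sequence above
// computes header size plus 2 * length character bytes, rounded up to the
// object alignment; adding the alignment mask before clearing the low bits is
// what performs the rounding.
static inline int IllustrativeTwoByteStringSize(int length, int header_size,
                                                int alignment_mask) {
  int size = 2 * length + alignment_mask + header_size;
  return size & ~alignment_mask;  // round the total up to the alignment
}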
2310
2311
2312 void MacroAssembler::AllocateOneByteString(Register result, Register length,
2313 Register scratch1, Register scratch2,
2314 Register scratch3,
2315 Label* gc_required) {
2316 // Calculate the number of bytes needed for the characters in the string while
2317 // observing object alignment.
2318 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2319 DCHECK(kCharSize == 1);
2320 add(scratch1, length,
2321 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
2322 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2323
2324 // Allocate one-byte string in new space.
2325 Allocate(scratch1, result, scratch2, scratch3, gc_required,
2326 NO_ALLOCATION_FLAGS);
2327
2328 // Set the map, length and hash field.
2329 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
2330 scratch1, scratch2);
2331 }
2332
2333
2334 void MacroAssembler::AllocateTwoByteConsString(Register result,
2335 Register length,
2336 Register scratch1,
2337 Register scratch2,
2338 Label* gc_required) {
2339 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2340 NO_ALLOCATION_FLAGS);
2341
2342 InitializeNewString(result,
2343 length,
2344 Heap::kConsStringMapRootIndex,
2345 scratch1,
2346 scratch2);
2347 }
2348
2349
2350 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2351 Register scratch1,
2352 Register scratch2,
2353 Label* gc_required) {
2354 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2355 NO_ALLOCATION_FLAGS);
2356
2357 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2358 scratch1, scratch2);
2359 }
2360
2361
2362 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2363 Register length,
2364 Register scratch1,
2365 Register scratch2,
2366 Label* gc_required) {
2367 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2368 NO_ALLOCATION_FLAGS);
2369
2370 InitializeNewString(result,
2371 length,
2372 Heap::kSlicedStringMapRootIndex,
2373 scratch1,
2374 scratch2);
2375 }
2376
2377
2378 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2379 Register length,
2380 Register scratch1,
2381 Register scratch2,
2382 Label* gc_required) {
2383 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2384 NO_ALLOCATION_FLAGS);
2385
2386 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2387 scratch1, scratch2);
2388 }
2389
2390
2391 void MacroAssembler::CompareObjectType(Register object,
2392 Register map,
2393 Register type_reg,
2394 InstanceType type) {
2395 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2396
2397 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2398 CompareInstanceType(map, temp, type);
2399 }
2400
2401
2402 void MacroAssembler::CompareInstanceType(Register map,
2403 Register type_reg,
2404 InstanceType type) {
2405 // Registers map and type_reg can be ip. These two lines assert
2406 // that ip can be used with the two instructions (the constants
2407 // will never need ip).
2408 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2409 STATIC_ASSERT(LAST_TYPE < 256);
2410 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2411 cmp(type_reg, Operand(type));
2412 }
2413
2414
2415 void MacroAssembler::CompareRoot(Register obj,
2416 Heap::RootListIndex index) {
2417 DCHECK(!obj.is(ip));
2418 LoadRoot(ip, index);
2419 cmp(obj, ip);
2420 }
2421
2422
2423 void MacroAssembler::CheckFastElements(Register map,
2424 Register scratch,
2425 Label* fail) {
2426 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2427 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2428 STATIC_ASSERT(FAST_ELEMENTS == 2);
2429 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2430 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2431 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2432 b(hi, fail);
2433 }
2434
2435
2436 void MacroAssembler::CheckFastObjectElements(Register map,
2437 Register scratch,
2438 Label* fail) {
2439 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2440 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2441 STATIC_ASSERT(FAST_ELEMENTS == 2);
2442 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2443 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2444 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2445 b(ls, fail);
2446 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2447 b(hi, fail);
2448 }
2449
2450
2451 void MacroAssembler::CheckFastSmiElements(Register map,
2452 Register scratch,
2453 Label* fail) {
2454 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2455 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2456 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2457 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2458 b(hi, fail);
2459 }
2460
2461
2462 void MacroAssembler::StoreNumberToDoubleElements(
2463 Register value_reg,
2464 Register key_reg,
2465 Register elements_reg,
2466 Register scratch1,
2467 LowDwVfpRegister double_scratch,
2468 Label* fail,
2469 int elements_offset) {
2470 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2471 Label smi_value, store;
2472
2473 // Handle smi values specially.
2474 JumpIfSmi(value_reg, &smi_value);
2475
2476 // Ensure that the object is a heap number
2477 CheckMap(value_reg,
2478 scratch1,
2479 isolate()->factory()->heap_number_map(),
2480 fail,
2481 DONT_DO_SMI_CHECK);
2482
2483 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2484 VFPCanonicalizeNaN(double_scratch);
2485 b(&store);
2486
2487 bind(&smi_value);
2488 SmiToDouble(double_scratch, value_reg);
2489
2490 bind(&store);
2491 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2492 vstr(double_scratch,
2493 FieldMemOperand(scratch1,
2494 FixedDoubleArray::kHeaderSize - elements_offset));
2495 }
2496
2497
2498 void MacroAssembler::CompareMap(Register obj,
2499 Register scratch,
2500 Handle<Map> map,
2501 Label* early_success) {
2502 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2503 CompareMap(scratch, map, early_success);
2504 }
2505
2506
2507 void MacroAssembler::CompareMap(Register obj_map,
2508 Handle<Map> map,
2509 Label* early_success) {
2510 cmp(obj_map, Operand(map));
2511 }
2512
2513
2514 void MacroAssembler::CheckMap(Register obj,
2515 Register scratch,
2516 Handle<Map> map,
2517 Label* fail,
2518 SmiCheckType smi_check_type) {
2519 if (smi_check_type == DO_SMI_CHECK) {
2520 JumpIfSmi(obj, fail);
2521 }
2522
2523 Label success;
2524 CompareMap(obj, scratch, map, &success);
2525 b(ne, fail);
2526 bind(&success);
2527 }
2528
2529
2530 void MacroAssembler::CheckMap(Register obj,
2531 Register scratch,
2532 Heap::RootListIndex index,
2533 Label* fail,
2534 SmiCheckType smi_check_type) {
2535 if (smi_check_type == DO_SMI_CHECK) {
2536 JumpIfSmi(obj, fail);
2537 }
2538 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2539 LoadRoot(ip, index);
2540 cmp(scratch, ip);
2541 b(ne, fail);
2542 }
2543
2544
2545 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2546 Register scratch2, Handle<WeakCell> cell,
2547 Handle<Code> success,
2548 SmiCheckType smi_check_type) {
2549 Label fail;
2550 if (smi_check_type == DO_SMI_CHECK) {
2551 JumpIfSmi(obj, &fail);
2552 }
2553 ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2554 CmpWeakValue(scratch1, cell, scratch2);
2555 Jump(success, RelocInfo::CODE_TARGET, eq);
2556 bind(&fail);
2557 }
2558
2559
2560 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2561 Register scratch) {
2562 mov(scratch, Operand(cell));
2563 ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2564 cmp(value, scratch);
2565 }
2566
2567
2568 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2569 mov(value, Operand(cell));
2570 ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2571 }
2572
2573
2574 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2575 Label* miss) {
2576 GetWeakValue(value, cell);
2577 JumpIfSmi(value, miss);
2578 }
2579
2580
2581 void MacroAssembler::GetMapConstructor(Register result, Register map,
2582 Register temp, Register temp2) {
2583 Label done, loop;
2584 ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2585 bind(&loop);
2586 JumpIfSmi(result, &done);
2587 CompareObjectType(result, temp, temp2, MAP_TYPE);
2588 b(ne, &done);
2589 ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2590 b(&loop);
2591 bind(&done);
2592 }
2593
2594
2595 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2596 Register scratch, Label* miss) {
2597 // Get the prototype or initial map from the function.
2598 ldr(result,
2599 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2600
2601 // If the prototype or initial map is the hole, don't return it and
2602 // simply miss the cache instead. This will allow us to allocate a
2603 // prototype object on-demand in the runtime system.
2604 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2605 cmp(result, ip);
2606 b(eq, miss);
2607
2608 // If the function does not have an initial map, we're done.
2609 Label done;
2610 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2611 b(ne, &done);
2612
2613 // Get the prototype from the initial map.
2614 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2615
2616 // All done.
2617 bind(&done);
2618 }
2619
2620
2621 void MacroAssembler::CallStub(CodeStub* stub,
2622 TypeFeedbackId ast_id,
2623 Condition cond) {
2624 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2625 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2626 }
2627
2628
2629 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2630 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2631 }
2632
2633
2634 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2635 return has_frame_ || !stub->SometimesSetsUpAFrame();
2636 }
2637
2638
2639 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2640 // If the hash field contains an array index, pick it out. The assert checks
2641 // that the constants for the maximum number of digits for an array index
2642 // cached in the hash field and the number of bits reserved for it do not
2643 // conflict.
2644 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2645 (1 << String::kArrayIndexValueBits));
2646 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2647 }
2648
2649
2650 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2651 if (CpuFeatures::IsSupported(VFP3)) {
2652 vmov(value.low(), smi);
2653 vcvt_f64_s32(value, 1);
2654 } else {
2655 SmiUntag(ip, smi);
2656 vmov(value.low(), ip);
2657 vcvt_f64_s32(value, value.low());
2658 }
2659 }
2660
2661
2662 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2663 LowDwVfpRegister double_scratch) {
2664 DCHECK(!double_input.is(double_scratch));
2665 vcvt_s32_f64(double_scratch.low(), double_input);
2666 vcvt_f64_s32(double_scratch, double_scratch.low());
2667 VFPCompareAndSetFlags(double_input, double_scratch);
2668 }
2669
2670
2671 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2672 DwVfpRegister double_input,
2673 LowDwVfpRegister double_scratch) {
2674 DCHECK(!double_input.is(double_scratch));
2675 vcvt_s32_f64(double_scratch.low(), double_input);
2676 vmov(result, double_scratch.low());
2677 vcvt_f64_s32(double_scratch, double_scratch.low());
2678 VFPCompareAndSetFlags(double_input, double_scratch);
2679 }
2680
2681
2682 void MacroAssembler::TryInt32Floor(Register result,
2683 DwVfpRegister double_input,
2684 Register input_high,
2685 LowDwVfpRegister double_scratch,
2686 Label* done,
2687 Label* exact) {
2688 DCHECK(!result.is(input_high));
2689 DCHECK(!double_input.is(double_scratch));
2690 Label negative, exception;
2691
2692 VmovHigh(input_high, double_input);
2693
2694 // Test for NaN and infinities.
2695 Sbfx(result, input_high,
2696 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2697 cmp(result, Operand(-1));
2698 b(eq, &exception);
2699 // Test for values that can be exactly represented as a
2700 // signed 32-bit integer.
2701 TryDoubleToInt32Exact(result, double_input, double_scratch);
2702 // If exact, return (result already fetched).
2703 b(eq, exact);
2704 cmp(input_high, Operand::Zero());
2705 b(mi, &negative);
2706
2707 // Input is in ]+0, +inf[.
2708 // If result equals 0x7fffffff, the input was out of range or
2709 // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2710 // could fit into an int32; that means we always treat the input as
2711 // out of range and always go to exception.
2712 // If result < 0x7fffffff, go to done, result fetched.
2713 cmn(result, Operand(1));
2714 b(mi, &exception);
2715 b(done);
2716
2717 // Input is in ]-inf, -0[.
2718 // If x is a non-integer negative number,
2719 // floor(x) == round_to_zero(x) - 1.
2720 bind(&negative);
2721 sub(result, result, Operand(1), SetCC);
2722 // If result is still negative, go to done, result fetched.
2723 // Else, we had an overflow and we fall through exception.
2724 b(mi, done);
2725 bind(&exception);
2726 }
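// Illustrative sketch (hypothetical helper, with the doubles handled by the
// FPU in the real code): TryInt32Floor relies on the identity used above,
// namely that for a negative non-integral x, floor(x) == round_to_zero(x) - 1,
// while for non-negative or exactly integral x, floor(x) == round_to_zero(x).
// The input is assumed to be finite and in int32 range.
static inline int IllustrativeInt32Floor(double x) {
  int truncated = static_cast<int>(x);  // rounds toward zero
  if (x < 0.0 && static_cast<double>(truncated) != x) {
    return truncated - 1;  // non-integral negative input
  }
  return truncated;
}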
2727
2728 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2729 DwVfpRegister double_input,
2730 Label* done) {
2731 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2732 vcvt_s32_f64(double_scratch.low(), double_input);
2733 vmov(result, double_scratch.low());
2734
2735 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2736 sub(ip, result, Operand(1));
2737 cmp(ip, Operand(0x7ffffffe));
2738 b(lt, done);
2739 }
2740
2741
2742 void MacroAssembler::TruncateDoubleToI(Register result,
2743 DwVfpRegister double_input) {
2744 Label done;
2745
2746 TryInlineTruncateDoubleToI(result, double_input, &done);
2747
2748 // If we fell through, the inline version didn't succeed, so call the stub instead.
2749 push(lr);
2750 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2751 vstr(double_input, MemOperand(sp, 0));
2752
2753 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2754 CallStub(&stub);
2755
2756 add(sp, sp, Operand(kDoubleSize));
2757 pop(lr);
2758
2759 bind(&done);
2760 }
2761
2762
2763 void MacroAssembler::TruncateHeapNumberToI(Register result,
2764 Register object) {
2765 Label done;
2766 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2767 DCHECK(!result.is(object));
2768
2769 vldr(double_scratch,
2770 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2771 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2772
2773 // If we fell through, the inline version didn't succeed, so call the stub instead.
2774 push(lr);
2775 DoubleToIStub stub(isolate(),
2776 object,
2777 result,
2778 HeapNumber::kValueOffset - kHeapObjectTag,
2779 true,
2780 true);
2781 CallStub(&stub);
2782 pop(lr);
2783
2784 bind(&done);
2785 }
2786
2787
2788 void MacroAssembler::TruncateNumberToI(Register object,
2789 Register result,
2790 Register heap_number_map,
2791 Register scratch1,
2792 Label* not_number) {
2793 Label done;
2794 DCHECK(!result.is(object));
2795
2796 UntagAndJumpIfSmi(result, object, &done);
2797 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2798 TruncateHeapNumberToI(result, object);
2799
2800 bind(&done);
2801 }
2802
2803
2804 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2805 Register src,
2806 int num_least_bits) {
2807 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2808 ubfx(dst, src, kSmiTagSize, num_least_bits);
2809 } else {
2810 SmiUntag(dst, src);
2811 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2812 }
2813 }
2814
2815
2816 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2817 Register src,
2818 int num_least_bits) {
2819 and_(dst, src, Operand((1 << num_least_bits) - 1));
2820 }
2821
2822
2823 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2824 int num_arguments,
2825 SaveFPRegsMode save_doubles) {
2826 // All parameters are on the stack. r0 has the return value after call.
2827
2828 // If the expected number of arguments of the runtime function is
2829 // constant, we check that the actual number of arguments matches the
2830 // expectation.
2831 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2832
2833 // TODO(1236192): Most runtime routines don't need the number of
2834 // arguments passed in because it is constant. At some point we
2835 // should remove this need and make the runtime routine entry code
2836 // smarter.
2837 mov(r0, Operand(num_arguments));
2838 mov(r1, Operand(ExternalReference(f, isolate())));
2839 CEntryStub stub(isolate(), 1, save_doubles);
2840 CallStub(&stub);
2841 }
2842
2843
2844 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2845 int num_arguments) {
2846 mov(r0, Operand(num_arguments));
2847 mov(r1, Operand(ext));
2848
2849 CEntryStub stub(isolate(), 1);
2850 CallStub(&stub);
2851 }
2852
2853
2854 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2855 const Runtime::Function* function = Runtime::FunctionForId(fid);
2856 DCHECK_EQ(1, function->result_size);
2857 if (function->nargs >= 0) {
2858 // TODO(1236192): Most runtime routines don't need the number of
2859 // arguments passed in because it is constant. At some point we
2860 // should remove this need and make the runtime routine entry code
2861 // smarter.
2862 mov(r0, Operand(function->nargs));
2863 }
2864 JumpToExternalReference(ExternalReference(fid, isolate()));
2865 }
2866
2867
2868 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2869 #if defined(__thumb__)
2870 // Thumb mode builtin.
2871 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2872 #endif
2873 mov(r1, Operand(builtin));
2874 CEntryStub stub(isolate(), 1);
2875 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2876 }
2877
2878
2879 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2880 Register scratch1, Register scratch2) {
2881 if (FLAG_native_code_counters && counter->Enabled()) {
2882 mov(scratch1, Operand(value));
2883 mov(scratch2, Operand(ExternalReference(counter)));
2884 str(scratch1, MemOperand(scratch2));
2885 }
2886 }
2887
2888
2889 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2890 Register scratch1, Register scratch2) {
2891 DCHECK(value > 0);
2892 if (FLAG_native_code_counters && counter->Enabled()) {
2893 mov(scratch2, Operand(ExternalReference(counter)));
2894 ldr(scratch1, MemOperand(scratch2));
2895 add(scratch1, scratch1, Operand(value));
2896 str(scratch1, MemOperand(scratch2));
2897 }
2898 }
2899
2900
2901 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2902 Register scratch1, Register scratch2) {
2903 DCHECK(value > 0);
2904 if (FLAG_native_code_counters && counter->Enabled()) {
2905 mov(scratch2, Operand(ExternalReference(counter)));
2906 ldr(scratch1, MemOperand(scratch2));
2907 sub(scratch1, scratch1, Operand(value));
2908 str(scratch1, MemOperand(scratch2));
2909 }
2910 }
2911
2912
2913 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2914 if (emit_debug_code())
2915 Check(cond, reason);
2916 }
2917
2918
2919 void MacroAssembler::AssertFastElements(Register elements) {
2920 if (emit_debug_code()) {
2921 DCHECK(!elements.is(ip));
2922 Label ok;
2923 push(elements);
2924 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2925 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2926 cmp(elements, ip);
2927 b(eq, &ok);
2928 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2929 cmp(elements, ip);
2930 b(eq, &ok);
2931 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2932 cmp(elements, ip);
2933 b(eq, &ok);
2934 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2935 bind(&ok);
2936 pop(elements);
2937 }
2938 }
2939
2940
2941 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2942 Label L;
2943 b(cond, &L);
2944 Abort(reason);
2945 // will not return here
2946 bind(&L);
2947 }
2948
2949
2950 void MacroAssembler::Abort(BailoutReason reason) {
2951 Label abort_start;
2952 bind(&abort_start);
2953 #ifdef DEBUG
2954 const char* msg = GetBailoutReason(reason);
2955 if (msg != NULL) {
2956 RecordComment("Abort message: ");
2957 RecordComment(msg);
2958 }
2959
2960 if (FLAG_trap_on_abort) {
2961 stop(msg);
2962 return;
2963 }
2964 #endif
2965
2966 mov(r0, Operand(Smi::FromInt(reason)));
2967 push(r0);
2968
2969 // Disable stub call restrictions to always allow calls to abort.
2970 if (!has_frame_) {
2971 // We don't actually want to generate a pile of code for this, so just
2972 // claim there is a stack frame, without generating one.
2973 FrameScope scope(this, StackFrame::NONE);
2974 CallRuntime(Runtime::kAbort);
2975 } else {
2976 CallRuntime(Runtime::kAbort);
2977 }
2978 // will not return here
2979 if (is_const_pool_blocked()) {
2980 // If the calling code cares about the exact number of
2981 // instructions generated, we insert padding here to keep the size
2982 // of the Abort macro constant.
2983 static const int kExpectedAbortInstructions = 7;
2984 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2985 DCHECK(abort_instructions <= kExpectedAbortInstructions);
2986 while (abort_instructions++ < kExpectedAbortInstructions) {
2987 nop();
2988 }
2989 }
2990 }
2991
2992
2993 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2994 if (context_chain_length > 0) {
2995 // Move up the chain of contexts to the context containing the slot.
2996 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2997 for (int i = 1; i < context_chain_length; i++) {
2998 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2999 }
3000 } else {
3001 // Slot is in the current function context. Move it into the
3002 // destination register in case we store into it (the write barrier
3003 // cannot be allowed to destroy the context in cp).
3004 mov(dst, cp);
3005 }
3006 }
3007
3008
3009 void MacroAssembler::LoadTransitionedArrayMapConditional(
3010 ElementsKind expected_kind,
3011 ElementsKind transitioned_kind,
3012 Register map_in_out,
3013 Register scratch,
3014 Label* no_map_match) {
3015 DCHECK(IsFastElementsKind(expected_kind));
3016 DCHECK(IsFastElementsKind(transitioned_kind));
3017
3018 // Check that the function's map is the same as the expected cached map.
3019 ldr(scratch, NativeContextMemOperand());
3020 ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
3021 cmp(map_in_out, ip);
3022 b(ne, no_map_match);
3023
3024 // Use the transitioned cached map.
3025 ldr(map_in_out,
3026 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
3027 }
3028
3029
3030 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
3031 ldr(dst, NativeContextMemOperand());
3032 ldr(dst, ContextMemOperand(dst, index));
3033 }
3034
3035
3036 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
3037 Register map,
3038 Register scratch) {
3039 // Load the initial map. The global functions all have initial maps.
3040 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3041 if (emit_debug_code()) {
3042 Label ok, fail;
3043 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
3044 b(&ok);
3045 bind(&fail);
3046 Abort(kGlobalFunctionsMustHaveInitialMap);
3047 bind(&ok);
3048 }
3049 }
3050
3051
3052 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
3053 Register reg,
3054 Register scratch,
3055 Label* not_power_of_two_or_zero) {
3056 sub(scratch, reg, Operand(1), SetCC);
3057 b(mi, not_power_of_two_or_zero);
3058 tst(scratch, reg);
3059 b(ne, not_power_of_two_or_zero);
3060 }
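// Illustrative sketch (hypothetical helper): the emitted sub/tst pair uses the
// classic test that a positive x is a power of two exactly when
// (x & (x - 1)) == 0; the SetCC on the sub also routes zero and negative
// values to the failure label via the mi condition.
static inline bool IllustrativeIsPositivePowerOfTwo(int x) {
  if (x <= 0) return false;   // mirrors the b(mi, ...) path
  return (x & (x - 1)) == 0;  // mirrors the tst / b(ne, ...) path
}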
3061
3062
3063 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
3064 Register reg,
3065 Register scratch,
3066 Label* zero_and_neg,
3067 Label* not_power_of_two) {
3068 sub(scratch, reg, Operand(1), SetCC);
3069 b(mi, zero_and_neg);
3070 tst(scratch, reg);
3071 b(ne, not_power_of_two);
3072 }
3073
3074
3075 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
3076 Register reg2,
3077 Label* on_not_both_smi) {
3078 STATIC_ASSERT(kSmiTag == 0);
3079 tst(reg1, Operand(kSmiTagMask));
3080 tst(reg2, Operand(kSmiTagMask), eq);
3081 b(ne, on_not_both_smi);
3082 }
3083
3084
3085 void MacroAssembler::UntagAndJumpIfSmi(
3086 Register dst, Register src, Label* smi_case) {
3087 STATIC_ASSERT(kSmiTag == 0);
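// SmiUntag with SetCC shifts right by one and moves the tag bit into the
// carry flag, which is why carry-clear identifies a smi here (and carry-set
// a non-smi in UntagAndJumpIfNotSmi below).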
3088 SmiUntag(dst, src, SetCC);
3089 b(cc, smi_case); // Shifter carry is not set for a smi.
3090 }
3091
3092
3093 void MacroAssembler::UntagAndJumpIfNotSmi(
3094 Register dst, Register src, Label* non_smi_case) {
3095 STATIC_ASSERT(kSmiTag == 0);
3096 SmiUntag(dst, src, SetCC);
3097 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
3098 }
3099
3100
3101 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3102 Register reg2,
3103 Label* on_either_smi) {
3104 STATIC_ASSERT(kSmiTag == 0);
3105 tst(reg1, Operand(kSmiTagMask));
3106 tst(reg2, Operand(kSmiTagMask), ne);
3107 b(eq, on_either_smi);
3108 }
3109
3110 void MacroAssembler::AssertNotNumber(Register object) {
3111 if (emit_debug_code()) {
3112 STATIC_ASSERT(kSmiTag == 0);
3113 tst(object, Operand(kSmiTagMask));
3114 Check(ne, kOperandIsANumber);
3115 push(object);
3116 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
3117 pop(object);
3118 Check(ne, kOperandIsANumber);
3119 }
3120 }
3121
3122 void MacroAssembler::AssertNotSmi(Register object) {
3123 if (emit_debug_code()) {
3124 STATIC_ASSERT(kSmiTag == 0);
3125 tst(object, Operand(kSmiTagMask));
3126 Check(ne, kOperandIsASmi);
3127 }
3128 }
3129
3130
3131 void MacroAssembler::AssertSmi(Register object) {
3132 if (emit_debug_code()) {
3133 STATIC_ASSERT(kSmiTag == 0);
3134 tst(object, Operand(kSmiTagMask));
3135 Check(eq, kOperandIsNotSmi);
3136 }
3137 }
3138
3139
3140 void MacroAssembler::AssertString(Register object) {
3141 if (emit_debug_code()) {
3142 STATIC_ASSERT(kSmiTag == 0);
3143 tst(object, Operand(kSmiTagMask));
3144 Check(ne, kOperandIsASmiAndNotAString);
3145 push(object);
3146 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3147 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3148 pop(object);
3149 Check(lo, kOperandIsNotAString);
3150 }
3151 }
3152
3153
3154 void MacroAssembler::AssertName(Register object) {
3155 if (emit_debug_code()) {
3156 STATIC_ASSERT(kSmiTag == 0);
3157 tst(object, Operand(kSmiTagMask));
3158 Check(ne, kOperandIsASmiAndNotAName);
3159 push(object);
3160 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3161 CompareInstanceType(object, object, LAST_NAME_TYPE);
3162 pop(object);
3163 Check(le, kOperandIsNotAName);
3164 }
3165 }
3166
3167
3168 void MacroAssembler::AssertFunction(Register object) {
3169 if (emit_debug_code()) {
3170 STATIC_ASSERT(kSmiTag == 0);
3171 tst(object, Operand(kSmiTagMask));
3172 Check(ne, kOperandIsASmiAndNotAFunction);
3173 push(object);
3174 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
3175 pop(object);
3176 Check(eq, kOperandIsNotAFunction);
3177 }
3178 }
3179
3180
3181 void MacroAssembler::AssertBoundFunction(Register object) {
3182 if (emit_debug_code()) {
3183 STATIC_ASSERT(kSmiTag == 0);
3184 tst(object, Operand(kSmiTagMask));
3185 Check(ne, kOperandIsASmiAndNotABoundFunction);
3186 push(object);
3187 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
3188 pop(object);
3189 Check(eq, kOperandIsNotABoundFunction);
3190 }
3191 }
3192
3193 void MacroAssembler::AssertGeneratorObject(Register object) {
3194 if (emit_debug_code()) {
3195 STATIC_ASSERT(kSmiTag == 0);
3196 tst(object, Operand(kSmiTagMask));
3197 Check(ne, kOperandIsASmiAndNotAGeneratorObject);
3198 push(object);
3199 CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
3200 pop(object);
3201 Check(eq, kOperandIsNotAGeneratorObject);
3202 }
3203 }
3204
3205 void MacroAssembler::AssertReceiver(Register object) {
3206 if (emit_debug_code()) {
3207 STATIC_ASSERT(kSmiTag == 0);
3208 tst(object, Operand(kSmiTagMask));
3209 Check(ne, kOperandIsASmiAndNotAReceiver);
3210 push(object);
3211 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3212 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
3213 pop(object);
3214 Check(hs, kOperandIsNotAReceiver);
3215 }
3216 }
3217
3218
3219 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
3220 Register scratch) {
3221 if (emit_debug_code()) {
3222 Label done_checking;
3223 AssertNotSmi(object);
3224 CompareRoot(object, Heap::kUndefinedValueRootIndex);
3225 b(eq, &done_checking);
3226 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3227 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
3228 Assert(eq, kExpectedUndefinedOrCell);
3229 bind(&done_checking);
3230 }
3231 }
3232
3233
3234 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3235 if (emit_debug_code()) {
3236 CompareRoot(reg, index);
3237 Check(eq, kHeapNumberMapRegisterClobbered);
3238 }
3239 }
3240
3241
3242 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3243 Register heap_number_map,
3244 Register scratch,
3245 Label* on_not_heap_number) {
3246 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3247 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3248 cmp(scratch, heap_number_map);
3249 b(ne, on_not_heap_number);
3250 }
3251
3252
3253 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3254 Register first, Register second, Register scratch1, Register scratch2,
3255 Label* failure) {
3256 // Test that both first and second are sequential one-byte strings.
3257 // Assume that they are non-smis.
3258 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3259 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3260 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3261 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3262
3263 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3264 scratch2, failure);
3265 }
3266
3267 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3268 Register second,
3269 Register scratch1,
3270 Register scratch2,
3271 Label* failure) {
3272 // Check that neither is a smi.
3273 and_(scratch1, first, Operand(second));
3274 JumpIfSmi(scratch1, failure);
3275 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3276 scratch2, failure);
3277 }
3278
3279
3280 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3281 Label* not_unique_name) {
3282 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3283 Label succeed;
3284 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3285 b(eq, &succeed);
3286 cmp(reg, Operand(SYMBOL_TYPE));
3287 b(ne, not_unique_name);
3288
3289 bind(&succeed);
3290 }
3291
3292
3293 // Allocates a heap number or jumps to the gc_required label if the young
3294 // space is full and a scavenge is needed.
3295 void MacroAssembler::AllocateHeapNumber(Register result,
3296 Register scratch1,
3297 Register scratch2,
3298 Register heap_number_map,
3299 Label* gc_required,
3300 MutableMode mode) {
3301 // Allocate an object in the heap for the heap number and tag it as a heap
3302 // object.
3303 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3304 NO_ALLOCATION_FLAGS);
3305
3306 Heap::RootListIndex map_index = mode == MUTABLE
3307 ? Heap::kMutableHeapNumberMapRootIndex
3308 : Heap::kHeapNumberMapRootIndex;
3309 AssertIsRoot(heap_number_map, map_index);
3310
3311 // Store heap number map in the allocated object.
3312 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3313 }
3314
3315
3316 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3317 DwVfpRegister value,
3318 Register scratch1,
3319 Register scratch2,
3320 Register heap_number_map,
3321 Label* gc_required) {
3322 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
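// The VFP store's immediate offset must be a multiple of 4, so compute an
// untagged base address rather than folding the heap object tag into the
// offset.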
3323 sub(scratch1, result, Operand(kHeapObjectTag));
3324 vstr(value, scratch1, HeapNumber::kValueOffset);
3325 }
3326
3327
3328 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3329 Register value, Register scratch1,
3330 Register scratch2, Label* gc_required) {
3331 DCHECK(!result.is(constructor));
3332 DCHECK(!result.is(scratch1));
3333 DCHECK(!result.is(scratch2));
3334 DCHECK(!result.is(value));
3335
3336 // Allocate JSValue in new space.
3337 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3338 NO_ALLOCATION_FLAGS);
3339
3340 // Initialize the JSValue.
3341 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3342 str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3343 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3344 str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3345 str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3346 str(value, FieldMemOperand(result, JSValue::kValueOffset));
3347 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3348 }
3349
3350
3351 void MacroAssembler::CopyBytes(Register src,
3352 Register dst,
3353 Register length,
3354 Register scratch) {
3355 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3356
3357 // Align src before copying in word size chunks.
3358 cmp(length, Operand(kPointerSize));
3359 b(le, &byte_loop);
3360
3361 bind(&align_loop_1);
3362 tst(src, Operand(kPointerSize - 1));
3363 b(eq, &word_loop);
3364 ldrb(scratch, MemOperand(src, 1, PostIndex));
3365 strb(scratch, MemOperand(dst, 1, PostIndex));
3366 sub(length, length, Operand(1), SetCC);
3367 b(&align_loop_1);
3368 // Copy bytes in word size chunks.
3369 bind(&word_loop);
3370 if (emit_debug_code()) {
3371 tst(src, Operand(kPointerSize - 1));
3372 Assert(eq, kExpectingAlignmentForCopyBytes);
3373 }
3374 cmp(length, Operand(kPointerSize));
3375 b(lt, &byte_loop);
3376 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3377 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3378 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3379 } else {
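// Without unaligned access support, store the loaded word one byte at a
// time, least significant byte first (the target is little-endian).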
3380 strb(scratch, MemOperand(dst, 1, PostIndex));
3381 mov(scratch, Operand(scratch, LSR, 8));
3382 strb(scratch, MemOperand(dst, 1, PostIndex));
3383 mov(scratch, Operand(scratch, LSR, 8));
3384 strb(scratch, MemOperand(dst, 1, PostIndex));
3385 mov(scratch, Operand(scratch, LSR, 8));
3386 strb(scratch, MemOperand(dst, 1, PostIndex));
3387 }
3388 sub(length, length, Operand(kPointerSize));
3389 b(&word_loop);
3390
3391 // Copy the last bytes if any left.
3392 bind(&byte_loop);
3393 cmp(length, Operand::Zero());
3394 b(eq, &done);
3395 bind(&byte_loop_1);
3396 ldrb(scratch, MemOperand(src, 1, PostIndex));
3397 strb(scratch, MemOperand(dst, 1, PostIndex));
3398 sub(length, length, Operand(1), SetCC);
3399 b(ne, &byte_loop_1);
3400 bind(&done);
3401 }
3402
3403
3404 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3405 Register end_address,
3406 Register filler) {
3407 Label loop, entry;
3408 b(&entry);
3409 bind(&loop);
3410 str(filler, MemOperand(current_address, kPointerSize, PostIndex));
3411 bind(&entry);
3412 cmp(current_address, end_address);
3413 b(lo, &loop);
3414 }
3415
3416
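// Leaves the flags set so that callers can use ne for "d16-d31 are
// available" and eq for "only d0-d15 exist" (see SaveFPRegs/RestoreFPRegs).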
3417 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3418 mov(scratch, Operand(ExternalReference::cpu_features()));
3419 ldr(scratch, MemOperand(scratch));
3420 tst(scratch, Operand(1u << VFP32DREGS));
3421 }
3422
3423
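// Stores d0-d31 below 'location'; when only 16 D-registers exist, space for
// d16-d31 is still reserved so the resulting frame layout is the same either
// way.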
3424 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3425 CheckFor32DRegs(scratch);
3426 vstm(db_w, location, d16, d31, ne);
3427 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3428 vstm(db_w, location, d0, d15);
3429 }
3430
3431
3432 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3433 CheckFor32DRegs(scratch);
3434 vldm(ia_w, location, d0, d15);
3435 vldm(ia_w, location, d16, d31, ne);
3436 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3437 }
3438
3439
3440 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3441 Register first, Register second, Register scratch1, Register scratch2,
3442 Label* failure) {
3443 const int kFlatOneByteStringMask =
3444 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3445 const int kFlatOneByteStringTag =
3446 kStringTag | kOneByteStringTag | kSeqStringTag;
3447 and_(scratch1, first, Operand(kFlatOneByteStringMask));
3448 and_(scratch2, second, Operand(kFlatOneByteStringMask));
3449 cmp(scratch1, Operand(kFlatOneByteStringTag));
3450 // Ignore second test if first test failed.
3451 cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
3452 b(ne, failure);
3453 }
3454
3455
3456 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3457 Register scratch,
3458 Label* failure) {
3459 const int kFlatOneByteStringMask =
3460 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3461 const int kFlatOneByteStringTag =
3462 kStringTag | kOneByteStringTag | kSeqStringTag;
3463 and_(scratch, type, Operand(kFlatOneByteStringMask));
3464 cmp(scratch, Operand(kFlatOneByteStringTag));
3465 b(ne, failure);
3466 }
3467
3468 static const int kRegisterPassedArguments = 4;
3469
3470
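// Example (soft-float calling convention): 2 integer arguments and 3 double
// arguments need 2 + 2 * 3 = 8 argument words; r0-r3 hold the first four, so
// 4 words are passed on the stack.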
3471 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3472 int num_double_arguments) {
3473 int stack_passed_words = 0;
3474 if (use_eabi_hardfloat()) {
3475 // In the hard floating point calling convention, we can use
3476 // all double registers to pass doubles.
3477 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3478 stack_passed_words +=
3479 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3480 }
3481 } else {
3482 // In the soft floating point calling convention, every double
3483 // argument is passed using two registers.
3484 num_reg_arguments += 2 * num_double_arguments;
3485 }
3486 // Up to four simple arguments are passed in registers r0..r3.
3487 if (num_reg_arguments > kRegisterPassedArguments) {
3488 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3489 }
3490 return stack_passed_words;
3491 }
3492
3493
3494 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3495 Register index,
3496 Register value,
3497 uint32_t encoding_mask) {
3498 Label is_object;
3499 SmiTst(string);
3500 Check(ne, kNonObject);
3501
3502 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3503 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3504
3505 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3506 cmp(ip, Operand(encoding_mask));
3507 Check(eq, kUnexpectedStringType);
3508
3509 // The index is assumed to come in untagged. Tag it to compare with the
3510 // string length without using a temp register; it is restored at the end
3511 // of this function.
3512 Label index_tag_ok, index_tag_bad;
3513 TrySmiTag(index, index, &index_tag_bad);
3514 b(&index_tag_ok);
3515 bind(&index_tag_bad);
3516 Abort(kIndexIsTooLarge);
3517 bind(&index_tag_ok);
3518
3519 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3520 cmp(index, ip);
3521 Check(lt, kIndexIsTooLarge);
3522
3523 cmp(index, Operand(Smi::FromInt(0)));
3524 Check(ge, kIndexIsNegative);
3525
3526 SmiUntag(index, index);
3527 }
3528
3529
3530 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3531 int num_double_arguments,
3532 Register scratch) {
3533 int frame_alignment = ActivationFrameAlignment();
3534 int stack_passed_arguments = CalculateStackPassedWords(
3535 num_reg_arguments, num_double_arguments);
3536 if (frame_alignment > kPointerSize) {
3537 // Make the stack end at the alignment boundary and make room for the
3538 // stack-passed arguments plus the original value of sp.
3539 mov(scratch, sp);
3540 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3541 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3542 and_(sp, sp, Operand(-frame_alignment));
3543 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3544 } else {
3545 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3546 }
3547 }
3548
3549
3550 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3551 Register scratch) {
3552 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3553 }
3554
3555
3556 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3557 DCHECK(src.is(d0));
3558 if (!use_eabi_hardfloat()) {
3559 vmov(r0, r1, src);
3560 }
3561 }
3562
3563
3564 // On ARM this is just a synonym to make the purpose clear.
3565 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3566 MovToFloatParameter(src);
3567 }
3568
3569
3570 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3571 DwVfpRegister src2) {
3572 DCHECK(src1.is(d0));
3573 DCHECK(src2.is(d1));
3574 if (!use_eabi_hardfloat()) {
3575 vmov(r0, r1, src1);
3576 vmov(r2, r3, src2);
3577 }
3578 }
3579
3580
3581 void MacroAssembler::CallCFunction(ExternalReference function,
3582 int num_reg_arguments,
3583 int num_double_arguments) {
3584 mov(ip, Operand(function));
3585 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3586 }
3587
3588
3589 void MacroAssembler::CallCFunction(Register function,
3590 int num_reg_arguments,
3591 int num_double_arguments) {
3592 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3593 }
3594
3595
3596 void MacroAssembler::CallCFunction(ExternalReference function,
3597 int num_arguments) {
3598 CallCFunction(function, num_arguments, 0);
3599 }
3600
3601
3602 void MacroAssembler::CallCFunction(Register function,
3603 int num_arguments) {
3604 CallCFunction(function, num_arguments, 0);
3605 }
3606
3607
3608 void MacroAssembler::CallCFunctionHelper(Register function,
3609 int num_reg_arguments,
3610 int num_double_arguments) {
3611 DCHECK(has_frame());
3612 // Make sure that the stack is aligned before calling a C function unless
3613 // running in the simulator. The simulator has its own alignment check which
3614 // provides more information.
3615 #if V8_HOST_ARCH_ARM
3616 if (emit_debug_code()) {
3617 int frame_alignment = base::OS::ActivationFrameAlignment();
3618 int frame_alignment_mask = frame_alignment - 1;
3619 if (frame_alignment > kPointerSize) {
3620 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3621 Label alignment_as_expected;
3622 tst(sp, Operand(frame_alignment_mask));
3623 b(eq, &alignment_as_expected);
3624 // Don't use Check here, as it will call Runtime_Abort and possibly
3625 // re-enter here.
3626 stop("Unexpected alignment");
3627 bind(&alignment_as_expected);
3628 }
3629 }
3630 #endif
3631
3632 // Just call directly. The function called cannot cause a GC, or
3633 // allow preemption, so the return address in the link register
3634 // stays correct.
3635 Call(function);
3636 int stack_passed_arguments = CalculateStackPassedWords(
3637 num_reg_arguments, num_double_arguments);
3638 if (ActivationFrameAlignment() > kPointerSize) {
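// PrepareCallCFunction stashed the original sp just above the outgoing
// arguments, so reload it rather than adding a fixed amount.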
3639 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3640 } else {
3641 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3642 }
3643 }
3644
3645
3646 void MacroAssembler::CheckPageFlag(
3647 Register object,
3648 Register scratch,
3649 int mask,
3650 Condition cc,
3651 Label* condition_met) {
3652 DCHECK(cc == eq || cc == ne);
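// Clearing the low kPageSizeBits bits of the object address gives the start
// of its MemoryChunk, where the flags word is stored.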
3653 Bfc(scratch, object, 0, kPageSizeBits);
3654 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3655 tst(scratch, Operand(mask));
3656 b(cc, condition_met);
3657 }
3658
3659
3660 void MacroAssembler::JumpIfBlack(Register object,
3661 Register scratch0,
3662 Register scratch1,
3663 Label* on_black) {
3664 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3665 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3666 }
3667
3668
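// Tests the object's two-bit mark against (first_bit, second_bit) and jumps
// to has_color on a match. The two bits may straddle a bitmap cell boundary,
// hence the word_boundary path below.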
3669 void MacroAssembler::HasColor(Register object,
3670 Register bitmap_scratch,
3671 Register mask_scratch,
3672 Label* has_color,
3673 int first_bit,
3674 int second_bit) {
3675 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3676
3677 GetMarkBits(object, bitmap_scratch, mask_scratch);
3678
3679 Label other_color, word_boundary;
3680 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3681 tst(ip, Operand(mask_scratch));
3682 b(first_bit == 1 ? eq : ne, &other_color);
3683 // Shift the mask left by 1 by adding it to itself.
3684 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3685 b(eq, &word_boundary);
3686 tst(ip, Operand(mask_scratch));
3687 b(second_bit == 1 ? ne : eq, has_color);
3688 jmp(&other_color);
3689
3690 bind(&word_boundary);
3691 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3692 tst(ip, Operand(1));
3693 b(second_bit == 1 ? ne : eq, has_color);
3694 bind(&other_color);
3695 }
3696
3697
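// Computes the address of the marking bitmap cell for addr_reg on its page
// (bitmap_reg) and a one-bit mask for the object's first mark bit within
// that cell (mask_reg). ip is used as an additional scratch register.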
3698 void MacroAssembler::GetMarkBits(Register addr_reg,
3699 Register bitmap_reg,
3700 Register mask_reg) {
3701 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3702 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3703 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3704 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3705 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3706 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3707 mov(ip, Operand(1));
3708 mov(mask_reg, Operand(ip, LSL, mask_reg));
3709 }
3710
3711
3712 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3713 Register mask_scratch, Register load_scratch,
3714 Label* value_is_white) {
3715 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3716 GetMarkBits(value, bitmap_scratch, mask_scratch);
3717
3718 // If the value is black or grey we don't need to do anything.
3719 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3720 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3721 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3722 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3723
3724 // Since both black and grey have a 1 in the first position and white does
3725 // not have a 1 there we only need to check one bit.
3726 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3727 tst(mask_scratch, load_scratch);
3728 b(eq, value_is_white);
3729 }
3730
3731
3732 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3733 usat(output_reg, 8, Operand(input_reg));
3734 }
3735
3736
3737 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3738 DwVfpRegister input_reg,
3739 LowDwVfpRegister double_scratch) {
3740 Label done;
3741
3742 // Handle inputs >= 255 (including +infinity).
3743 Vmov(double_scratch, 255.0, result_reg);
3744 mov(result_reg, Operand(255));
3745 VFPCompareAndSetFlags(input_reg, double_scratch);
3746 b(ge, &done);
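// NaN compares as unordered, so it also falls through to the conversion
// below, where vcvt turns it into 0.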
3747
3748 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
3749 // rounding mode will provide the correct result.
3750 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3751 vmov(result_reg, double_scratch.low());
3752
3753 bind(&done);
3754 }
3755
3756
3757 void MacroAssembler::LoadInstanceDescriptors(Register map,
3758 Register descriptors) {
3759 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3760 }
3761
3762
3763 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3764 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3765 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3766 }
3767
3768
3769 void MacroAssembler::EnumLength(Register dst, Register map) {
3770 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3771 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3772 and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
3773 SmiTag(dst);
3774 }
3775
3776
3777 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3778 int accessor_index,
3779 AccessorComponent accessor) {
3780 ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3781 LoadInstanceDescriptors(dst, dst);
3782 ldr(dst,
3783 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3784 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3785 : AccessorPair::kSetterOffset;
3786 ldr(dst, FieldMemOperand(dst, offset));
3787 }
3788
3789
3790 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3791 Register null_value = r5;
3792 Register empty_fixed_array_value = r6;
3793 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3794 Label next, start;
3795 mov(r2, r0);
3796
3797 // Check if the enum length field is properly initialized, indicating that
3798 // there is an enum cache.
3799 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3800
3801 EnumLength(r3, r1);
3802 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3803 b(eq, call_runtime);
3804
3805 LoadRoot(null_value, Heap::kNullValueRootIndex);
3806 jmp(&start);
3807
3808 bind(&next);
3809 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3810
3811 // For all objects but the receiver, check that the cache is empty.
3812 EnumLength(r3, r1);
3813 cmp(r3, Operand(Smi::FromInt(0)));
3814 b(ne, call_runtime);
3815
3816 bind(&start);
3817
3818 // Check that there are no elements. Register r2 contains the current JS
3819 // object we've reached through the prototype chain.
3820 Label no_elements;
3821 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3822 cmp(r2, empty_fixed_array_value);
3823 b(eq, &no_elements);
3824
3825 // Second chance, the object may be using the empty slow element dictionary.
3826 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3827 b(ne, call_runtime);
3828
3829 bind(&no_elements);
3830 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3831 cmp(r2, null_value);
3832 b(ne, &next);
3833 }
3834
3835 void MacroAssembler::TestJSArrayForAllocationMemento(
3836 Register receiver_reg,
3837 Register scratch_reg,
3838 Label* no_memento_found) {
3839 Label map_check;
3840 Label top_check;
3841 ExternalReference new_space_allocation_top_adr =
3842 ExternalReference::new_space_allocation_top_address(isolate());
3843 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
3844 const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
3845
3846 // Bail out if the object is not in new space.
3847 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
3848 // If the object is in new space, we need to check whether it is on the same
3849 // page as the current top.
3850 add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
3851 mov(ip, Operand(new_space_allocation_top_adr));
3852 ldr(ip, MemOperand(ip));
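// XOR-ing the memento end address with the allocation top and masking off
// the in-page bits leaves zero exactly when both lie on the same page.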
3853 eor(scratch_reg, scratch_reg, Operand(ip));
3854 tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
3855 b(eq, &top_check);
3856 // The object is on a different page than allocation top. Bail out if the
3857 // object sits on the page boundary as no memento can follow and we cannot
3858 // touch the memory following it.
3859 add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
3860 eor(scratch_reg, scratch_reg, Operand(receiver_reg));
3861 tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
3862 b(ne, no_memento_found);
3863 // Continue with the actual map check.
3864 jmp(&map_check);
3865 // If top is on the same page as the current object, we need to check whether
3866 // we are below top.
3867 bind(&top_check);
3868 add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
3869 mov(ip, Operand(new_space_allocation_top_adr));
3870 ldr(ip, MemOperand(ip));
3871 cmp(scratch_reg, ip);
3872 b(gt, no_memento_found);
3873 // Memento map check.
3874 bind(&map_check);
3875 ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
3876 cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
3877 }
3878
3879 Register GetRegisterThatIsNotOneOf(Register reg1,
3880 Register reg2,
3881 Register reg3,
3882 Register reg4,
3883 Register reg5,
3884 Register reg6) {
3885 RegList regs = 0;
3886 if (reg1.is_valid()) regs |= reg1.bit();
3887 if (reg2.is_valid()) regs |= reg2.bit();
3888 if (reg3.is_valid()) regs |= reg3.bit();
3889 if (reg4.is_valid()) regs |= reg4.bit();
3890 if (reg5.is_valid()) regs |= reg5.bit();
3891 if (reg6.is_valid()) regs |= reg6.bit();
3892
3893 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
3894 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3895 int code = config->GetAllocatableGeneralCode(i);
3896 Register candidate = Register::from_code(code);
3897 if (regs & candidate.bit()) continue;
3898 return candidate;
3899 }
3900 UNREACHABLE();
3901 return no_reg;
3902 }
3903
3904
3905 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3906 Register object,
3907 Register scratch0,
3908 Register scratch1,
3909 Label* found) {
3910 DCHECK(!scratch1.is(scratch0));
3911 Register current = scratch0;
3912 Label loop_again, end;
3913
3914 // Start from the object's map and walk up the prototype chain.
3915 mov(current, object);
3916 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3917 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3918 CompareRoot(current, Heap::kNullValueRootIndex);
3919 b(eq, &end);
3920
3921 // Loop based on the map going up the prototype chain.
3922 bind(&loop_again);
3923 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3924
3925 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3926 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3927 ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3928 cmp(scratch1, Operand(JS_OBJECT_TYPE));
3929 b(lo, found);
3930
3931 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3932 DecodeField<Map::ElementsKindBits>(scratch1);
3933 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
3934 b(eq, found);
3935 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3936 CompareRoot(current, Heap::kNullValueRootIndex);
3937 b(ne, &loop_again);
3938
3939 bind(&end);
3940 }
3941
3942
3943 #ifdef DEBUG
3944 bool AreAliased(Register reg1,
3945 Register reg2,
3946 Register reg3,
3947 Register reg4,
3948 Register reg5,
3949 Register reg6,
3950 Register reg7,
3951 Register reg8) {
3952 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3953 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
3954 reg7.is_valid() + reg8.is_valid();
3955
3956 RegList regs = 0;
3957 if (reg1.is_valid()) regs |= reg1.bit();
3958 if (reg2.is_valid()) regs |= reg2.bit();
3959 if (reg3.is_valid()) regs |= reg3.bit();
3960 if (reg4.is_valid()) regs |= reg4.bit();
3961 if (reg5.is_valid()) regs |= reg5.bit();
3962 if (reg6.is_valid()) regs |= reg6.bit();
3963 if (reg7.is_valid()) regs |= reg7.bit();
3964 if (reg8.is_valid()) regs |= reg8.bit();
3965 int n_of_non_aliasing_regs = NumRegs(regs);
3966
3967 return n_of_valid_regs != n_of_non_aliasing_regs;
3968 }
3969 #endif
3970
3971
3972 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
3973 FlushICache flush_cache)
3974 : address_(address),
3975 size_(instructions * Assembler::kInstrSize),
3976 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
3977 flush_cache_(flush_cache) {
3978 // Create a new macro assembler pointing to the address of the code to patch.
3979 // The size is adjusted with kGap in order for the assembler to generate size
3980 // bytes of instructions without failing with buffer size constraints.
3981 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3982 }
3983
3984
3985 CodePatcher::~CodePatcher() {
3986 // Indicate that code has changed.
3987 if (flush_cache_ == FLUSH) {
3988 Assembler::FlushICache(masm_.isolate(), address_, size_);
3989 }
3990
3991 // Check that we don't have any pending constant pools.
3992 DCHECK(masm_.pending_32_bit_constants_.empty());
3993 DCHECK(masm_.pending_64_bit_constants_.empty());
3994
3995 // Check that the code was patched as expected.
3996 DCHECK(masm_.pc_ == address_ + size_);
3997 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3998 }
3999
4000
4001 void CodePatcher::Emit(Instr instr) {
4002 masm()->emit(instr);
4003 }
4004
4005
4006 void CodePatcher::Emit(Address addr) {
4007 masm()->emit(reinterpret_cast<Instr>(addr));
4008 }
4009
4010
4011 void CodePatcher::EmitCondition(Condition cond) {
4012 Instr instr = Assembler::instr_at(masm_.pc_);
4013 instr = (instr & ~kCondMask) | cond;
4014 masm_.emit(instr);
4015 }
4016
4017
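// Computes dividend / divisor using the multiply-by-magic-number technique
// (Granlund-Montgomery / Hacker's Delight): multiply by a precomputed
// fixed-point reciprocal, keep the high 32 bits, correct for the sign of the
// multiplier, arithmetic-shift, then add the dividend's sign bit so the
// result truncates towards zero. For divisor 3, for example, the magic
// multiplier is 0x55555556 with shift 0.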
4018 void MacroAssembler::TruncatingDiv(Register result,
4019 Register dividend,
4020 int32_t divisor) {
4021 DCHECK(!dividend.is(result));
4022 DCHECK(!dividend.is(ip));
4023 DCHECK(!result.is(ip));
4024 base::MagicNumbersForDivision<uint32_t> mag =
4025 base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
4026 mov(ip, Operand(mag.multiplier));
4027 bool neg = (mag.multiplier & (1U << 31)) != 0;
4028 if (divisor > 0 && neg) {
4029 smmla(result, dividend, ip, dividend);
4030 } else {
4031 smmul(result, dividend, ip);
4032 if (divisor < 0 && !neg && mag.multiplier > 0) {
4033 sub(result, result, Operand(dividend));
4034 }
4035 }
4036 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
4037 add(result, result, Operand(dividend, LSR, 31));
4038 }
4039
4040 } // namespace internal
4041 } // namespace v8
4042
4043 #endif // V8_TARGET_ARCH_ARM
4044