// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/arm/macro-assembler-arm.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


void MacroAssembler::Jump(Register target, Condition cond) {
  bx(target, cond);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
  return kInstrSize;
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
  blx(target, cond);
  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
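  // The call sequence is one blx plus however many instructions are needed
  // to materialize the target address in a register (e.g., a single
  // literal-pool ldr, or a movw/movt pair on ARMv7); the Operand itself
  // reports that count via instructions_required().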
  return kInstrSize +
         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}


int MacroAssembler::CallStubSize(
    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
                                                   Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
#endif

  // A call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                      @ return address
  // Or, for pre-V7 targets or for values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                      @ return address

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method automatically records positions
  // when pc is the target; since that is not the case here, we have to
  // do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}


void MacroAssembler::Ret(Condition cond) {
  bx(lr, cond);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
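    // No scratch register: swap in place with the classic three-eor trick.
    // Note this assumes reg1 and reg2 are distinct registers; eor-ing a
    // register with itself would zero it.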
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    mov(dst, Operand(value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}


void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}


void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(MLS)) {
    CpuFeatureScope scope(this, MLS);
    mls(dst, src1, src2, srcA, cond);
  } else {
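    // No mls instruction: emulate dst = srcA - (src1 * src2) with a mul
    // into ip followed by a sub, which is why srcA must not alias ip.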
    DCHECK(!srcA.is(ip));
    mul(ip, src1, src2, LeaveCC, cond);
    sub(dst, srcA, ip, LeaveCC, cond);
  }
}


void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.instructions_required(this) == 1) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
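    // An immediate of the form 2^n - 1 is a mask of the low n bits, so the
    // and is equivalent to a single ubfx extracting n bits from bit 0.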
    ubfx(dst, src1, 0,
        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
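    // Without ubfx, build a mask of 'width' bits starting at 'lsb' (e.g.,
    // lsb = 4, width = 8 gives 0xff0), mask the field, then shift it down.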
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
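    // Without sbfx, mask out the field, shift it up so its top bit becomes
    // the register's sign bit, then arithmetic-shift back down so the sign
    // is replicated through the high bits.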
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  DCHECK(0 <= lsb && lsb < 32);
  DCHECK(0 <= width && width < 32);
  DCHECK(lsb + width < 32);
  DCHECK(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
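    // Without bfi, clear the destination field with bic, take the low
    // 'width' bits of src, shift them into position, and or them in.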
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    DCHECK(!dst.is(pc) && !src.rm().is(pc));
    DCHECK((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
    DCHECK(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    // The CPU supports fast immediate values, and this root will never
    // change. We will load it as a relocatable immediate value.
    Handle<Object> root = isolate()->heap()->root_handle(index);
    mov(destination, Operand(root), LeaveCC, cond);
    return;
  }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    cmp(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Check for the end of the buffer, and call the overflow stub if we have
  // reached it.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


void MacroAssembler::PushFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                    fp.bit() | lr.bit());
}


void MacroAssembler::PopFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                    fp.bit() | lr.bit());
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0,
  // except when FLAG_enable_embedded_constant_pool is set, in which case pp
  // is omitted.
  DCHECK(kSafepointSavedRegisters ==
         (FLAG_enable_embedded_constant_pool
              ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
              : (1 << kNumSafepointSavedRegisters) - 1));
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that the lowest encodings are closest to the stack pointer.
  if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
    // RegList omits pp.
    reg_code -= 1;
  }
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // Number of d-regs not known at snapshot time.
  DCHECK(!serializer_enabled());
  // General purpose registers are pushed last on the stack.
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
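      // If dst1 aliases the base register, load the second word first so
      // the base is still intact when the load into dst1 clobbers it.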
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}


void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
  // If needed, restore wanted bits of FPSCR.
  Label fpscr_done;
  vmrs(scratch);
  if (emit_debug_code()) {
    Label rounding_mode_correct;
    tst(scratch, Operand(kVFPRoundingModeMask));
    b(eq, &rounding_mode_correct);
    // Don't call Assert here, since Runtime_Abort could re-enter here.
    stop("Default rounding mode not set");
    bind(&rounding_mode_correct);
  }
  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
  b(ne, &fpscr_done);
  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
  vmsr(scratch);
  bind(&fpscr_done);
}


void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  vsub(dst, src, kDoubleRegZero, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const SwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
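  // (Passing pc as the flags register selects the vmrs APSR_nzcv form,
  // which copies the FPSCR condition flags directly into the CPSR.)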
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const float src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const SwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const float src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
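  // Neither +0.0 nor -0.0 is encodable as a vmov immediate, but both can
  // be produced cheaply from kDoubleRegZero (directly, or via vneg).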
  if (value_rep == zero) {
    vmov(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero) {
    vneg(dst, kDoubleRegZero);
  } else {
    vmov(dst, imm, scratch);
  }
}


void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
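    // Only d0-d15 alias the single-precision s-registers; d16-d31 need the
    // indexed vmov form to read one half of the register.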
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  DCHECK(FLAG_enable_embedded_constant_pool);
  ldr(pp, MemOperand(code_target_address,
                     Code::kConstantPoolOffset - Code::kHeaderSize));
  add(pp, pp, code_target_address);
}


void MacroAssembler::LoadConstantPoolPointerRegister() {
  DCHECK(FLAG_enable_embedded_constant_pool);
  int entry_offset = pc_offset() + Instruction::kPCReadOffset;
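  // Reading pc yields the address of the current instruction plus
  // Instruction::kPCReadOffset (8 on ARM), so subtracting entry_offset
  // leaves ip pointing at the code object's first instruction.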
  sub(ip, pc, Operand(entry_offset));
  LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}


void MacroAssembler::StubPrologue() {
  PushFixedFrame();
  Push(Smi::FromInt(StackFrame::STUB));
  // Adjust FP to point to saved FP.
  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  { PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following three instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
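      // This emits: add r0, pc, #-8 (pc reads 8 bytes ahead, so r0 gets
      // the address of the add itself, i.e. the start of the sequence),
      // then ldr pc, [pc, #-4], a jump through the stub address word
      // emitted immediately below by emit_code_stub_address().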
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      add(r0, pc, Operand(-8));
      ldr(pc, MemOperand(pc, -4));
      emit_code_stub_address(stub);
    } else {
      PushFixedFrame(r1);
      nop(ip.code());
      // Adjust FP to point to saved FP.
      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }
  }
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
  ldr(vector,
      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  PushFixedFrame();
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    LoadConstantPoolPointerRegister();
  }
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  // Adjust FP to point to saved FP.
  add(fp, sp,
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}


int MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer
  // (if FLAG_enable_embedded_constant_pool).
  int frame_ends;
  if (FLAG_enable_embedded_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
  } else {
    mov(sp, fp);
    frame_ends = pc_offset();
    ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  return frame_ends;
}


void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, ip);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    // since the sp slot, code slot and constant pool slot (if
    // FLAG_enable_embedded_constant_pool) were pushed after the fp.
  }

  // Reserve space for the return address and the requested stack space, and
  // align the frame in preparation for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFrameSize;
    sub(r3, fp,
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ldr(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  if (FLAG_enable_embedded_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      add(sp, sp, argument_count);
    } else {
      add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    }
  }
}


void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(r0));
  DCHECK(expected.is_immediate() || expected.reg().is(r2));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r0, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r0, Operand(actual.immediate()));
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
                                             const ParameterCount& expected,
                                             const ParameterCount& actual) {
  Label skip_flooding;
  ExternalReference step_in_enabled =
      ExternalReference::debug_step_in_enabled_address(isolate());
  mov(r4, Operand(step_in_enabled));
  ldrb(r4, MemOperand(r4));
  cmp(r4, Operand(0));
  b(eq, &skip_flooding);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
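    // fun is pushed twice: the first copy is preserved across the runtime
    // call (and popped back below), the second is consumed as the call's
    // single argument.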
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_flooding);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(r1));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));

  if (call_wrapper.NeedsDebugStepCheck()) {
    FloodFunctionIfStepping(function, new_target, expected, actual);
  }

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(r3, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = r4;
    ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here when InvokePrologue has handled the invocation itself
    // via the arguments adaptor due to mismatched parameter counts.
1391     bind(&done);
1392   }
1393 }
1394 
1395 
InvokeFunction(Register fun,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1396 void MacroAssembler::InvokeFunction(Register fun,
1397                                     Register new_target,
1398                                     const ParameterCount& actual,
1399                                     InvokeFlag flag,
1400                                     const CallWrapper& call_wrapper) {
1401   // You can't call a function without a valid frame.
1402   DCHECK(flag == JUMP_FUNCTION || has_frame());
1403 
1404   // Contract with called JS functions requires that function is passed in r1.
1405   DCHECK(fun.is(r1));
1406 
1407   Register expected_reg = r2;
1408   Register temp_reg = r4;
1409 
1410   ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1411   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1412   ldr(expected_reg,
1413       FieldMemOperand(temp_reg,
1414                       SharedFunctionInfo::kFormalParameterCountOffset));
1415   SmiUntag(expected_reg);
1416 
1417   ParameterCount expected(expected_reg);
1418   InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1419 }
1420 
1421 
1422 void MacroAssembler::InvokeFunction(Register function,
1423                                     const ParameterCount& expected,
1424                                     const ParameterCount& actual,
1425                                     InvokeFlag flag,
1426                                     const CallWrapper& call_wrapper) {
1427   // You can't call a function without a valid frame.
1428   DCHECK(flag == JUMP_FUNCTION || has_frame());
1429 
1430   // Contract with called JS functions requires that function is passed in r1.
1431   DCHECK(function.is(r1));
1432 
1433   // Get the function and setup the context.
1434   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1435 
1436   InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
1437 }
1438 
1439 
1440 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1441                                     const ParameterCount& expected,
1442                                     const ParameterCount& actual,
1443                                     InvokeFlag flag,
1444                                     const CallWrapper& call_wrapper) {
1445   Move(r1, function);
1446   InvokeFunction(r1, expected, actual, flag, call_wrapper);
1447 }
1448 
1449 
1450 void MacroAssembler::IsObjectJSStringType(Register object,
1451                                           Register scratch,
1452                                           Label* fail) {
1453   DCHECK(kNotStringTag != 0);
1454 
1455   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1456   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1457   tst(scratch, Operand(kIsNotStringMask));
1458   b(ne, fail);
1459 }
1460 
1461 
1462 void MacroAssembler::IsObjectNameType(Register object,
1463                                       Register scratch,
1464                                       Label* fail) {
1465   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1466   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1467   cmp(scratch, Operand(LAST_NAME_TYPE));
1468   b(hi, fail);
1469 }
1470 
1471 
1472 void MacroAssembler::DebugBreak() {
1473   mov(r0, Operand::Zero());
1474   mov(r1,
1475       Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1476   CEntryStub ces(isolate(), 1);
1477   DCHECK(AllowThisStubCall(&ces));
1478   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1479 }
1480 
1481 
1482 void MacroAssembler::PushStackHandler() {
1483   // Adjust this code if the stack handler layout asserted below changes.
1484   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1485   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1486 
1487   // Link the current handler as the next handler.
1488   mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1489   ldr(r5, MemOperand(r6));
1490   push(r5);
1491 
1492   // Set this new handler as the current one.
1493   str(sp, MemOperand(r6));
1494 }
1495 
1496 
1497 void MacroAssembler::PopStackHandler() {
1498   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1499   pop(r1);
1500   mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1501   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1502   str(r1, MemOperand(ip));
1503 }
1504 
1505 
1506 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1507                                             Register scratch,
1508                                             Label* miss) {
1509   Label same_contexts;
1510 
1511   DCHECK(!holder_reg.is(scratch));
1512   DCHECK(!holder_reg.is(ip));
1513   DCHECK(!scratch.is(ip));
1514 
1515   // Load current lexical context from the stack frame.
1516   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1517   // In debug mode, make sure the lexical context is set.
1518 #ifdef DEBUG
1519   cmp(scratch, Operand::Zero());
1520   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1521 #endif
1522 
1523   // Load the native context of the current context.
1524   ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
1525 
1526   // Check the context is a native context.
1527   if (emit_debug_code()) {
1528     // Cannot use ip as a temporary in this verification code, because
1529     // ip is clobbered as part of cmp with an object Operand.
1530     push(holder_reg);  // Temporarily save holder on the stack.
1531     // Read the first word and compare to the native_context_map.
1532     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1533     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1534     cmp(holder_reg, ip);
1535     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1536     pop(holder_reg);  // Restore holder.
1537   }
1538 
1539   // Check if both contexts are the same.
1540   ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1541   cmp(scratch, Operand(ip));
1542   b(eq, &same_contexts);
1543 
1544   // Check the context is a native context.
1545   if (emit_debug_code()) {
1546     // Cannot use ip as a temporary in this verification code, because
1547     // ip is clobbered as part of cmp with an object Operand.
1548     push(holder_reg);  // Temporarily save holder on the stack.
1549     mov(holder_reg, ip);  // Move ip to its holding place.
1550     LoadRoot(ip, Heap::kNullValueRootIndex);
1551     cmp(holder_reg, ip);
1552     Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1553 
1554     ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1555     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1556     cmp(holder_reg, ip);
1557     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1558     // Restoring ip is not needed; it is reloaded below.
1559     pop(holder_reg);  // Restore holder.
1560     // Restore ip to holder's context.
1561     ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1562   }
1563 
1564   // Check that the security token in the calling global object is
1565   // compatible with the security token in the receiving global
1566   // object.
1567   int token_offset = Context::kHeaderSize +
1568                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
1569 
1570   ldr(scratch, FieldMemOperand(scratch, token_offset));
1571   ldr(ip, FieldMemOperand(ip, token_offset));
1572   cmp(scratch, Operand(ip));
1573   b(ne, miss);
1574 
1575   bind(&same_contexts);
1576 }
1577 
1578 
1579 // Compute the hash code from the untagged key.  This must be kept in sync with
1580 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1581 // code-stub-hydrogen.cc
1582 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1583   // First of all we assign the hash seed to scratch.
1584   LoadRoot(scratch, Heap::kHashSeedRootIndex);
1585   SmiUntag(scratch);
1586 
1587   // Xor original key with a seed.
1588   eor(t0, t0, Operand(scratch));
1589 
1590   // Compute the hash code from the untagged key.  This must be kept in sync
1591   // with ComputeIntegerHash in utils.h.
1592   //
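  // (The mixing sequence below is Thomas Wang's 32-bit integer hash.)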
1593   // hash = ~hash + (hash << 15);
1594   mvn(scratch, Operand(t0));
1595   add(t0, scratch, Operand(t0, LSL, 15));
1596   // hash = hash ^ (hash >> 12);
1597   eor(t0, t0, Operand(t0, LSR, 12));
1598   // hash = hash + (hash << 2);
1599   add(t0, t0, Operand(t0, LSL, 2));
1600   // hash = hash ^ (hash >> 4);
1601   eor(t0, t0, Operand(t0, LSR, 4));
1602   // hash = hash * 2057;
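  // (2057 = 2048 + 8 + 1, so the multiply is done with shifted adds.)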
1603   mov(scratch, Operand(t0, LSL, 11));
1604   add(t0, t0, Operand(t0, LSL, 3));
1605   add(t0, t0, scratch);
1606   // hash = hash ^ (hash >> 16);
1607   eor(t0, t0, Operand(t0, LSR, 16));
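  // Clear the two topmost bits so the hash is a non-negative value that
  // always fits in a smi.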
1608   bic(t0, t0, Operand(0xc0000000u));
1609 }
1610 
1611 
1612 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1613                                               Register elements,
1614                                               Register key,
1615                                               Register result,
1616                                               Register t0,
1617                                               Register t1,
1618                                               Register t2) {
1619   // Register use:
1620   //
1621   // elements - holds the slow-case elements of the receiver on entry.
1622   //            Unchanged unless 'result' is the same register.
1623   //
1624   // key      - holds the smi key on entry.
1625   //            Unchanged unless 'result' is the same register.
1626   //
1627   // result   - holds the result on exit if the load succeeded.
1628   //            Allowed to be the same as 'key' or 'elements'.
1629   //            Unchanged on bailout so 'key' and 'elements' can be used
1630   //            in further computation.
1631   //
1632   // Scratch registers:
1633   //
1634   // t0 - holds the untagged key on entry and holds the hash once computed.
1635   //
1636   // t1 - used to hold the capacity mask of the dictionary
1637   //
1638   // t2 - used for the index into the dictionary.
1639   Label done;
1640 
1641   GetNumberHash(t0, t1);
1642 
1643   // Compute the capacity mask.
1644   ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1645   SmiUntag(t1);
1646   sub(t1, t1, Operand(1));
1647 
1648   // Generate an unrolled loop that performs a few probes before giving up.
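  // Quadratic probing: probe i offsets the hash by i + i * i. Only
  // kNumberDictionaryProbes probes are inlined; the final probe branches
  // straight to the miss label rather than falling back to a generic loop.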
1649   for (int i = 0; i < kNumberDictionaryProbes; i++) {
1650     // Use t2 for index calculations and keep the hash intact in t0.
1651     mov(t2, t0);
1652     // Compute the masked index: (hash + i + i * i) & mask.
1653     if (i > 0) {
1654       add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1655     }
1656     and_(t2, t2, Operand(t1));
1657 
1658     // Scale the index by multiplying by the element size.
1659     DCHECK(SeededNumberDictionary::kEntrySize == 3);
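    // Each entry is three pointers (key, value, details), so the slot index
    // is index * 3; see kValueOffset and kDetailsOffset below.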
1660     add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
1661 
1662     // Check if the key is identical to the name.
1663     add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1664     ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1665     cmp(key, Operand(ip));
1666     if (i != kNumberDictionaryProbes - 1) {
1667       b(eq, &done);
1668     } else {
1669       b(ne, miss);
1670     }
1671   }
1672 
1673   bind(&done);
1674   // Check that the value is a field property.
1675   // t2: elements + (index * kPointerSize)
1676   const int kDetailsOffset =
1677       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1678   ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1679   DCHECK_EQ(DATA, 0);
1680   tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1681   b(ne, miss);
1682 
1683   // Get the value at the masked, scaled index and return.
1684   const int kValueOffset =
1685       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1686   ldr(result, FieldMemOperand(t2, kValueOffset));
1687 }
1688 
1689 
1690 void MacroAssembler::Allocate(int object_size,
1691                               Register result,
1692                               Register scratch1,
1693                               Register scratch2,
1694                               Label* gc_required,
1695                               AllocationFlags flags) {
1696   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1697   if (!FLAG_inline_new) {
1698     if (emit_debug_code()) {
1699       // Trash the registers to simulate an allocation failure.
1700       mov(result, Operand(0x7091));
1701       mov(scratch1, Operand(0x7191));
1702       mov(scratch2, Operand(0x7291));
1703     }
1704     jmp(gc_required);
1705     return;
1706   }
1707 
1708   DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1709 
1710   // Make object size into bytes.
1711   if ((flags & SIZE_IN_WORDS) != 0) {
1712     object_size *= kPointerSize;
1713   }
1714   DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1715 
1716   // Check relative positions of allocation top and limit addresses.
1717   // The values must be adjacent in memory to allow the use of LDM.
1718   // Also, assert that the registers are numbered such that the values
1719   // are loaded in the correct order.
1720   ExternalReference allocation_top =
1721       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1722   ExternalReference allocation_limit =
1723       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1724 
1725   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1726   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1727   DCHECK((limit - top) == kPointerSize);
1728   DCHECK(result.code() < ip.code());
1729 
1730   // Set up allocation top address register.
1731   Register top_address = scratch1;
1732   // This code stores a temporary value in ip. This is OK, as the code below
1733   // does not need ip for implicit literal generation.
1734   Register alloc_limit = ip;
1735   Register result_end = scratch2;
1736   mov(top_address, Operand(allocation_top));
1737 
1738   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1739     // Load allocation top into result and allocation limit into alloc_limit.
1740     ldm(ia, top_address, result.bit() | alloc_limit.bit());
1741   } else {
1742     if (emit_debug_code()) {
1743       // Assert that result actually contains top on entry.
1744       ldr(alloc_limit, MemOperand(top_address));
1745       cmp(result, alloc_limit);
1746       Check(eq, kUnexpectedAllocationTop);
1747     }
1748     // Load allocation limit. Result already contains allocation top.
1749     ldr(alloc_limit, MemOperand(top_address, limit - top));
1750   }
1751 
1752   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1753     // Align the next allocation. Storing the filler map without checking top is
1754     // safe in new-space because the limit of the heap is aligned there.
1755     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1756     and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
1757     Label aligned;
1758     b(eq, &aligned);
1759     if ((flags & PRETENURE) != 0) {
1760       cmp(result, Operand(alloc_limit));
1761       b(hs, gc_required);
1762     }
1763     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1764     str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
1765     bind(&aligned);
1766   }
1767 
1768   // Calculate new top and bail out if new space is exhausted. Use result
1769   // to calculate the new top. We must preserve the ip register at this
1770   // point, so we cannot just use add().
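  // Instead, object_size is split into chunks that each fit an ARM immediate
  // (an 8-bit value rotated by an even amount). The adds are chained through
  // the carry flag (SetCC / cc) so an overflow in any chunk reaches the
  // b(cs, gc_required) below.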
1771   DCHECK(object_size > 0);
1772   Register source = result;
1773   Condition cond = al;
1774   int shift = 0;
1775   while (object_size != 0) {
1776     if (((object_size >> shift) & 0x03) == 0) {
1777       shift += 2;
1778     } else {
1779       int bits = object_size & (0xff << shift);
1780       object_size -= bits;
1781       shift += 8;
1782       Operand bits_operand(bits);
1783       DCHECK(bits_operand.instructions_required(this) == 1);
1784       add(result_end, source, bits_operand, SetCC, cond);
1785       source = result_end;
1786       cond = cc;
1787     }
1788   }
1789   b(cs, gc_required);
1790   cmp(result_end, Operand(alloc_limit));
1791   b(hi, gc_required);
1792   str(result_end, MemOperand(top_address));
1793 
1794   // Tag object if requested.
1795   if ((flags & TAG_OBJECT) != 0) {
1796     add(result, result, Operand(kHeapObjectTag));
1797   }
1798 }
1799 
1800 
1801 void MacroAssembler::Allocate(Register object_size, Register result,
1802                               Register result_end, Register scratch,
1803                               Label* gc_required, AllocationFlags flags) {
1804   if (!FLAG_inline_new) {
1805     if (emit_debug_code()) {
1806       // Trash the registers to simulate an allocation failure.
1807       mov(result, Operand(0x7091));
1808       mov(scratch, Operand(0x7191));
1809       mov(result_end, Operand(0x7291));
1810     }
1811     jmp(gc_required);
1812     return;
1813   }
1814 
1815   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1816   // is not specified. Other registers must not overlap.
1817   DCHECK(!AreAliased(object_size, result, scratch, ip));
1818   DCHECK(!AreAliased(result_end, result, scratch, ip));
1819   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1820 
1821   // Check relative positions of allocation top and limit addresses.
1822   // The values must be adjacent in memory to allow the use of LDM.
1823   // Also, assert that the registers are numbered such that the values
1824   // are loaded in the correct order.
1825   ExternalReference allocation_top =
1826       AllocationUtils::GetAllocationTopReference(isolate(), flags);
1827   ExternalReference allocation_limit =
1828       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1829   intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1830   intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1831   DCHECK((limit - top) == kPointerSize);
1832   DCHECK(result.code() < ip.code());
1833 
1834   // Set up allocation top address and allocation limit registers.
1835   Register top_address = scratch;
1836   // This code stores a temporary value in ip. This is OK, as the code below
1837   // does not need ip for implicit literal generation.
1838   Register alloc_limit = ip;
1839   mov(top_address, Operand(allocation_top));
1840 
1841   if ((flags & RESULT_CONTAINS_TOP) == 0) {
1842     // Load allocation top into result and allocation limit into alloc_limit.
1843     ldm(ia, top_address, result.bit() | alloc_limit.bit());
1844   } else {
1845     if (emit_debug_code()) {
1846       // Assert that result actually contains top on entry.
1847       ldr(alloc_limit, MemOperand(top_address));
1848       cmp(result, alloc_limit);
1849       Check(eq, kUnexpectedAllocationTop);
1850     }
1851     // Load allocation limit. Result already contains allocation top.
1852     ldr(alloc_limit, MemOperand(top_address, limit - top));
1853   }
1854 
1855   if ((flags & DOUBLE_ALIGNMENT) != 0) {
1856     // Align the next allocation. Storing the filler map without checking top is
1857     // safe in new-space because the limit of the heap is aligned there.
1858     DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1859     and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
1860     Label aligned;
1861     b(eq, &aligned);
1862     if ((flags & PRETENURE) != 0) {
1863       cmp(result, Operand(alloc_limit));
1864       b(hs, gc_required);
1865     }
1866     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1867     str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
1868     bind(&aligned);
1869   }
1870 
1871   // Calculate new top and bail out if new space is exhausted. Use result
1872   // to calculate the new top. Object size may be in words so a shift is
1873   // required to get the number of bytes.
1874   if ((flags & SIZE_IN_WORDS) != 0) {
1875     add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1876   } else {
1877     add(result_end, result, Operand(object_size), SetCC);
1878   }
1879   b(cs, gc_required);
1880   cmp(result_end, Operand(alloc_limit));
1881   b(hi, gc_required);
1882 
1883   // Update allocation top. result temporarily holds the new top.
1884   if (emit_debug_code()) {
1885     tst(result_end, Operand(kObjectAlignmentMask));
1886     Check(eq, kUnalignedAllocationInNewSpace);
1887   }
1888   str(result_end, MemOperand(top_address));
1889 
1890   // Tag object if requested.
1891   if ((flags & TAG_OBJECT) != 0) {
1892     add(result, result, Operand(kHeapObjectTag));
1893   }
1894 }
1895 
1896 
1897 void MacroAssembler::AllocateTwoByteString(Register result,
1898                                            Register length,
1899                                            Register scratch1,
1900                                            Register scratch2,
1901                                            Register scratch3,
1902                                            Label* gc_required) {
1903   // Calculate the number of bytes needed for the characters in the string while
1904   // observing object alignment.
1905   DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
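  // scratch1 = align(length * 2 + header), using the usual round-up idiom
  // (size + mask) & ~mask.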
1906   mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
1907   add(scratch1, scratch1,
1908       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1909   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1910 
1911   // Allocate two-byte string in new space.
1912   Allocate(scratch1,
1913            result,
1914            scratch2,
1915            scratch3,
1916            gc_required,
1917            TAG_OBJECT);
1918 
1919   // Set the map, length and hash field.
1920   InitializeNewString(result,
1921                       length,
1922                       Heap::kStringMapRootIndex,
1923                       scratch1,
1924                       scratch2);
1925 }
1926 
1927 
1928 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1929                                            Register scratch1, Register scratch2,
1930                                            Register scratch3,
1931                                            Label* gc_required) {
1932   // Calculate the number of bytes needed for the characters in the string while
1933   // observing object alignment.
1934   DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1935   DCHECK(kCharSize == 1);
1936   add(scratch1, length,
1937       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1938   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1939 
1940   // Allocate one-byte string in new space.
1941   Allocate(scratch1,
1942            result,
1943            scratch2,
1944            scratch3,
1945            gc_required,
1946            TAG_OBJECT);
1947 
1948   // Set the map, length and hash field.
1949   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1950                       scratch1, scratch2);
1951 }
1952 
1953 
1954 void MacroAssembler::AllocateTwoByteConsString(Register result,
1955                                                Register length,
1956                                                Register scratch1,
1957                                                Register scratch2,
1958                                                Label* gc_required) {
1959   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1960            TAG_OBJECT);
1961 
1962   InitializeNewString(result,
1963                       length,
1964                       Heap::kConsStringMapRootIndex,
1965                       scratch1,
1966                       scratch2);
1967 }
1968 
1969 
1970 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1971                                                Register scratch1,
1972                                                Register scratch2,
1973                                                Label* gc_required) {
1974   Allocate(ConsString::kSize,
1975            result,
1976            scratch1,
1977            scratch2,
1978            gc_required,
1979            TAG_OBJECT);
1980 
1981   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1982                       scratch1, scratch2);
1983 }
1984 
1985 
1986 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1987                                                  Register length,
1988                                                  Register scratch1,
1989                                                  Register scratch2,
1990                                                  Label* gc_required) {
1991   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1992            TAG_OBJECT);
1993 
1994   InitializeNewString(result,
1995                       length,
1996                       Heap::kSlicedStringMapRootIndex,
1997                       scratch1,
1998                       scratch2);
1999 }
2000 
2001 
2002 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2003                                                  Register length,
2004                                                  Register scratch1,
2005                                                  Register scratch2,
2006                                                  Label* gc_required) {
2007   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2008            TAG_OBJECT);
2009 
2010   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2011                       scratch1, scratch2);
2012 }
2013 
2014 
2015 void MacroAssembler::CompareObjectType(Register object,
2016                                        Register map,
2017                                        Register type_reg,
2018                                        InstanceType type) {
2019   const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2020 
2021   ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2022   CompareInstanceType(map, temp, type);
2023 }
2024 
2025 
2026 void MacroAssembler::CompareInstanceType(Register map,
2027                                          Register type_reg,
2028                                          InstanceType type) {
2029   // Registers map and type_reg can be ip. These two lines assert
2030   // that ip can be used with the two instructions (the constants
2031   // will never need ip).
2032   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2033   STATIC_ASSERT(LAST_TYPE < 256);
2034   ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2035   cmp(type_reg, Operand(type));
2036 }
2037 
2038 
2039 void MacroAssembler::CompareRoot(Register obj,
2040                                  Heap::RootListIndex index) {
2041   DCHECK(!obj.is(ip));
2042   LoadRoot(ip, index);
2043   cmp(obj, ip);
2044 }
2045 
2046 
2047 void MacroAssembler::CheckFastElements(Register map,
2048                                        Register scratch,
2049                                        Label* fail) {
2050   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2051   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2052   STATIC_ASSERT(FAST_ELEMENTS == 2);
2053   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2054   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2055   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2056   b(hi, fail);
2057 }
2058 
2059 
2060 void MacroAssembler::CheckFastObjectElements(Register map,
2061                                              Register scratch,
2062                                              Label* fail) {
2063   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2064   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2065   STATIC_ASSERT(FAST_ELEMENTS == 2);
2066   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
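  // Fail if the kind is at or below the smi-only range, or above
  // FAST_HOLEY_ELEMENTS; only the two object element kinds pass.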
2067   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2068   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2069   b(ls, fail);
2070   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2071   b(hi, fail);
2072 }
2073 
2074 
2075 void MacroAssembler::CheckFastSmiElements(Register map,
2076                                           Register scratch,
2077                                           Label* fail) {
2078   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2079   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2080   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2081   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2082   b(hi, fail);
2083 }
2084 
2085 
2086 void MacroAssembler::StoreNumberToDoubleElements(
2087                                       Register value_reg,
2088                                       Register key_reg,
2089                                       Register elements_reg,
2090                                       Register scratch1,
2091                                       LowDwVfpRegister double_scratch,
2092                                       Label* fail,
2093                                       int elements_offset) {
2094   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2095   Label smi_value, store;
2096 
2097   // Handle smi values specially.
2098   JumpIfSmi(value_reg, &smi_value);
2099 
2100   // Ensure that the object is a heap number
2101   CheckMap(value_reg,
2102            scratch1,
2103            isolate()->factory()->heap_number_map(),
2104            fail,
2105            DONT_DO_SMI_CHECK);
2106 
2107   vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2108   // Force a canonical NaN.
2109   if (emit_debug_code()) {
2110     vmrs(ip);
2111     tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2112     Assert(ne, kDefaultNaNModeNotSet);
2113   }
2114   VFPCanonicalizeNaN(double_scratch);
2115   b(&store);
2116 
2117   bind(&smi_value);
2118   SmiToDouble(double_scratch, value_reg);
2119 
2120   bind(&store);
2121   add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2122   vstr(double_scratch,
2123        FieldMemOperand(scratch1,
2124                        FixedDoubleArray::kHeaderSize - elements_offset));
2125 }
2126 
2127 
2128 void MacroAssembler::CompareMap(Register obj,
2129                                 Register scratch,
2130                                 Handle<Map> map,
2131                                 Label* early_success) {
2132   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2133   CompareMap(scratch, map, early_success);
2134 }
2135 
2136 
2137 void MacroAssembler::CompareMap(Register obj_map,
2138                                 Handle<Map> map,
2139                                 Label* early_success) {
2140   cmp(obj_map, Operand(map));
2141 }
2142 
2143 
2144 void MacroAssembler::CheckMap(Register obj,
2145                               Register scratch,
2146                               Handle<Map> map,
2147                               Label* fail,
2148                               SmiCheckType smi_check_type) {
2149   if (smi_check_type == DO_SMI_CHECK) {
2150     JumpIfSmi(obj, fail);
2151   }
2152 
2153   Label success;
2154   CompareMap(obj, scratch, map, &success);
2155   b(ne, fail);
2156   bind(&success);
2157 }
2158 
2159 
2160 void MacroAssembler::CheckMap(Register obj,
2161                               Register scratch,
2162                               Heap::RootListIndex index,
2163                               Label* fail,
2164                               SmiCheckType smi_check_type) {
2165   if (smi_check_type == DO_SMI_CHECK) {
2166     JumpIfSmi(obj, fail);
2167   }
2168   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2169   LoadRoot(ip, index);
2170   cmp(scratch, ip);
2171   b(ne, fail);
2172 }
2173 
2174 
2175 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2176                                      Register scratch2, Handle<WeakCell> cell,
2177                                      Handle<Code> success,
2178                                      SmiCheckType smi_check_type) {
2179   Label fail;
2180   if (smi_check_type == DO_SMI_CHECK) {
2181     JumpIfSmi(obj, &fail);
2182   }
2183   ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2184   CmpWeakValue(scratch1, cell, scratch2);
2185   Jump(success, RelocInfo::CODE_TARGET, eq);
2186   bind(&fail);
2187 }
2188 
2189 
2190 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2191                                   Register scratch) {
2192   mov(scratch, Operand(cell));
2193   ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2194   cmp(value, scratch);
2195 }
2196 
2197 
2198 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2199   mov(value, Operand(cell));
2200   ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2201 }
2202 
2203 
2204 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2205                                    Label* miss) {
2206   GetWeakValue(value, cell);
2207   JumpIfSmi(value, miss);
2208 }
2209 
2210 
2211 void MacroAssembler::GetMapConstructor(Register result, Register map,
2212                                        Register temp, Register temp2) {
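  // The constructor-or-back-pointer field holds a back pointer (a map) for
  // transitioned maps; follow the chain until something that is not a map
  // (the constructor, or a smi) is found.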
2213   Label done, loop;
2214   ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2215   bind(&loop);
2216   JumpIfSmi(result, &done);
2217   CompareObjectType(result, temp, temp2, MAP_TYPE);
2218   b(ne, &done);
2219   ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2220   b(&loop);
2221   bind(&done);
2222 }
2223 
2224 
2225 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2226                                              Register scratch, Label* miss) {
2227   // Get the prototype or initial map from the function.
2228   ldr(result,
2229       FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2230 
2231   // If the prototype or initial map is the hole, don't return it and
2232   // simply miss the cache instead. This will allow us to allocate a
2233   // prototype object on-demand in the runtime system.
2234   LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2235   cmp(result, ip);
2236   b(eq, miss);
2237 
2238   // If the function does not have an initial map, we're done.
2239   Label done;
2240   CompareObjectType(result, scratch, scratch, MAP_TYPE);
2241   b(ne, &done);
2242 
2243   // Get the prototype from the initial map.
2244   ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2245 
2246   // All done.
2247   bind(&done);
2248 }
2249 
2250 
2251 void MacroAssembler::CallStub(CodeStub* stub,
2252                               TypeFeedbackId ast_id,
2253                               Condition cond) {
2254   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2255   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2256 }
2257 
2258 
2259 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2260   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2261 }
2262 
2263 
2264 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2265   return has_frame_ || !stub->SometimesSetsUpAFrame();
2266 }
2267 
2268 
2269 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2270   // If the hash field contains an array index, pick it out. The assert
2271   // checks that the constants for the maximum number of digits for an
2272   // array index cached in the hash field and the number of bits reserved
2273   // for it do not conflict.
2274   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2275          (1 << String::kArrayIndexValueBits));
2276   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2277 }
2278 
2279 
2280 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2281   if (CpuFeatures::IsSupported(VFP3)) {
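    // Convert the tagged value as a fixed-point number with one fraction
    // bit: the implicit divide-by-two untags the smi during the conversion.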
2282     vmov(value.low(), smi);
2283     vcvt_f64_s32(value, 1);
2284   } else {
2285     SmiUntag(ip, smi);
2286     vmov(value.low(), ip);
2287     vcvt_f64_s32(value, value.low());
2288   }
2289 }
2290 
2291 
2292 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2293                                        LowDwVfpRegister double_scratch) {
2294   DCHECK(!double_input.is(double_scratch));
2295   vcvt_s32_f64(double_scratch.low(), double_input);
2296   vcvt_f64_s32(double_scratch, double_scratch.low());
2297   VFPCompareAndSetFlags(double_input, double_scratch);
2298 }
2299 
2300 
2301 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2302                                            DwVfpRegister double_input,
2303                                            LowDwVfpRegister double_scratch) {
2304   DCHECK(!double_input.is(double_scratch));
2305   vcvt_s32_f64(double_scratch.low(), double_input);
2306   vmov(result, double_scratch.low());
2307   vcvt_f64_s32(double_scratch, double_scratch.low());
2308   VFPCompareAndSetFlags(double_input, double_scratch);
2309 }
2310 
2311 
2312 void MacroAssembler::TryInt32Floor(Register result,
2313                                    DwVfpRegister double_input,
2314                                    Register input_high,
2315                                    LowDwVfpRegister double_scratch,
2316                                    Label* done,
2317                                    Label* exact) {
2318   DCHECK(!result.is(input_high));
2319   DCHECK(!double_input.is(double_scratch));
2320   Label negative, exception;
2321 
2322   VmovHigh(input_high, double_input);
2323 
2324   // Test for NaN and infinities.
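  // Sbfx sign-extends the 11-bit exponent field, so an all-ones exponent
  // (NaN or infinity) compares equal to -1.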
2325   Sbfx(result, input_high,
2326        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2327   cmp(result, Operand(-1));
2328   b(eq, &exception);
2329   // Test for values that can be exactly represented as a
2330   // signed 32-bit integer.
2331   TryDoubleToInt32Exact(result, double_input, double_scratch);
2332   // If exact, return (result already fetched).
2333   b(eq, exact);
2334   cmp(input_high, Operand::Zero());
2335   b(mi, &negative);
2336 
2337   // Input is in ]+0, +inf[.
2338   // If result equals 0x7fffffff input was out of range or
2339   // in ]0x7fffffff, 0x80000000[. We ignore this last case, whose
2340   // floor would still fit into an int32; that means we always treat
2341   // the input as out of range and always go to exception.
2342   // If result < 0x7fffffff, go to done, result fetched.
2343   cmn(result, Operand(1));
2344   b(mi, &exception);
2345   b(done);
2346 
2347   // Input is in ]-inf, -0[.
2348   // If x is a non integer negative number,
2349   // floor(x) <=> round_to_zero(x) - 1.
2350   bind(&negative);
2351   sub(result, result, Operand(1), SetCC);
2352   // If result is still negative, go to done, result fetched.
2353   // Else, we had an overflow and we fall through exception.
2354   b(mi, done);
2355   bind(&exception);
2356 }
2357 
2358 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2359                                                 DwVfpRegister double_input,
2360                                                 Label* done) {
2361   LowDwVfpRegister double_scratch = kScratchDoubleReg;
2362   vcvt_s32_f64(double_scratch.low(), double_input);
2363   vmov(result, double_scratch.low());
2364 
2365   // If result is not saturated (0x7fffffff or 0x80000000), we are done.
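  // Subtracting 1 maps the two saturated values to 0x7ffffffe and 0x7fffffff,
  // the only results that are not signed-less-than 0x7ffffffe.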
2366   sub(ip, result, Operand(1));
2367   cmp(ip, Operand(0x7ffffffe));
2368   b(lt, done);
2369 }
2370 
2371 
2372 void MacroAssembler::TruncateDoubleToI(Register result,
2373                                        DwVfpRegister double_input) {
2374   Label done;
2375 
2376   TryInlineTruncateDoubleToI(result, double_input, &done);
2377 
2378   // If we fell through, the inline version didn't succeed - call the stub.
2379   push(lr);
2380   sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2381   vstr(double_input, MemOperand(sp, 0));
2382 
2383   DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2384   CallStub(&stub);
2385 
2386   add(sp, sp, Operand(kDoubleSize));
2387   pop(lr);
2388 
2389   bind(&done);
2390 }
2391 
2392 
2393 void MacroAssembler::TruncateHeapNumberToI(Register result,
2394                                            Register object) {
2395   Label done;
2396   LowDwVfpRegister double_scratch = kScratchDoubleReg;
2397   DCHECK(!result.is(object));
2398 
2399   vldr(double_scratch,
2400        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2401   TryInlineTruncateDoubleToI(result, double_scratch, &done);
2402 
2403   // If we fell through, the inline version didn't succeed - call the stub.
2404   push(lr);
2405   DoubleToIStub stub(isolate(),
2406                      object,
2407                      result,
2408                      HeapNumber::kValueOffset - kHeapObjectTag,
2409                      true,
2410                      true);
2411   CallStub(&stub);
2412   pop(lr);
2413 
2414   bind(&done);
2415 }
2416 
2417 
2418 void MacroAssembler::TruncateNumberToI(Register object,
2419                                        Register result,
2420                                        Register heap_number_map,
2421                                        Register scratch1,
2422                                        Label* not_number) {
2423   Label done;
2424   DCHECK(!result.is(object));
2425 
2426   UntagAndJumpIfSmi(result, object, &done);
2427   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2428   TruncateHeapNumberToI(result, object);
2429 
2430   bind(&done);
2431 }
2432 
2433 
2434 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2435                                          Register src,
2436                                          int num_least_bits) {
2437   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2438     ubfx(dst, src, kSmiTagSize, num_least_bits);
2439   } else {
2440     SmiUntag(dst, src);
2441     and_(dst, dst, Operand((1 << num_least_bits) - 1));
2442   }
2443 }
2444 
2445 
2446 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2447                                            Register src,
2448                                            int num_least_bits) {
2449   and_(dst, src, Operand((1 << num_least_bits) - 1));
2450 }
2451 
2452 
2453 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2454                                  int num_arguments,
2455                                  SaveFPRegsMode save_doubles) {
2456   // All parameters are on the stack.  r0 has the return value after the call.
2457 
2458   // If the expected number of arguments of the runtime function is
2459   // constant, we check that the actual number of arguments match the
2460   // expectation.
2461   CHECK(f->nargs < 0 || f->nargs == num_arguments);
2462 
2463   // TODO(1236192): Most runtime routines don't need the number of
2464   // arguments passed in because it is constant. At some point we
2465   // should remove this need and make the runtime routine entry code
2466   // smarter.
2467   mov(r0, Operand(num_arguments));
2468   mov(r1, Operand(ExternalReference(f, isolate())));
2469   CEntryStub stub(isolate(), 1, save_doubles);
2470   CallStub(&stub);
2471 }
2472 
2473 
2474 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2475                                            int num_arguments) {
2476   mov(r0, Operand(num_arguments));
2477   mov(r1, Operand(ext));
2478 
2479   CEntryStub stub(isolate(), 1);
2480   CallStub(&stub);
2481 }
2482 
2483 
2484 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2485   const Runtime::Function* function = Runtime::FunctionForId(fid);
2486   DCHECK_EQ(1, function->result_size);
2487   if (function->nargs >= 0) {
2488     // TODO(1236192): Most runtime routines don't need the number of
2489     // arguments passed in because it is constant. At some point we
2490     // should remove this need and make the runtime routine entry code
2491     // smarter.
2492     mov(r0, Operand(function->nargs));
2493   }
2494   JumpToExternalReference(ExternalReference(fid, isolate()));
2495 }
2496 
2497 
2498 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2499 #if defined(__thumb__)
2500   // Thumb mode builtin.
2501   DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2502 #endif
2503   mov(r1, Operand(builtin));
2504   CEntryStub stub(isolate(), 1);
2505   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2506 }
2507 
2508 
2509 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
2510                                    const CallWrapper& call_wrapper) {
2511   // You can't call a builtin without a valid frame.
2512   DCHECK(flag == JUMP_FUNCTION || has_frame());
2513 
2514   // Fake a parameter count to avoid emitting code to do the check.
2515   ParameterCount expected(0);
2516   LoadNativeContextSlot(native_context_index, r1);
2517   InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
2518 }
2519 
2520 
2521 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2522                                 Register scratch1, Register scratch2) {
2523   if (FLAG_native_code_counters && counter->Enabled()) {
2524     mov(scratch1, Operand(value));
2525     mov(scratch2, Operand(ExternalReference(counter)));
2526     str(scratch1, MemOperand(scratch2));
2527   }
2528 }
2529 
2530 
2531 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2532                                       Register scratch1, Register scratch2) {
2533   DCHECK(value > 0);
2534   if (FLAG_native_code_counters && counter->Enabled()) {
2535     mov(scratch2, Operand(ExternalReference(counter)));
2536     ldr(scratch1, MemOperand(scratch2));
2537     add(scratch1, scratch1, Operand(value));
2538     str(scratch1, MemOperand(scratch2));
2539   }
2540 }
2541 
2542 
2543 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2544                                       Register scratch1, Register scratch2) {
2545   DCHECK(value > 0);
2546   if (FLAG_native_code_counters && counter->Enabled()) {
2547     mov(scratch2, Operand(ExternalReference(counter)));
2548     ldr(scratch1, MemOperand(scratch2));
2549     sub(scratch1, scratch1, Operand(value));
2550     str(scratch1, MemOperand(scratch2));
2551   }
2552 }
2553 
2554 
2555 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2556   if (emit_debug_code())
2557     Check(cond, reason);
2558 }
2559 
2560 
2561 void MacroAssembler::AssertFastElements(Register elements) {
2562   if (emit_debug_code()) {
2563     DCHECK(!elements.is(ip));
2564     Label ok;
2565     push(elements);
2566     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2567     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2568     cmp(elements, ip);
2569     b(eq, &ok);
2570     LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2571     cmp(elements, ip);
2572     b(eq, &ok);
2573     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2574     cmp(elements, ip);
2575     b(eq, &ok);
2576     Abort(kJSObjectWithFastElementsMapHasSlowElements);
2577     bind(&ok);
2578     pop(elements);
2579   }
2580 }
2581 
2582 
2583 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2584   Label L;
2585   b(cond, &L);
2586   Abort(reason);
2587   // will not return here
2588   bind(&L);
2589 }
2590 
2591 
2592 void MacroAssembler::Abort(BailoutReason reason) {
2593   Label abort_start;
2594   bind(&abort_start);
2595 #ifdef DEBUG
2596   const char* msg = GetBailoutReason(reason);
2597   if (msg != NULL) {
2598     RecordComment("Abort message: ");
2599     RecordComment(msg);
2600   }
2601 
2602   if (FLAG_trap_on_abort) {
2603     stop(msg);
2604     return;
2605   }
2606 #endif
2607 
2608   mov(r0, Operand(Smi::FromInt(reason)));
2609   push(r0);
2610 
2611   // Disable stub call restrictions to always allow calls to abort.
2612   if (!has_frame_) {
2613     // We don't actually want to generate a pile of code for this, so just
2614     // claim there is a stack frame, without generating one.
2615     FrameScope scope(this, StackFrame::NONE);
2616     CallRuntime(Runtime::kAbort, 1);
2617   } else {
2618     CallRuntime(Runtime::kAbort, 1);
2619   }
2620   // will not return here
2621   if (is_const_pool_blocked()) {
2622     // If the calling code cares about the exact number of
2623     // instructions generated, we insert padding here to keep the size
2624     // of the Abort macro constant.
2625     static const int kExpectedAbortInstructions = 7;
2626     int abort_instructions = InstructionsGeneratedSince(&abort_start);
2627     DCHECK(abort_instructions <= kExpectedAbortInstructions);
2628     while (abort_instructions++ < kExpectedAbortInstructions) {
2629       nop();
2630     }
2631   }
2632 }
2633 
2634 
2635 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2636   if (context_chain_length > 0) {
2637     // Move up the chain of contexts to the context containing the slot.
2638     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2639     for (int i = 1; i < context_chain_length; i++) {
2640       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2641     }
2642   } else {
2643     // Slot is in the current function context.  Move it into the
2644     // destination register in case we store into it (the write barrier
2645     // cannot be allowed to destroy the context in cp).
2646     mov(dst, cp);
2647   }
2648 }
2649 
2650 
2651 void MacroAssembler::LoadTransitionedArrayMapConditional(
2652     ElementsKind expected_kind,
2653     ElementsKind transitioned_kind,
2654     Register map_in_out,
2655     Register scratch,
2656     Label* no_map_match) {
2657   DCHECK(IsFastElementsKind(expected_kind));
2658   DCHECK(IsFastElementsKind(transitioned_kind));
2659 
2660   // Check that the function's map is the same as the expected cached map.
2661   ldr(scratch, NativeContextMemOperand());
2662   ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2663   cmp(map_in_out, ip);
2664   b(ne, no_map_match);
2665 
2666   // Use the transitioned cached map.
2667   ldr(map_in_out,
2668       ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2669 }
2670 
2671 
2672 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2673   ldr(dst, NativeContextMemOperand());
2674   ldr(dst, ContextMemOperand(dst, index));
2675 }
2676 
2677 
2678 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2679                                                   Register map,
2680                                                   Register scratch) {
2681   // Load the initial map. The global functions all have initial maps.
2682   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2683   if (emit_debug_code()) {
2684     Label ok, fail;
2685     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2686     b(&ok);
2687     bind(&fail);
2688     Abort(kGlobalFunctionsMustHaveInitialMap);
2689     bind(&ok);
2690   }
2691 }
2692 
2693 
2694 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2695     Register reg,
2696     Register scratch,
2697     Label* not_power_of_two_or_zero) {
2698   sub(scratch, reg, Operand(1), SetCC);
2699   b(mi, not_power_of_two_or_zero);
2700   tst(scratch, reg);
2701   b(ne, not_power_of_two_or_zero);
2702 }
2703 
2704 
JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,Register scratch,Label * zero_and_neg,Label * not_power_of_two)2705 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2706     Register reg,
2707     Register scratch,
2708     Label* zero_and_neg,
2709     Label* not_power_of_two) {
2710   sub(scratch, reg, Operand(1), SetCC);
2711   b(mi, zero_and_neg);
2712   tst(scratch, reg);
2713   b(ne, not_power_of_two);
2714 }
2715 
2716 
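// The second tst below is predicated on eq, so it only executes when the
// first test found reg1 to be a smi; either failing check leaves ne set
// and takes the branch.  Two smi checks thus fold into three instructions
// with no scratch register.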
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}


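// SmiUntag with SetCC is an arithmetic shift right by the one-bit smi
// tag, so the tag bit is shifted out into the carry flag: carry clear
// means the value was a smi.  E.g. the tagged smi 6 (= 3 << 1) untags to
// 3 with C == 0, while a tagged heap object pointer leaves C == 1.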
void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}


void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, kOperandIsNotSmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAString);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAName);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAFunction);
    push(object);
    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
    pop(object);
    Check(eq, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotABoundFunction);
    push(object);
    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
    pop(object);
    Check(eq, kOperandIsNotABoundFunction);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    b(eq, &done_checking);
    ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}

void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}


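// Unique names are internalized strings and symbols.  Since
// kInternalizedTag == 0, an internalized string has neither the
// kIsNotStringMask nor the kIsNotInternalizedMask bit set, which the tst
// accepts directly; everything else must be exactly SYMBOL_TYPE.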
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  b(eq, &succeed);
  cmp(reg, Operand(SYMBOL_TYPE));
  b(ne, not_unique_name);

  bind(&succeed);
}


// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        TaggingMode tagging_mode,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store heap number map in the allocated object.
  if (tagging_mode == TAG_RESULT) {
    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch1,
                                     Register scratch2, Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
  str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
  str(value, FieldMemOperand(result, JSValue::kValueOffset));
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}


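// Copy strategy: copy single bytes until src is word-aligned, then move
// whole words, splitting each word into four byte stores when unaligned
// accesses to dst are not supported; any tail shorter than a word is
// again copied byte by byte.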
void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  cmp(length, Operand(kPointerSize));
  b(le, &byte_loop);

  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  b(eq, &word_loop);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(&align_loop_1);

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, kExpectingAlignmentForCopyBytes);
  }
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    str(scratch, MemOperand(dst, kPointerSize, PostIndex));
  } else {
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
  }
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  cmp(length, Operand::Zero());
  b(eq, &done);
  bind(&byte_loop_1);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
  bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  Label loop, entry;
  b(&entry);
  bind(&loop);
  str(filler, MemOperand(current_address, kPointerSize, PostIndex));
  bind(&entry);
  cmp(current_address, end_address);
  b(lo, &loop);
}


void MacroAssembler::CheckFor32DRegs(Register scratch) {
  mov(scratch, Operand(ExternalReference::cpu_features()));
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}


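// CheckFor32DRegs leaves ne set when d16-d31 are available.  The
// ne-conditional vstm/vldm below therefore touches the upper register
// bank only on 32-register cores, while the eq-conditional stack
// adjustment keeps the frame layout identical on 16-register cores.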
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}


void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch1, first, Operand(kFlatOneByteStringMask));
  and_(scratch2, second, Operand(kFlatOneByteStringMask));
  cmp(scratch1, Operand(kFlatOneByteStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
  b(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch, type, Operand(kFlatOneByteStringMask));
  cmp(scratch, Operand(kFlatOneByteStringTag));
  b(ne, failure);
}

static const int kRegisterPassedArguments = 4;


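// Example (soft-float ABI): 3 integer arguments plus 2 doubles occupy
// 3 + 2 * 2 = 7 register-sized slots; r0-r3 carry the first four, so the
// remaining 3 words are passed on the stack.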
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  SmiTst(string);
  Check(ne, kNonObject);

  ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
  ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));

  and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
  cmp(ip, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType);

  // The index is assumed to come in untagged.  Tag it to compare against
  // the string length without using a temp register; it is restored at
  // the end of this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, index, &index_tag_bad);
  b(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  ldr(ip, FieldMemOperand(string, String::kLengthOffset));
  cmp(index, ip);
  Check(lt, kIndexIsTooLarge);

  cmp(index, Operand(Smi::FromInt(0)));
  Check(ge, kIndexIsNegative);

  SmiUntag(index, index);
}


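// Example: with 8-byte frame alignment and 3 stack-passed words, sp drops
// by 4 words (3 arguments plus the saved sp) and is then rounded down to
// the alignment boundary; the old sp is stashed in the highest reserved
// slot so CallCFunction can restore it with a single load.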
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
  DCHECK(src.is(d0));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);
}


void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  DCHECK(src1.is(d0));
  DCHECK(src2.is(d1));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src1);
    vmov(r2, r3, src2);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering this code.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


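// Bfc clears the low kPageSizeBits of the object address, which yields
// the start of its page, where the MemoryChunk header and its flags word
// live.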
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  // Shift the mask left by one by adding it to itself.
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  jmp(&other_color);

  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&other_color);
}


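// Decompose the address into (page, cell, bit): masking off the low
// kPageSizeBits gives the page start, the kBitsPerCellLog2 address bits
// just above the pointer-size shift select the bit within a cell, and
// the bits above those index the bitmap cell itself.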
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register mask_scratch, Register load_scratch,
                                 Label* value_is_white) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(eq, value_is_white);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}


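// Clamp to [0, 255]: inputs >= 255 (including +infinity) take the early
// exit with 255; for everything below, the unsigned vcvt saturates, so
// negative inputs (and NaN) convert to 0 while in-range values round to
// nearest.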
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DwVfpRegister input_reg,
                                        LowDwVfpRegister double_scratch) {
  Label done;

  // Handle inputs >= 255 (including +infinity).
  Vmov(double_scratch, 255.0, result_reg);
  mov(result_reg, Operand(255));
  VFPCompareAndSetFlags(input_reg, double_scratch);
  b(ge, &done);

  // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
  // rounding mode will provide the correct result.
  vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
  vmov(result_reg, double_scratch.low());

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ldr(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(r2, r0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
  b(eq, call_runtime);

  jmp(&start);

  bind(&next);
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(0)));
  b(ne, call_runtime);

  bind(&start);

  // Check that there are no elements. Register r2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
  b(ne, call_runtime);

  bind(&no_elements);
  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
  cmp(r2, null_value);
  b(ne, &next);
}


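// An AllocationMemento, when present, sits immediately after the JSArray
// in new space.  The two bounds checks against new-space start and the
// current allocation top ensure the candidate word is a valid address to
// read; the final cmp leaves eq set when a memento map is found.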
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  add(scratch_reg, receiver_reg,
      Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Operand(new_space_start));
  b(lt, no_memento_found);
  mov(ip, Operand(new_space_allocation_top));
  ldr(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  b(gt, no_memento_found);
  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  cmp(scratch_reg,
      Operand(isolate()->factory()->allocation_memento_map()));
}


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  // Start the walk at the object's prototype; the receiver itself is not
  // checked.
  mov(current, object);
  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  b(eq, &end);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));

  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  cmp(scratch1, Operand(JS_OBJECT_TYPE));
  b(lo, found);

  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
  b(eq, found);
  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  b(ne, &loop_again);

  bind(&end);
}


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif


CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


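// Signed division by a constant is strength-reduced to a multiply by a
// fixed-point "magic" reciprocal plus shifts (cf. Hacker's Delight).
// E.g. for divisor 3 the multiplier is 0x55555556 with shift 0: smmul
// takes the high 32 bits of dividend * multiplier, and adding the sign
// bit (dividend >> 31, logical) corrects the result toward zero.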
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(ip));
  DCHECK(!result.is(ip));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
  mov(ip, Operand(mag.multiplier));
  bool neg = (mag.multiplier & (1U << 31)) != 0;
  if (divisor > 0 && neg) {
    smmla(result, dividend, ip, dividend);
  } else {
    smmul(result, dividend, ip);
    if (divisor < 0 && !neg && mag.multiplier > 0) {
      sub(result, result, Operand(dividend));
    }
  }
  if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
  add(result, result, Operand(dividend, LSR, 31));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM