1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_ARM
6 
7 #include "src/code-stubs.h"
8 #include "src/api-arguments.h"
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 #include "src/isolate.h"
16 #include "src/regexp/jsregexp.h"
17 #include "src/regexp/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
19 
20 #include "src/arm/code-stubs-arm.h"
21 
22 namespace v8 {
23 namespace internal {
24 
25 #define __ ACCESS_MASM(masm)
26 
27 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
28   __ lsl(r5, r0, Operand(kPointerSizeLog2));
29   __ str(r1, MemOperand(sp, r5));
30   __ Push(r1);
31   __ Push(r2);
32   __ add(r0, r0, Operand(3));
33   __ TailCallRuntime(Runtime::kNewArray);
34 }
35 
36 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
37                                           Condition cond);
38 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
39                                     Register lhs,
40                                     Register rhs,
41                                     Label* lhs_not_nan,
42                                     Label* slow,
43                                     bool strict);
44 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
45                                            Register lhs,
46                                            Register rhs);
47 
48 
49 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
50                                                ExternalReference miss) {
51   // Update the static counter each time a new code stub is generated.
52   isolate()->counters()->code_stubs()->Increment();
53 
54   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
55   int param_count = descriptor.GetRegisterParameterCount();
56   {
57     // Call the runtime system in a fresh internal frame.
58     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
59     DCHECK(param_count == 0 ||
60            r0.is(descriptor.GetRegisterParameter(param_count - 1)));
61     // Push arguments
62     for (int i = 0; i < param_count; ++i) {
63       __ push(descriptor.GetRegisterParameter(i));
64     }
65     __ CallExternalReference(miss, param_count);
66   }
67 
68   __ Ret();
69 }
70 
71 
72 void DoubleToIStub::Generate(MacroAssembler* masm) {
73   Label out_of_range, only_low, negate, done;
74   Register input_reg = source();
75   Register result_reg = destination();
76   DCHECK(is_truncating());
77 
78   int double_offset = offset();
79   // Account for saved regs if input is sp.
80   if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
81 
82   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
83   Register scratch_low =
84       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
85   Register scratch_high =
86       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
87   LowDwVfpRegister double_scratch = kScratchDoubleReg;
88 
89   __ Push(scratch_high, scratch_low, scratch);
90 
91   if (!skip_fastpath()) {
92     // Load double input.
93     __ vldr(double_scratch, MemOperand(input_reg, double_offset));
94     __ vmov(scratch_low, scratch_high, double_scratch);
95 
96     // Do fast-path convert from double to int.
97     __ vcvt_s32_f64(double_scratch.low(), double_scratch);
98     __ vmov(result_reg, double_scratch.low());
99 
100     // If result is not saturated (0x7fffffff or 0x80000000), we are done.
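    // After subtracting 1, the saturated values 0x7fffffff and 0x80000000 become
    // 0x7ffffffe and 0x7fffffff, the only results not signed-less-than 0x7ffffffe.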
101     __ sub(scratch, result_reg, Operand(1));
102     __ cmp(scratch, Operand(0x7ffffffe));
103     __ b(lt, &done);
104   } else {
105     // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
106     // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
107     if (double_offset == 0) {
108       __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
109     } else {
110       __ ldr(scratch_low, MemOperand(input_reg, double_offset));
111       __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
112     }
113   }
114 
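  // Extract the biased exponent field from the high word of the double.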
115   __ Ubfx(scratch, scratch_high,
116          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
117   // Load scratch with exponent - 1. This is faster than loading
118   // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
119   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
120   __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
121   // If exponent is greater than or equal to 84, the 32 less significant
122   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
123   // the result is 0.
124   // Compare exponent with 84 (compare exponent - 1 with 83).
125   __ cmp(scratch, Operand(83));
126   __ b(ge, &out_of_range);
127 
128   // If we reach this code, 31 <= exponent <= 83.
129   // So, we don't have to handle cases where 0 <= exponent <= 20 for
130   // which we would need to shift right the high part of the mantissa.
131   // Scratch contains exponent - 1.
132   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
133   __ rsb(scratch, scratch, Operand(51), SetCC);
134   __ b(ls, &only_low);
135   // 21 <= exponent <= 51, shift scratch_low and scratch_high
136   // to generate the result.
137   __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
138   // Scratch contains: 52 - exponent.
139   // We need: exponent - 20.
140   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
141   __ rsb(scratch, scratch, Operand(32));
142   __ Ubfx(result_reg, scratch_high,
143           0, HeapNumber::kMantissaBitsInTopWord);
144   // Set the implicit 1 before the mantissa part in scratch_high.
145   __ orr(result_reg, result_reg,
146          Operand(1 << HeapNumber::kMantissaBitsInTopWord));
147   __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
148   __ b(&negate);
149 
150   __ bind(&out_of_range);
151   __ mov(result_reg, Operand::Zero());
152   __ b(&done);
153 
154   __ bind(&only_low);
155   // 52 <= exponent <= 83, shift only scratch_low.
156   // On entry, scratch contains: 52 - exponent.
157   __ rsb(scratch, scratch, Operand::Zero());
158   __ mov(result_reg, Operand(scratch_low, LSL, scratch));
159 
160   __ bind(&negate);
161   // If input was positive, scratch_high ASR 31 equals 0 and
162   // scratch_high LSR 31 equals zero.
163   // New result = (result eor 0) + 0 = result.
164   // If the input was negative, we have to negate the result.
165   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
166   // New result = (result eor 0xffffffff) + 1 = 0 - result.
167   __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
168   __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
169 
170   __ bind(&done);
171 
172   __ Pop(scratch_high, scratch_low, scratch);
173   __ Ret();
174 }
175 
176 
177 // Handle the case where the lhs and rhs are the same object.
178 // Equality is almost reflexive (everything but NaN), so this is a test
179 // for "identity and not NaN".
180 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
181                                           Condition cond) {
182   Label not_identical;
183   Label heap_number, return_equal;
184   __ cmp(r0, r1);
185   __ b(ne, &not_identical);
186 
187   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
188   // so we do the second best thing - test it ourselves.
189   // They are both equal and they are not both Smis so both of them are not
190   // Smis.  If it's not a heap number, then return equal.
191   if (cond == lt || cond == gt) {
192     // Call runtime on identical JSObjects.
193     __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
194     __ b(ge, slow);
195     // Call runtime on identical symbols since we need to throw a TypeError.
196     __ cmp(r4, Operand(SYMBOL_TYPE));
197     __ b(eq, slow);
198   } else {
199     __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
200     __ b(eq, &heap_number);
201     // Comparing JS objects with <=, >= is complicated.
202     if (cond != eq) {
203       __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
204       __ b(ge, slow);
205       // Call runtime on identical symbols since we need to throw a TypeError.
206       __ cmp(r4, Operand(SYMBOL_TYPE));
207       __ b(eq, slow);
208       // Normally here we fall through to return_equal, but undefined is
209       // special: (undefined == undefined) == true, but
210       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
211       if (cond == le || cond == ge) {
212         __ cmp(r4, Operand(ODDBALL_TYPE));
213         __ b(ne, &return_equal);
214         __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
215         __ cmp(r0, r2);
216         __ b(ne, &return_equal);
217         if (cond == le) {
218           // undefined <= undefined should fail.
219           __ mov(r0, Operand(GREATER));
220         } else  {
221           // undefined >= undefined should fail.
222           __ mov(r0, Operand(LESS));
223         }
224         __ Ret();
225       }
226     }
227   }
228 
229   __ bind(&return_equal);
230   if (cond == lt) {
231     __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
232   } else if (cond == gt) {
233     __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
234   } else {
235     __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
236   }
237   __ Ret();
238 
239   // For less and greater we don't have to check for NaN since the result of
240   // x < x is false regardless.  For the others here is some code to check
241   // for NaN.
242   if (cond != lt && cond != gt) {
243     __ bind(&heap_number);
244     // It is a heap number, so return non-equal if it's NaN and equal if it's
245     // not NaN.
246 
247     // The representation of NaN values has all exponent bits (52..62) set,
248     // and not all mantissa bits (0..51) clear.
249     // Read top bits of double representation (second word of value).
250     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
251     // Test that exponent bits are all set.
252     __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
253     // NaNs have all-one exponents so they sign extend to -1.
254     __ cmp(r3, Operand(-1));
255     __ b(ne, &return_equal);
256 
257     // Shift out flag and all exponent bits, retaining only mantissa.
258     __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
259     // Or with all low-bits of mantissa.
260     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
261     __ orr(r0, r3, Operand(r2), SetCC);
262     // For equal we already have the right value in r0:  Return zero (equal)
263     // if all bits in mantissa are zero (it's an Infinity) and non-zero if
264     // not (it's a NaN).  For <= and >= we need to load r0 with the failing
265     // value if it's a NaN.
266     if (cond != eq) {
267       // All-zero means Infinity means equal.
268       __ Ret(eq);
269       if (cond == le) {
270         __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
271       } else {
272         __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
273       }
274     }
275     __ Ret();
276   }
277   // No fall through here.
278 
279   __ bind(&not_identical);
280 }
281 
282 
283 // See comment at call site.
284 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
285                                     Register lhs,
286                                     Register rhs,
287                                     Label* lhs_not_nan,
288                                     Label* slow,
289                                     bool strict) {
290   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
291          (lhs.is(r1) && rhs.is(r0)));
292 
293   Label rhs_is_smi;
294   __ JumpIfSmi(rhs, &rhs_is_smi);
295 
296   // Lhs is a Smi.  Check whether the rhs is a heap number.
297   __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
298   if (strict) {
299     // If rhs is not a number and lhs is a Smi then strict equality cannot
300     // succeed.  Return non-equal.
301     // If rhs is r0 then there is already a non zero value in it.
302     if (!rhs.is(r0)) {
303       __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
304     }
305     __ Ret(ne);
306   } else {
307     // Smi compared non-strictly with a non-Smi non-heap-number.  Call
308     // the runtime.
309     __ b(ne, slow);
310   }
311 
312   // Lhs is a smi, rhs is a number.
313   // Convert lhs to a double in d7.
314   __ SmiToDouble(d7, lhs);
315   // Load the double from rhs, tagged HeapNumber r0, to d6.
316   __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
317 
318   // We now have both loaded as doubles but we can skip the lhs nan check
319   // since it's a smi.
320   __ jmp(lhs_not_nan);
321 
322   __ bind(&rhs_is_smi);
323   // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
324   __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
325   if (strict) {
326     // If lhs is not a number and rhs is a smi then strict equality cannot
327     // succeed.  Return non-equal.
328     // If lhs is r0 then there is already a non zero value in it.
329     if (!lhs.is(r0)) {
330       __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
331     }
332     __ Ret(ne);
333   } else {
334     // Smi compared non-strictly with a non-smi non-heap-number.  Call
335     // the runtime.
336     __ b(ne, slow);
337   }
338 
339   // Rhs is a smi, lhs is a heap number.
340   // Load the double from lhs, tagged HeapNumber r1, to d7.
341   __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
342   // Convert rhs to a double in d6.
343   __ SmiToDouble(d6, rhs);
344   // Fall through to both_loaded_as_doubles.
345 }
346 
347 
348 // See comment at call site.
349 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
350                                            Register lhs,
351                                            Register rhs) {
352     DCHECK((lhs.is(r0) && rhs.is(r1)) ||
353            (lhs.is(r1) && rhs.is(r0)));
354 
355     // If either operand is a JS object or an oddball value, then they are
356     // not equal since their pointers are different.
357     // There is no test for undetectability in strict equality.
358     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
359     Label first_non_object;
360     // Get the type of the first operand into r2 and compare it with
361     // FIRST_JS_RECEIVER_TYPE.
362     __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
363     __ b(lt, &first_non_object);
364 
365     // Return non-zero (r0 is not zero)
366     Label return_not_equal;
367     __ bind(&return_not_equal);
368     __ Ret();
369 
370     __ bind(&first_non_object);
371     // Check for oddballs: true, false, null, undefined.
372     __ cmp(r2, Operand(ODDBALL_TYPE));
373     __ b(eq, &return_not_equal);
374 
375     __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
376     __ b(ge, &return_not_equal);
377 
378     // Check for oddballs: true, false, null, undefined.
379     __ cmp(r3, Operand(ODDBALL_TYPE));
380     __ b(eq, &return_not_equal);
381 
382     // Now that we have the types we might as well check for
383     // internalized-internalized.
384     STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
385     __ orr(r2, r2, Operand(r3));
386     __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
387     __ b(eq, &return_not_equal);
388 }
389 
390 
391 // See comment at call site.
392 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
393                                        Register lhs,
394                                        Register rhs,
395                                        Label* both_loaded_as_doubles,
396                                        Label* not_heap_numbers,
397                                        Label* slow) {
398   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
399          (lhs.is(r1) && rhs.is(r0)));
400 
401   __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
402   __ b(ne, not_heap_numbers);
403   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
404   __ cmp(r2, r3);
405   __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
406 
407   // Both are heap numbers.  Load them up then jump to the code we have
408   // for that.
409   __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
410   __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
411   __ jmp(both_loaded_as_doubles);
412 }
413 
414 
415 // Fast negative check for internalized-to-internalized equality or receiver
416 // equality. Also handles the undetectable receiver to null/undefined
417 // comparison.
418 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
419                                                      Register lhs, Register rhs,
420                                                      Label* possible_strings,
421                                                      Label* runtime_call) {
422   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
423          (lhs.is(r1) && rhs.is(r0)));
424 
425   // r2 is object type of rhs.
426   Label object_test, return_equal, return_unequal, undetectable;
427   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
428   __ tst(r2, Operand(kIsNotStringMask));
429   __ b(ne, &object_test);
430   __ tst(r2, Operand(kIsNotInternalizedMask));
431   __ b(ne, possible_strings);
432   __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
433   __ b(ge, runtime_call);
434   __ tst(r3, Operand(kIsNotInternalizedMask));
435   __ b(ne, possible_strings);
436 
437   // Both are internalized. We already checked they weren't the same pointer so
438   // they are not equal. Return non-equal by returning the non-zero object
439   // pointer in r0.
440   __ Ret();
441 
442   __ bind(&object_test);
443   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
444   __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
445   __ ldrb(r4, FieldMemOperand(r2, Map::kBitFieldOffset));
446   __ ldrb(r5, FieldMemOperand(r3, Map::kBitFieldOffset));
447   __ tst(r4, Operand(1 << Map::kIsUndetectable));
448   __ b(ne, &undetectable);
449   __ tst(r5, Operand(1 << Map::kIsUndetectable));
450   __ b(ne, &return_unequal);
451 
452   __ CompareInstanceType(r2, r2, FIRST_JS_RECEIVER_TYPE);
453   __ b(lt, runtime_call);
454   __ CompareInstanceType(r3, r3, FIRST_JS_RECEIVER_TYPE);
455   __ b(lt, runtime_call);
456 
457   __ bind(&return_unequal);
458   // Return non-equal by returning the non-zero object pointer in r0.
459   __ Ret();
460 
461   __ bind(&undetectable);
462   __ tst(r5, Operand(1 << Map::kIsUndetectable));
463   __ b(eq, &return_unequal);
464 
465   // If both sides are JSReceivers, then the result is false according to
466   // the HTML specification, which says that only comparisons with null or
467   // undefined are affected by special casing for document.all.
468   __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
469   __ b(eq, &return_equal);
470   __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
471   __ b(ne, &return_unequal);
472 
473   __ bind(&return_equal);
474   __ mov(r0, Operand(EQUAL));
475   __ Ret();
476 }
477 
478 
479 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
480                                          Register scratch,
481                                          CompareICState::State expected,
482                                          Label* fail) {
483   Label ok;
484   if (expected == CompareICState::SMI) {
485     __ JumpIfNotSmi(input, fail);
486   } else if (expected == CompareICState::NUMBER) {
487     __ JumpIfSmi(input, &ok);
488     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
489                 DONT_DO_SMI_CHECK);
490   }
491   // We could be strict about internalized/non-internalized here, but as long as
492   // hydrogen doesn't care, the stub doesn't have to care either.
493   __ bind(&ok);
494 }
495 
496 
497 // On entry r1 and r2 are the values to be compared.
498 // On exit r0 is 0, positive or negative to indicate the result of
499 // the comparison.
500 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
501   Register lhs = r1;
502   Register rhs = r0;
503   Condition cc = GetCondition();
504 
505   Label miss;
506   CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
507   CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
508 
509   Label slow;  // Call builtin.
510   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
511 
512   Label not_two_smis, smi_done;
513   __ orr(r2, r1, r0);
514   __ JumpIfNotSmi(r2, &not_two_smis);
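  // Both operands are smis: ASR #1 drops the one-bit smi tag, and the sign of
  // the untagged difference left in r0 encodes less/equal/greater.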
515   __ mov(r1, Operand(r1, ASR, 1));
516   __ sub(r0, r1, Operand(r0, ASR, 1));
517   __ Ret();
518   __ bind(&not_two_smis);
519 
520   // NOTICE! This code is only reached after a smi-fast-case check, so
521   // it is certain that at least one operand isn't a smi.
522 
523   // Handle the case where the objects are identical.  Either returns the answer
524   // or goes to slow.  Only falls through if the objects were not identical.
525   EmitIdenticalObjectComparison(masm, &slow, cc);
526 
527   // If either is a Smi (we know that not both are), then they can only
528   // be strictly equal if the other is a HeapNumber.
529   STATIC_ASSERT(kSmiTag == 0);
530   DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
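  // With a zero smi tag, the AND of both values has a clear tag bit iff at
  // least one of the operands is a smi.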
531   __ and_(r2, lhs, Operand(rhs));
532   __ JumpIfNotSmi(r2, &not_smis);
533   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
534   // 1) Return the answer.
535   // 2) Go to slow.
536   // 3) Fall through to both_loaded_as_doubles.
537   // 4) Jump to lhs_not_nan.
538   // In cases 3 and 4 we have found out we were dealing with a number-number
539   // comparison. The double values of the numbers have been loaded into d7 (lhs)
540   // and d6 (rhs).
541   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
542 
543   __ bind(&both_loaded_as_doubles);
544   // The arguments have been converted to doubles and stored in d6 and d7.
545   __ bind(&lhs_not_nan);
546   Label no_nan;
547   __ VFPCompareAndSetFlags(d7, d6);
548   Label nan;
549   __ b(vs, &nan);
550   __ mov(r0, Operand(EQUAL), LeaveCC, eq);
551   __ mov(r0, Operand(LESS), LeaveCC, lt);
552   __ mov(r0, Operand(GREATER), LeaveCC, gt);
553   __ Ret();
554 
555   __ bind(&nan);
556   // If one of the sides was a NaN then the v flag is set.  Load r0 with
557   // whatever it takes to make the comparison fail, since comparisons with NaN
558   // always fail.
559   if (cc == lt || cc == le) {
560     __ mov(r0, Operand(GREATER));
561   } else {
562     __ mov(r0, Operand(LESS));
563   }
564   __ Ret();
565 
566   __ bind(&not_smis);
567   // At this point we know we are dealing with two different objects,
568   // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
569   if (strict()) {
570     // This returns non-equal for some object types, or falls through if it
571     // was not lucky.
572     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
573   }
574 
575   Label check_for_internalized_strings;
576   Label flat_string_check;
577   // Check for heap-number-heap-number comparison.  Can jump to slow case,
578   // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
579   // that case.  If the inputs are not doubles then jumps to
580   // check_for_internalized_strings.
581   // In this case r2 will contain the type of rhs_.  Never falls through.
582   EmitCheckForTwoHeapNumbers(masm,
583                              lhs,
584                              rhs,
585                              &both_loaded_as_doubles,
586                              &check_for_internalized_strings,
587                              &flat_string_check);
588 
589   __ bind(&check_for_internalized_strings);
590   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
591   // internalized strings.
592   if (cc == eq && !strict()) {
593     // Returns an answer for two internalized strings or two detectable objects.
594     // Otherwise jumps to string case or not both strings case.
595     // Assumes that r2 is the type of rhs_ on entry.
596     EmitCheckForInternalizedStringsOrObjects(
597         masm, lhs, rhs, &flat_string_check, &slow);
598   }
599 
600   // Check for both being sequential one-byte strings,
601   // and inline if that is the case.
602   __ bind(&flat_string_check);
603 
604   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
605 
606   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
607                       r3);
608   if (cc == eq) {
609     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
610   } else {
611     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
612                                                     r5);
613   }
614   // Never falls through to here.
615 
616   __ bind(&slow);
617 
618   if (cc == eq) {
619     {
620       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
621       __ Push(cp);
622       __ Call(strict() ? isolate()->builtins()->StrictEqual()
623                        : isolate()->builtins()->Equal(),
624               RelocInfo::CODE_TARGET);
625       __ Pop(cp);
626     }
627     // Turn true into 0 and false into some non-zero value.
628     STATIC_ASSERT(EQUAL == 0);
629     __ LoadRoot(r1, Heap::kTrueValueRootIndex);
630     __ sub(r0, r0, r1);
631     __ Ret();
632   } else {
633     __ Push(lhs, rhs);
634     int ncr;  // NaN compare result
635     if (cc == lt || cc == le) {
636       ncr = GREATER;
637     } else {
638       DCHECK(cc == gt || cc == ge);  // remaining cases
639       ncr = LESS;
640     }
641     __ mov(r0, Operand(Smi::FromInt(ncr)));
642     __ push(r0);
643 
644     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
645     // tagged as a small integer.
646     __ TailCallRuntime(Runtime::kCompare);
647   }
648 
649   __ bind(&miss);
650   GenerateMiss(masm);
651 }
652 
653 
654 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
655   // We don't allow a GC during a store buffer overflow so there is no need to
656   // store the registers in any particular way, but we do have to store and
657   // restore them.
658   __ stm(db_w, sp, kCallerSaved | lr.bit());
659 
660   const Register scratch = r1;
661 
662   if (save_doubles()) {
663     __ SaveFPRegs(sp, scratch);
664   }
665   const int argument_count = 1;
666   const int fp_argument_count = 0;
667 
668   AllowExternalCallThatCantCauseGC scope(masm);
669   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
670   __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
671   __ CallCFunction(
672       ExternalReference::store_buffer_overflow_function(isolate()),
673       argument_count);
674   if (save_doubles()) {
675     __ RestoreFPRegs(sp, scratch);
676   }
677   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
678 }
679 
680 void MathPowStub::Generate(MacroAssembler* masm) {
681   const Register exponent = MathPowTaggedDescriptor::exponent();
682   DCHECK(exponent.is(r2));
683   const LowDwVfpRegister double_base = d0;
684   const LowDwVfpRegister double_exponent = d1;
685   const LowDwVfpRegister double_result = d2;
686   const LowDwVfpRegister double_scratch = d3;
687   const SwVfpRegister single_scratch = s6;
688   const Register scratch = r9;
689   const Register scratch2 = r4;
690 
691   Label call_runtime, done, int_exponent;
692   if (exponent_type() == TAGGED) {
693     // Base is already in double_base.
694     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
695 
696     __ vldr(double_exponent,
697             FieldMemOperand(exponent, HeapNumber::kValueOffset));
698   }
699 
700   if (exponent_type() != INTEGER) {
701     // Detect integer exponents stored as double.
702     __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
703     __ b(eq, &int_exponent);
704 
705     __ push(lr);
706     {
707       AllowExternalCallThatCantCauseGC scope(masm);
708       __ PrepareCallCFunction(0, 2, scratch);
709       __ MovToFloatParameters(double_base, double_exponent);
710       __ CallCFunction(
711           ExternalReference::power_double_double_function(isolate()), 0, 2);
712     }
713     __ pop(lr);
714     __ MovFromFloatResult(double_result);
715     __ b(&done);
716   }
717 
718   // Calculate power with integer exponent.
719   __ bind(&int_exponent);
720 
721   // Get two copies of exponent in the registers scratch and exponent.
722   if (exponent_type() == INTEGER) {
723     __ mov(scratch, exponent);
724   } else {
725     // Exponent has previously been stored into scratch as untagged integer.
726     __ mov(exponent, scratch);
727   }
728   __ vmov(double_scratch, double_base);  // Back up base.
729   __ vmov(double_result, 1.0, scratch2);
730 
731   // Get absolute value of exponent.
732   __ cmp(scratch, Operand::Zero());
733   __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
734 
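  // Binary exponentiation (square-and-multiply): shift the exponent right one
  // bit at a time; when the bit shifted into the carry is set, multiply the
  // result by the current power of the base; square that power while any
  // exponent bits remain.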
735   Label while_true;
736   __ bind(&while_true);
737   __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
738   __ vmul(double_result, double_result, double_scratch, cs);
739   __ vmul(double_scratch, double_scratch, double_scratch, ne);
740   __ b(ne, &while_true);
741 
742   __ cmp(exponent, Operand::Zero());
743   __ b(ge, &done);
744   __ vmov(double_scratch, 1.0, scratch);
745   __ vdiv(double_result, double_scratch, double_result);
746   // Test whether result is zero.  Bail out to check for subnormal result.
747   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
748   __ VFPCompareAndSetFlags(double_result, 0.0);
749   __ b(ne, &done);
750   // double_exponent may not contain the exponent value if the input was a
751   // smi.  We set it with the exponent value before bailing out.
752   __ vmov(single_scratch, exponent);
753   __ vcvt_f64_s32(double_exponent, single_scratch);
754 
755   // Returning or bailing out.
756   __ push(lr);
757   {
758     AllowExternalCallThatCantCauseGC scope(masm);
759     __ PrepareCallCFunction(0, 2, scratch);
760     __ MovToFloatParameters(double_base, double_exponent);
761     __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
762                      0, 2);
763   }
764   __ pop(lr);
765   __ MovFromFloatResult(double_result);
766 
767   __ bind(&done);
768   __ Ret();
769 }
770 
771 bool CEntryStub::NeedsImmovableCode() {
772   return true;
773 }
774 
775 
776 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
777   CEntryStub::GenerateAheadOfTime(isolate);
778   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
779   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
780   CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
781   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
782   CreateWeakCellStub::GenerateAheadOfTime(isolate);
783   BinaryOpICStub::GenerateAheadOfTime(isolate);
784   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
785   StoreFastElementStub::GenerateAheadOfTime(isolate);
786 }
787 
788 
789 void CodeStub::GenerateFPStubs(Isolate* isolate) {
790   // Generate if not already in cache.
791   SaveFPRegsMode mode = kSaveFPRegs;
792   CEntryStub(isolate, 1, mode).GetCode();
793   StoreBufferOverflowStub(isolate, mode).GetCode();
794 }
795 
796 
797 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
798   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
799   stub.GetCode();
800 }
801 
802 
803 void CEntryStub::Generate(MacroAssembler* masm) {
804   // Called from JavaScript; parameters are on stack as if calling JS function.
805   // r0: number of arguments including receiver
806   // r1: pointer to builtin function
807   // fp: frame pointer  (restored after C call)
808   // sp: stack pointer  (restored as callee's sp after C call)
809   // cp: current context  (C callee-saved)
810   //
811   // If argv_in_register():
812   // r2: pointer to the first argument
813   ProfileEntryHookStub::MaybeCallEntryHook(masm);
814 
815   __ mov(r5, Operand(r1));
816 
817   if (argv_in_register()) {
818     // Move argv into the correct register.
819     __ mov(r1, Operand(r2));
820   } else {
821     // Compute the argv pointer in a callee-saved register.
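    // That is argv = sp + (argc - 1) * kPointerSize, the address of the first
    // argument.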
822     __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
823     __ sub(r1, r1, Operand(kPointerSize));
824   }
825 
826   // Enter the exit frame that transitions from JavaScript to C++.
827   FrameScope scope(masm, StackFrame::MANUAL);
828   __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
829                                            ? StackFrame::BUILTIN_EXIT
830                                            : StackFrame::EXIT);
831 
832   // Store a copy of argc in callee-saved registers for later.
833   __ mov(r4, Operand(r0));
834 
835   // r0, r4: number of arguments including receiver  (C callee-saved)
836   // r1: pointer to the first argument (C callee-saved)
837   // r5: pointer to builtin function  (C callee-saved)
838 
839   int frame_alignment = MacroAssembler::ActivationFrameAlignment();
840   int frame_alignment_mask = frame_alignment - 1;
841 #if V8_HOST_ARCH_ARM
842   if (FLAG_debug_code) {
843     if (frame_alignment > kPointerSize) {
844       Label alignment_as_expected;
845       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
846       __ tst(sp, Operand(frame_alignment_mask));
847       __ b(eq, &alignment_as_expected);
848       // Don't use Check here, as it will call Runtime_Abort re-entering here.
849       __ stop("Unexpected alignment");
850       __ bind(&alignment_as_expected);
851     }
852   }
853 #endif
854 
855   // Call C built-in.
856   int result_stack_size;
857   if (result_size() <= 2) {
858     // r0 = argc, r1 = argv, r2 = isolate
859     __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
860     result_stack_size = 0;
861   } else {
862     DCHECK_EQ(3, result_size());
863     // Allocate additional space for the result.
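    // Round the result buffer up to the activation frame alignment so that sp
    // stays aligned across the call.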
864     result_stack_size =
865         ((result_size() * kPointerSize) + frame_alignment_mask) &
866         ~frame_alignment_mask;
867     __ sub(sp, sp, Operand(result_stack_size));
868 
869     // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
870     __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
871     __ mov(r2, Operand(r1));
872     __ mov(r1, Operand(r0));
873     __ mov(r0, Operand(sp));
874   }
875 
876   // To let the GC traverse the return address of the exit frames, we need to
877   // know where the return address is. The CEntryStub is unmovable, so
878   // we can store the address on the stack to be able to find it again and
879   // we never have to restore it, because it will not change.
880   // Compute the return address in lr to return to after the jump below. Pc is
881   // already at '+ 8' from the current instruction but return is after three
882   // instructions so add another 4 to pc to get the return address.
883   {
884     // Prevent literal pool emission before return address.
885     Assembler::BlockConstPoolScope block_const_pool(masm);
886     __ add(lr, pc, Operand(4));
887     __ str(lr, MemOperand(sp, result_stack_size));
888     __ Call(r5);
889   }
890   if (result_size() > 2) {
891     DCHECK_EQ(3, result_size());
892     // Read result values stored on stack.
893     __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
894     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
895     __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
896   }
897   // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
898 
899   // Check result for exception sentinel.
900   Label exception_returned;
901   __ CompareRoot(r0, Heap::kExceptionRootIndex);
902   __ b(eq, &exception_returned);
903 
904   // Check that there is no pending exception, otherwise we
905   // should have returned the exception sentinel.
906   if (FLAG_debug_code) {
907     Label okay;
908     ExternalReference pending_exception_address(
909         Isolate::kPendingExceptionAddress, isolate());
910     __ mov(r3, Operand(pending_exception_address));
911     __ ldr(r3, MemOperand(r3));
912     __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
913     // Cannot use check here as it attempts to generate call into runtime.
914     __ b(eq, &okay);
915     __ stop("Unexpected pending exception");
916     __ bind(&okay);
917   }
918 
919   // Exit C frame and return.
920   // r0:r1: result
921   // sp: stack pointer
922   // fp: frame pointer
923   Register argc;
924   if (argv_in_register()) {
925     // We don't want to pop arguments so set argc to no_reg.
926     argc = no_reg;
927   } else {
928     // Callee-saved register r4 still holds argc.
929     argc = r4;
930   }
931   __ LeaveExitFrame(save_doubles(), argc, true);
932   __ mov(pc, lr);
933 
934   // Handling of exception.
935   __ bind(&exception_returned);
936 
937   ExternalReference pending_handler_context_address(
938       Isolate::kPendingHandlerContextAddress, isolate());
939   ExternalReference pending_handler_code_address(
940       Isolate::kPendingHandlerCodeAddress, isolate());
941   ExternalReference pending_handler_offset_address(
942       Isolate::kPendingHandlerOffsetAddress, isolate());
943   ExternalReference pending_handler_fp_address(
944       Isolate::kPendingHandlerFPAddress, isolate());
945   ExternalReference pending_handler_sp_address(
946       Isolate::kPendingHandlerSPAddress, isolate());
947 
948   // Ask the runtime for help to determine the handler. This will set r0 to
949   // contain the current pending exception, don't clobber it.
950   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
951                                  isolate());
952   {
953     FrameScope scope(masm, StackFrame::MANUAL);
954     __ PrepareCallCFunction(3, 0, r0);
955     __ mov(r0, Operand(0));
956     __ mov(r1, Operand(0));
957     __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
958     __ CallCFunction(find_handler, 3);
959   }
960 
961   // Retrieve the handler context, SP and FP.
962   __ mov(cp, Operand(pending_handler_context_address));
963   __ ldr(cp, MemOperand(cp));
964   __ mov(sp, Operand(pending_handler_sp_address));
965   __ ldr(sp, MemOperand(sp));
966   __ mov(fp, Operand(pending_handler_fp_address));
967   __ ldr(fp, MemOperand(fp));
968 
969   // If the handler is a JS frame, restore the context to the frame. Note that
970   // the context will be set to (cp == 0) for non-JS frames.
971   __ cmp(cp, Operand(0));
972   __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
973 
974   // Compute the handler entry address and jump to it.
975   ConstantPoolUnavailableScope constant_pool_unavailable(masm);
976   __ mov(r1, Operand(pending_handler_code_address));
977   __ ldr(r1, MemOperand(r1));
978   __ mov(r2, Operand(pending_handler_offset_address));
979   __ ldr(r2, MemOperand(r2));
980   __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
981   if (FLAG_enable_embedded_constant_pool) {
982     __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
983   }
984   __ add(pc, r1, r2);
985 }
986 
987 
988 void JSEntryStub::Generate(MacroAssembler* masm) {
989   // r0: code entry
990   // r1: function
991   // r2: receiver
992   // r3: argc
993   // [sp+0]: argv
994 
995   Label invoke, handler_entry, exit;
996 
997   ProfileEntryHookStub::MaybeCallEntryHook(masm);
998 
999   // Called from C, so do not pop argc and args on exit (preserve sp)
1000   // No need to save register-passed args
1001   // Save callee-saved registers (incl. cp and fp), sp, and lr
1002   __ stm(db_w, sp, kCalleeSaved | lr.bit());
1003 
1004   // Save callee-saved vfp registers.
1005   __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1006   // Set up the reserved register for 0.0.
1007   __ vmov(kDoubleRegZero, 0.0);
1008 
1009   // Get address of argv, see stm above.
1010   // r0: code entry
1011   // r1: function
1012   // r2: receiver
1013   // r3: argc
1014 
1015   // Set up argv in r4.
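  // The offset skips the callee-saved core registers plus lr (hence the '+ 1')
  // and the callee-saved VFP registers saved above.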
1016   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1017   offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1018   __ ldr(r4, MemOperand(sp, offset_to_argv));
1019 
1020   // Push a frame with special values setup to mark it as an entry frame.
1021   // r0: code entry
1022   // r1: function
1023   // r2: receiver
1024   // r3: argc
1025   // r4: argv
1026   StackFrame::Type marker = type();
1027   if (FLAG_enable_embedded_constant_pool) {
1028     __ mov(r8, Operand::Zero());
1029   }
1030   __ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
1031   __ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
1032   __ mov(r5,
1033          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1034   __ ldr(r5, MemOperand(r5));
1035   __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
1036   __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1037                        (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
1038                        ip.bit());
1039 
1040   // Set up frame pointer for the frame to be pushed.
1041   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1042 
1043   // If this is the outermost JS call, set js_entry_sp value.
1044   Label non_outermost_js;
1045   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1046   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1047   __ ldr(r6, MemOperand(r5));
1048   __ cmp(r6, Operand::Zero());
1049   __ b(ne, &non_outermost_js);
1050   __ str(fp, MemOperand(r5));
1051   __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
1052   Label cont;
1053   __ b(&cont);
1054   __ bind(&non_outermost_js);
1055   __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
1056   __ bind(&cont);
1057   __ push(ip);
1058 
1059   // Jump to a faked try block that does the invoke, with a faked catch
1060   // block that sets the pending exception.
1061   __ jmp(&invoke);
1062 
1063   // Block literal pool emission whilst taking the position of the handler
1064   // entry. This avoids making the assumption that literal pools are always
1065   // emitted after an instruction is emitted, rather than before.
1066   {
1067     Assembler::BlockConstPoolScope block_const_pool(masm);
1068     __ bind(&handler_entry);
1069     handler_offset_ = handler_entry.pos();
1070     // Caught exception: Store result (exception) in the pending exception
1071     // field in the JSEnv and return a failure sentinel.  Coming in here the
1072     // fp will be invalid because the PushStackHandler below sets it to 0 to
1073     // signal the existence of the JSEntry frame.
1074     __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1075                                          isolate())));
1076   }
1077   __ str(r0, MemOperand(ip));
1078   __ LoadRoot(r0, Heap::kExceptionRootIndex);
1079   __ b(&exit);
1080 
1081   // Invoke: Link this frame into the handler chain.
1082   __ bind(&invoke);
1083   // Must preserve r0-r4, r5-r6 are available.
1084   __ PushStackHandler();
1085   // If an exception not caught by another handler occurs, this handler
1086   // returns control to the code after the bl(&invoke) above, which
1087   // restores all kCalleeSaved registers (including cp and fp) to their
1088   // saved values before returning a failure to C.
1089 
1090   // Invoke the function by calling through JS entry trampoline builtin.
1091   // Notice that we cannot store a reference to the trampoline code directly in
1092   // this stub, because runtime stubs are not traversed when doing GC.
1093 
1094   // Expected registers by Builtins::JSEntryTrampoline
1095   // r0: code entry
1096   // r1: function
1097   // r2: receiver
1098   // r3: argc
1099   // r4: argv
1100   if (type() == StackFrame::ENTRY_CONSTRUCT) {
1101     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1102                                       isolate());
1103     __ mov(ip, Operand(construct_entry));
1104   } else {
1105     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1106     __ mov(ip, Operand(entry));
1107   }
1108   __ ldr(ip, MemOperand(ip));  // deref address
1109   __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1110 
1111   // Branch and link to JSEntryTrampoline.
1112   __ Call(ip);
1113 
1114   // Unlink this frame from the handler chain.
1115   __ PopStackHandler();
1116 
1117   __ bind(&exit);  // r0 holds result
1118   // Check if the current stack frame is marked as the outermost JS frame.
1119   Label non_outermost_js_2;
1120   __ pop(r5);
1121   __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
1122   __ b(ne, &non_outermost_js_2);
1123   __ mov(r6, Operand::Zero());
1124   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1125   __ str(r6, MemOperand(r5));
1126   __ bind(&non_outermost_js_2);
1127 
1128   // Restore the top frame descriptors from the stack.
1129   __ pop(r3);
1130   __ mov(ip,
1131          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1132   __ str(r3, MemOperand(ip));
1133 
1134   // Reset the stack to the callee saved registers.
1135   __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1136 
1137   // Restore callee-saved registers and return.
1138 #ifdef DEBUG
1139   if (FLAG_debug_code) {
1140     __ mov(lr, Operand(pc));
1141   }
1142 #endif
1143 
1144   // Restore callee-saved vfp registers.
1145   __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1146 
1147   __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1148 }
1149 
1150 void RegExpExecStub::Generate(MacroAssembler* masm) {
1151   // Just jump directly to runtime if native RegExp is not selected at compile
1152   // time, or if regexp entry in generated code is turned off by a runtime
1153   // switch or at compilation.
1154 #ifdef V8_INTERPRETED_REGEXP
1155   __ TailCallRuntime(Runtime::kRegExpExec);
1156 #else  // V8_INTERPRETED_REGEXP
1157 
1158   // Stack frame on entry.
1159   //  sp[0]: last_match_info (expected JSArray)
1160   //  sp[4]: previous index
1161   //  sp[8]: subject string
1162   //  sp[12]: JSRegExp object
1163 
1164   const int kLastMatchInfoOffset = 0 * kPointerSize;
1165   const int kPreviousIndexOffset = 1 * kPointerSize;
1166   const int kSubjectOffset = 2 * kPointerSize;
1167   const int kJSRegExpOffset = 3 * kPointerSize;
1168 
1169   Label runtime;
1170   // Allocation of registers for this function. These are in callee save
1171   // registers and will be preserved by the call to the native RegExp code, as
1172   // this code is called using the normal C calling convention. When calling
1173   // directly from generated code the native RegExp code will not do a GC and
1174   // therefore the content of these registers are safe to use after the call.
1175   Register subject = r4;
1176   Register regexp_data = r5;
1177   Register last_match_info_elements = no_reg;  // will be r6;
1178 
1179   // Ensure that a RegExp stack is allocated.
1180   ExternalReference address_of_regexp_stack_memory_address =
1181       ExternalReference::address_of_regexp_stack_memory_address(isolate());
1182   ExternalReference address_of_regexp_stack_memory_size =
1183       ExternalReference::address_of_regexp_stack_memory_size(isolate());
1184   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
1185   __ ldr(r0, MemOperand(r0, 0));
1186   __ cmp(r0, Operand::Zero());
1187   __ b(eq, &runtime);
1188 
1189   // Check that the first argument is a JSRegExp object.
1190   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
1191   __ JumpIfSmi(r0, &runtime);
1192   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
1193   __ b(ne, &runtime);
1194 
1195   // Check that the RegExp has been compiled (data contains a fixed array).
1196   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
1197   if (FLAG_debug_code) {
1198     __ SmiTst(regexp_data);
1199     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1200     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
1201     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1202   }
1203 
1204   // regexp_data: RegExp data (FixedArray)
1205   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1206   __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1207   __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1208   __ b(ne, &runtime);
1209 
1210   // regexp_data: RegExp data (FixedArray)
1211   // Check that the number of captures fit in the static offsets vector buffer.
1212   __ ldr(r2,
1213          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1214   // Check (number_of_captures + 1) * 2 <= offsets vector size
1215   // Or          number_of_captures * 2 <= offsets vector size - 2
1216   // Multiplying by 2 comes for free since r2 is smi-tagged.
1217   STATIC_ASSERT(kSmiTag == 0);
1218   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1219   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1220   __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1221   __ b(hi, &runtime);
1222 
1223   // Reset offset for possibly sliced string.
1224   __ mov(r9, Operand::Zero());
1225   __ ldr(subject, MemOperand(sp, kSubjectOffset));
1226   __ JumpIfSmi(subject, &runtime);
1227   __ mov(r3, subject);  // Make a copy of the original subject string.
1228   // subject: subject string
1229   // r3: subject string
1230   // regexp_data: RegExp data (FixedArray)
1231   // Handle subject string according to its encoding and representation:
1232   // (1) Sequential string?  If yes, go to (4).
1233   // (2) Sequential or cons?  If not, go to (5).
1234   // (3) Cons string.  If the string is flat, replace subject with first string
1235   //     and go to (1). Otherwise bail out to runtime.
1236   // (4) Sequential string.  Load regexp code according to encoding.
1237   // (E) Carry on.
1238   /// [...]
1239 
1240   // Deferred code at the end of the stub:
1241   // (5) Long external string?  If not, go to (7).
1242   // (6) External string.  Make it, offset-wise, look like a sequential string.
1243   //     Go to (4).
1244   // (7) Short external string or not a string?  If yes, bail out to runtime.
1245   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
1246 
1247   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
1248       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
1249 
1250   __ bind(&check_underlying);
1251   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
1252   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
1253 
1254   // (1) Sequential string?  If yes, go to (4).
1255   __ and_(r1,
1256           r0,
1257           Operand(kIsNotStringMask |
1258                   kStringRepresentationMask |
1259                   kShortExternalStringMask),
1260           SetCC);
1261   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1262   __ b(eq, &seq_string);  // Go to (4).
1263 
1264   // (2) Sequential or cons?  If not, go to (5).
1265   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1266   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1267   STATIC_ASSERT(kThinStringTag > kExternalStringTag);
1268   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1269   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1270   __ cmp(r1, Operand(kExternalStringTag));
1271   __ b(ge, &not_seq_nor_cons);  // Go to (5).
1272 
1273   // (3) Cons string.  Check that it's flat.
1274   // Replace subject with first string and reload instance type.
1275   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
1276   __ CompareRoot(r0, Heap::kempty_stringRootIndex);
1277   __ b(ne, &runtime);
1278   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1279   __ jmp(&check_underlying);
1280 
1281   // (4) Sequential string.  Load regexp code according to encoding.
1282   __ bind(&seq_string);
1283   // subject: sequential subject string (or look-alike, external string)
1284   // r3: original subject string
1285   // Load previous index and check range before r3 is overwritten.  We have to
1286   // use r3 instead of subject here because subject might have been only made
1287   // to look like a sequential string when it actually is an external string.
1288   __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
1289   __ JumpIfNotSmi(r1, &runtime);
1290   __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
1291   __ cmp(r3, Operand(r1));
1292   __ b(ls, &runtime);
1293   __ SmiUntag(r1);
1294 
1295   STATIC_ASSERT(8 == kOneByteStringTag);
1296   STATIC_ASSERT(kTwoByteStringTag == 0);
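  // Isolate the encoding bit (8 for one-byte, 0 for two-byte), shift it down to
  // 1 or 0, and use the resulting flags to pick the matching regexp code below.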
1297   __ and_(r0, r0, Operand(kStringEncodingMask));
1298   __ mov(r3, Operand(r0, ASR, 3), SetCC);
1299   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
1300          ne);
1301   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
1302 
1303   // (E) Carry on.  String handling is done.
1304   // r6: irregexp code
1305   // Check that the irregexp code has been generated for the actual string
1306   // encoding. If it has, the field contains a code object otherwise it contains
1307   // a smi (code flushing support).
1308   __ JumpIfSmi(r6, &runtime);
1309 
1310   // r1: previous index
1311   // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
1312   // r6: code
1313   // subject: Subject string
1314   // regexp_data: RegExp data (FixedArray)
1315   // All checks done. Now push arguments for native regexp code.
1316   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
1317 
1318   // Isolates: note we add an additional parameter here (isolate pointer).
1319   const int kRegExpExecuteArguments = 9;
1320   const int kParameterRegisters = 4;
1321   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1322 
1323   // Stack pointer now points to cell where return address is to be written.
1324   // Arguments are before that on the stack or in registers.
1325 
1326   // Argument 9 (sp[20]): Pass current isolate address.
1327   __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
1328   __ str(r0, MemOperand(sp, 5 * kPointerSize));
1329 
1330   // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
1331   __ mov(r0, Operand(1));
1332   __ str(r0, MemOperand(sp, 4 * kPointerSize));
1333 
1334   // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
1335   __ mov(r0, Operand(address_of_regexp_stack_memory_address));
1336   __ ldr(r0, MemOperand(r0, 0));
1337   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
1338   __ ldr(r2, MemOperand(r2, 0));
1339   __ add(r0, r0, Operand(r2));
1340   __ str(r0, MemOperand(sp, 3 * kPointerSize));
1341 
1342   // Argument 6: Set the number of capture registers to zero to force global
1343   // regexps to behave as non-global.  This does not affect non-global regexps.
1344   __ mov(r0, Operand::Zero());
1345   __ str(r0, MemOperand(sp, 2 * kPointerSize));
1346 
1347   // Argument 5 (sp[4]): static offsets vector buffer.
1348   __ mov(r0,
1349          Operand(ExternalReference::address_of_static_offsets_vector(
1350              isolate())));
1351   __ str(r0, MemOperand(sp, 1 * kPointerSize));
1352 
1353   // For arguments 4 and 3, get the string length and the start of the string
1354   // data, and compute the index shift (0 for one-byte, 1 for two-byte).
1355   __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1356   __ eor(r3, r3, Operand(1));
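       // r3 now holds the character-size shift: 0 for one-byte, 1 for two-byte.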
1357   // Load the length of the original subject string from the previous stack
1358   // frame. Therefore we have to use fp, which points exactly two pointer
1359   // sizes below the previous sp. (Creating a new stack frame pushes the
1360   // previous fp onto the stack and adjusts sp by 2 * kPointerSize.)
1361   __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1362   // If slice offset is not 0, load the length from the original sliced string.
1363   // Argument 4, r3: End of string data
1364   // Argument 3, r2: Start of string data
1365   // Prepare start and end index of the input.
1366   __ add(r9, r7, Operand(r9, LSL, r3));
1367   __ add(r2, r9, Operand(r1, LSL, r3));
1368 
1369   __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
1370   __ SmiUntag(r7);
1371   __ add(r3, r9, Operand(r7, LSL, r3));
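       // r9 was advanced to the start of the character data; r2 (argument 3) now
       // points at the first character to scan and r3 (argument 4) at the end of
       // the subject's data.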
1372 
1373   // Argument 2 (r1): Previous index.
1374   // Already there
1375 
1376   // Argument 1 (r0): Subject string.
1377   __ mov(r0, subject);
1378 
1379   // Locate the code entry and call it.
1380   __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1381   DirectCEntryStub stub(isolate());
1382   stub.GenerateCall(masm, r6);
1383 
1384   __ LeaveExitFrame(false, no_reg, true);
1385 
1386   last_match_info_elements = r6;
1387 
1388   // r0: result
1389   // subject: subject string (callee saved)
1390   // regexp_data: RegExp data (callee saved)
1391   // last_match_info_elements: Last match info elements (callee saved)
1392   // Check the result.
1393   Label success;
1394   __ cmp(r0, Operand(1));
1395   // We expect exactly one result since we force the called regexp to behave
1396   // as non-global.
1397   __ b(eq, &success);
1398   Label failure;
1399   __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
1400   __ b(eq, &failure);
1401   __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1402   // If not exception it can only be retry. Handle that in the runtime system.
1403   __ b(ne, &runtime);
1404   // Result must now be exception. If there is no pending exception, a stack
1405   // overflow (on the backtrack stack) was detected in RegExp code but the
1406   // exception has not been created yet. Handle that in the runtime system.
1407   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1408   __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
1409   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1410                                        isolate())));
1411   __ ldr(r0, MemOperand(r2, 0));
1412   __ cmp(r0, r1);
1413   __ b(eq, &runtime);
1414 
1415   // For exception, throw the exception again.
1416   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1417 
1418   __ bind(&failure);
1419   // For failure, return null.
1420   __ mov(r0, Operand(isolate()->factory()->null_value()));
1421   __ add(sp, sp, Operand(4 * kPointerSize));
1422   __ Ret();
1423 
1424   // Process the result from the native regexp code.
1425   __ bind(&success);
1426   __ ldr(r1,
1427          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1428   // Calculate number of capture registers (number_of_captures + 1) * 2.
1429   // Multiplying by 2 comes for free since r1 is smi-tagged.
1430   STATIC_ASSERT(kSmiTag == 0);
1431   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1432   __ add(r1, r1, Operand(2));  // r1 was a smi.
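       // The smi tag doubled the capture count, so after adding 2 r1 holds
       // (number_of_captures + 1) * 2 as an untagged integer.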
1433 
1434   // Check that the last match info is a FixedArray.
1435   __ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1436   __ JumpIfSmi(last_match_info_elements, &runtime);
1437   // Check that the object has fast elements.
1438   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1439   __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
1440   __ b(ne, &runtime);
1441   // Check that the last match info has space for the capture registers and the
1442   // additional information.
1443   __ ldr(r0,
1444          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1445   __ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
1446   __ cmp(r2, Operand::SmiUntag(r0));
1447   __ b(gt, &runtime);
1448 
1449   // r1: number of capture registers
1450   // r4: subject string
1451   // Store the capture count.
1452   __ SmiTag(r2, r1);
1453   __ str(r2, FieldMemOperand(last_match_info_elements,
1454                              RegExpMatchInfo::kNumberOfCapturesOffset));
1455   // Store last subject and last input.
1456   __ str(subject, FieldMemOperand(last_match_info_elements,
1457                                   RegExpMatchInfo::kLastSubjectOffset));
1458   __ mov(r2, subject);
1459   __ RecordWriteField(last_match_info_elements,
1460                       RegExpMatchInfo::kLastSubjectOffset, subject, r3,
1461                       kLRHasNotBeenSaved, kDontSaveFPRegs);
1462   __ mov(subject, r2);
1463   __ str(subject, FieldMemOperand(last_match_info_elements,
1464                                   RegExpMatchInfo::kLastInputOffset));
1465   __ RecordWriteField(last_match_info_elements,
1466                       RegExpMatchInfo::kLastInputOffset, subject, r3,
1467                       kLRHasNotBeenSaved, kDontSaveFPRegs);
1468 
1469   // Get the static offsets vector filled by the native regexp code.
1470   ExternalReference address_of_static_offsets_vector =
1471       ExternalReference::address_of_static_offsets_vector(isolate());
1472   __ mov(r2, Operand(address_of_static_offsets_vector));
1473 
1474   // r1: number of capture registers
1475   // r2: offsets vector
1476   Label next_capture, done;
1477   // Capture register counter starts from number of capture registers and
1478   // counts down until wrapping after zero.
1479   __ add(r0, last_match_info_elements,
1480          Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
1481   __ bind(&next_capture);
1482   __ sub(r1, r1, Operand(1), SetCC);
1483   __ b(mi, &done);
1484   // Read the value from the static offsets vector buffer.
1485   __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
1486   // Store the smi value in the last match info.
1487   __ SmiTag(r3);
1488   __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
1489   __ jmp(&next_capture);
1490   __ bind(&done);
1491 
1492   // Return last match info.
1493   __ mov(r0, last_match_info_elements);
1494   __ add(sp, sp, Operand(4 * kPointerSize));
1495   __ Ret();
1496 
1497   // Do the runtime call to execute the regexp.
1498   __ bind(&runtime);
1499   __ TailCallRuntime(Runtime::kRegExpExec);
1500 
1501   // Deferred code for string handling.
1502   // (5) Long external string?  If not, go to (7).
1503   __ bind(&not_seq_nor_cons);
1504   // Compare flags are still set.
1505   __ b(gt, &not_long_external);  // Go to (7).
1506 
1507   // (6) External string.  Make it, offset-wise, look like a sequential string.
1508   __ bind(&external_string);
1509   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
1510   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
1511   if (FLAG_debug_code) {
1512     // Assert that we do not have a cons or slice (indirect strings) here.
1513     // Sequential strings have already been ruled out.
1514     __ tst(r0, Operand(kIsIndirectStringMask));
1515     __ Assert(eq, kExternalStringExpectedButNotFound);
1516   }
1517   __ ldr(subject,
1518          FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1519   // Move the pointer so that offset-wise, it looks like a sequential string.
1520   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1521   __ sub(subject,
1522          subject,
1523          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1524   __ jmp(&seq_string);  // Go to (4).
1525 
1526   // (7) Short external string or not a string?  If yes, bail out to runtime.
1527   __ bind(&not_long_external);
1528   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1529   __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
1530   __ b(ne, &runtime);
1531 
1532   // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
1533   Label thin_string;
1534   __ cmp(r1, Operand(kThinStringTag));
1535   __ b(eq, &thin_string);
1536   // Load offset into r9 and replace subject string with parent.
1537   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1538   __ SmiUntag(r9);
1539   __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1540   __ jmp(&check_underlying);  // Go to (4).
1541 
1542   __ bind(&thin_string);
1543   __ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
1544   __ jmp(&check_underlying);  // Go to (4).
1545 #endif  // V8_INTERPRETED_REGEXP
1546 }
1547 
1548 
1549 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1550   // r0 : number of arguments to the construct function
1551   // r1 : the function to call
1552   // r2 : feedback vector
1553   // r3 : slot in feedback vector (Smi)
1554   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1555 
1556   // Number-of-arguments register must be smi-tagged to call out.
1557   __ SmiTag(r0);
1558   __ Push(r3, r2, r1, r0);
1559   __ Push(cp);
1560 
1561   __ CallStub(stub);
1562 
1563   __ Pop(cp);
1564   __ Pop(r3, r2, r1, r0);
1565   __ SmiUntag(r0);
1566 }
1567 
1568 
1569 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1570   // Cache the called function in a feedback vector slot.  Cache states
1571   // are uninitialized, monomorphic (indicated by a JSFunction), and
1572   // megamorphic.
1573   // r0 : number of arguments to the construct function
1574   // r1 : the function to call
1575   // r2 : feedback vector
1576   // r3 : slot in feedback vector (Smi)
1577   Label initialize, done, miss, megamorphic, not_array_function;
1578 
1579   DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
1580             masm->isolate()->heap()->megamorphic_symbol());
1581   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
1582             masm->isolate()->heap()->uninitialized_symbol());
1583 
1584   // Load the cache state into r5.
1585   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1586   __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
1587 
1588   // A monomorphic cache hit or an already megamorphic state: invoke the
1589   // function without changing the state.
1590   // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
1591   // this position in a symbol (see static asserts in feedback-vector.h).
1592   Label check_allocation_site;
1593   Register feedback_map = r6;
1594   Register weak_value = r9;
1595   __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
1596   __ cmp(r1, weak_value);
1597   __ b(eq, &done);
1598   __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
1599   __ b(eq, &done);
1600   __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
1601   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1602   __ b(ne, &check_allocation_site);
1603 
1604   // If the weak cell is cleared, we have a new chance to become monomorphic.
1605   __ JumpIfSmi(weak_value, &initialize);
1606   __ jmp(&megamorphic);
1607 
1608   __ bind(&check_allocation_site);
1609   // If we came here, we need to see if we are the array function.
1610   // If we didn't have a matching function and we didn't find the megamorphic
1611   // sentinel, then the slot contains either some other function or an
1612   // AllocationSite.
1613   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1614   __ b(ne, &miss);
1615 
1616   // Make sure the function is the Array() function
1617   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
1618   __ cmp(r1, r5);
1619   __ b(ne, &megamorphic);
1620   __ jmp(&done);
1621 
1622   __ bind(&miss);
1623 
1624   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1625   // megamorphic.
1626   __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
1627   __ b(eq, &initialize);
1628   // MegamorphicSentinel is an immortal immovable object (undefined) so no
1629   // write-barrier is needed.
1630   __ bind(&megamorphic);
1631   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1632   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1633   __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
1634   __ jmp(&done);
1635 
1636   // An uninitialized cache is patched with the function
1637   __ bind(&initialize);
1638 
1639   // Make sure the function is the Array() function
1640   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
1641   __ cmp(r1, r5);
1642   __ b(ne, &not_array_function);
1643 
1644   // The target function is the Array constructor.
1645   // Create an AllocationSite if we don't already have it, and store it in
1646   // the slot.
1647   CreateAllocationSiteStub create_stub(masm->isolate());
1648   CallStubInRecordCallTarget(masm, &create_stub);
1649   __ b(&done);
1650 
1651   __ bind(&not_array_function);
1652   CreateWeakCellStub weak_cell_stub(masm->isolate());
1653   CallStubInRecordCallTarget(masm, &weak_cell_stub);
1654 
1655   __ bind(&done);
1656 
1657   // Increment the call count for all function calls.
1658   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1659   __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
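       // The call count is kept as a Smi in the slot following the feedback
       // entry; adding the tagged constant Smi::FromInt(1) increments it without
       // untagging.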
1660   __ ldr(r4, FieldMemOperand(r5, 0));
1661   __ add(r4, r4, Operand(Smi::FromInt(1)));
1662   __ str(r4, FieldMemOperand(r5, 0));
1663 }
1664 
1665 void CallConstructStub::Generate(MacroAssembler* masm) {
1666   // r0 : number of arguments
1667   // r1 : the function to call
1668   // r2 : feedback vector
1669   // r3 : slot in feedback vector (Smi, for RecordCallTarget)
1670 
1671   Label non_function;
1672   // Check that the function is not a smi.
1673   __ JumpIfSmi(r1, &non_function);
1674   // Check that the function is a JSFunction.
1675   __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
1676   __ b(ne, &non_function);
1677 
1678   GenerateRecordCallTarget(masm);
1679 
1680   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
1681   Label feedback_register_initialized;
1682   // Put the AllocationSite from the feedback vector into r2, or undefined.
1683   __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
1684   __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
1685   __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
1686   __ b(eq, &feedback_register_initialized);
1687   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
1688   __ bind(&feedback_register_initialized);
1689 
1690   __ AssertUndefinedOrAllocationSite(r2, r5);
1691 
1692   // Pass function as new target.
1693   __ mov(r3, r1);
1694 
1695   // Tail call to the function-specific construct stub (still in the caller
1696   // context at this point).
1697   __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1698   __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
1699   __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1700 
1701   __ bind(&non_function);
1702   __ mov(r3, r1);
1703   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1704 }
1705 
1706 // StringCharCodeAtGenerator
1707 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1708   // If the receiver is a smi trigger the non-string case.
1709   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1710     __ JumpIfSmi(object_, receiver_not_string_);
1711 
1712     // Fetch the instance type of the receiver into result register.
1713     __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1714     __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1715     // If the receiver is not a string trigger the non-string case.
1716     __ tst(result_, Operand(kIsNotStringMask));
1717     __ b(ne, receiver_not_string_);
1718   }
1719 
1720   // If the index is non-smi trigger the non-smi case.
1721   __ JumpIfNotSmi(index_, &index_not_smi_);
1722   __ bind(&got_smi_index_);
1723 
1724   // Check for index out of range.
1725   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
1726   __ cmp(ip, Operand(index_));
1727   __ b(ls, index_out_of_range_);
1728 
1729   __ SmiUntag(index_);
1730 
1731   StringCharLoadGenerator::Generate(masm,
1732                                     object_,
1733                                     index_,
1734                                     result_,
1735                                     &call_runtime_);
1736 
1737   __ SmiTag(result_);
1738   __ bind(&exit_);
1739 }
1740 
1741 
1742 void StringCharCodeAtGenerator::GenerateSlow(
1743     MacroAssembler* masm, EmbedMode embed_mode,
1744     const RuntimeCallHelper& call_helper) {
1745   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
1746 
1747   // Index is not a smi.
1748   __ bind(&index_not_smi_);
1749   // If index is a heap number, try converting it to an integer.
1750   __ CheckMap(index_,
1751               result_,
1752               Heap::kHeapNumberMapRootIndex,
1753               index_not_number_,
1754               DONT_DO_SMI_CHECK);
1755   call_helper.BeforeCall(masm);
1756   if (embed_mode == PART_OF_IC_HANDLER) {
1757     __ Push(LoadWithVectorDescriptor::VectorRegister(),
1758             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
1759   } else {
1760     // index_ is consumed by runtime conversion function.
1761     __ Push(object_, index_);
1762   }
1763   __ CallRuntime(Runtime::kNumberToSmi);
1764   // Save the conversion result before the pop instructions below
1765   // have a chance to overwrite it.
1766   __ Move(index_, r0);
1767   if (embed_mode == PART_OF_IC_HANDLER) {
1768     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
1769            LoadWithVectorDescriptor::SlotRegister(), object_);
1770   } else {
1771     __ pop(object_);
1772   }
1773   // Reload the instance type.
1774   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1775   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1776   call_helper.AfterCall(masm);
1777   // If index is still not a smi, it must be out of range.
1778   __ JumpIfNotSmi(index_, index_out_of_range_);
1779   // Otherwise, return to the fast path.
1780   __ jmp(&got_smi_index_);
1781 
1782   // Call runtime. We get here when the receiver is a string and the
1783   // index is a number, but the code for getting the actual character
1784   // is too complex (e.g., when the string needs to be flattened).
1785   __ bind(&call_runtime_);
1786   call_helper.BeforeCall(masm);
1787   __ SmiTag(index_);
1788   __ Push(object_, index_);
1789   __ CallRuntime(Runtime::kStringCharCodeAtRT);
1790   __ Move(result_, r0);
1791   call_helper.AfterCall(masm);
1792   __ jmp(&exit_);
1793 
1794   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
1795 }
1796 
1797 void StringHelper::GenerateFlatOneByteStringEquals(
1798     MacroAssembler* masm, Register left, Register right, Register scratch1,
1799     Register scratch2, Register scratch3) {
1800   Register length = scratch1;
1801 
1802   // Compare lengths.
1803   Label strings_not_equal, check_zero_length;
1804   __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
1805   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
1806   __ cmp(length, scratch2);
1807   __ b(eq, &check_zero_length);
1808   __ bind(&strings_not_equal);
1809   __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
1810   __ Ret();
1811 
1812   // Check if the length is zero.
1813   Label compare_chars;
1814   __ bind(&check_zero_length);
1815   STATIC_ASSERT(kSmiTag == 0);
1816   __ cmp(length, Operand::Zero());
1817   __ b(ne, &compare_chars);
1818   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
1819   __ Ret();
1820 
1821   // Compare characters.
1822   __ bind(&compare_chars);
1823   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
1824                                   &strings_not_equal);
1825 
1826   // Characters are equal.
1827   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
1828   __ Ret();
1829 }
1830 
1831 
1832 void StringHelper::GenerateCompareFlatOneByteStrings(
1833     MacroAssembler* masm, Register left, Register right, Register scratch1,
1834     Register scratch2, Register scratch3, Register scratch4) {
1835   Label result_not_equal, compare_lengths;
1836   // Find minimum length and length difference.
1837   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
1838   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
1839   __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
1840   Register length_delta = scratch3;
1841   __ mov(scratch1, scratch2, LeaveCC, gt);
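       // The flags are still set from the sub above: gt means the left string is
       // longer, so the right string's length is the minimum.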
1842   Register min_length = scratch1;
1843   STATIC_ASSERT(kSmiTag == 0);
1844   __ cmp(min_length, Operand::Zero());
1845   __ b(eq, &compare_lengths);
1846 
1847   // Compare loop.
1848   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
1849                                   scratch4, &result_not_equal);
1850 
1851   // Compare lengths - strings up to min-length are equal.
1852   __ bind(&compare_lengths);
1853   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
1854   // Use length_delta as result if it's zero.
1855   __ mov(r0, Operand(length_delta), SetCC);
1856   __ bind(&result_not_equal);
1857   // Conditionally update the result based either on length_delta or
1858   // the last comparison performed in the loop above.
1859   __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
1860   __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
1861   __ Ret();
1862 }
1863 
1864 
1865 void StringHelper::GenerateOneByteCharsCompareLoop(
1866     MacroAssembler* masm, Register left, Register right, Register length,
1867     Register scratch1, Register scratch2, Label* chars_not_equal) {
1868   // Change index to run from -length to -1 by adding length to string
1869   // start. This means that loop ends when index reaches zero, which
1870   // doesn't need an additional compare.
1871   __ SmiUntag(length);
1872   __ add(scratch1, length,
1873          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
1874   __ add(left, left, Operand(scratch1));
1875   __ add(right, right, Operand(scratch1));
1876   __ rsb(length, length, Operand::Zero());
1877   Register index = length;  // index = -length;
1878 
1879   // Compare loop.
1880   Label loop;
1881   __ bind(&loop);
1882   __ ldrb(scratch1, MemOperand(left, index));
1883   __ ldrb(scratch2, MemOperand(right, index));
1884   __ cmp(scratch1, scratch2);
1885   __ b(ne, chars_not_equal);
1886   __ add(index, index, Operand(1), SetCC);
1887   __ b(ne, &loop);
1888 }
1889 
1890 
1891 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
1892   // ----------- S t a t e -------------
1893   //  -- r1    : left
1894   //  -- r0    : right
1895   //  -- lr    : return address
1896   // -----------------------------------
1897 
1898   // Load r2 with the allocation site.  We stick an undefined dummy value here
1899   // and replace it with the real allocation site later when we instantiate this
1900   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
1901   __ Move(r2, isolate()->factory()->undefined_value());
1902 
1903   // Make sure that we actually patched the allocation site.
1904   if (FLAG_debug_code) {
1905     __ tst(r2, Operand(kSmiTagMask));
1906     __ Assert(ne, kExpectedAllocationSite);
1907     __ push(r2);
1908     __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
1909     __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
1910     __ cmp(r2, ip);
1911     __ pop(r2);
1912     __ Assert(eq, kExpectedAllocationSite);
1913   }
1914 
1915   // Tail call into the stub that handles binary operations with allocation
1916   // sites.
1917   BinaryOpWithAllocationSiteStub stub(isolate(), state());
1918   __ TailCallStub(&stub);
1919 }
1920 
1921 
1922 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
1923   DCHECK_EQ(CompareICState::BOOLEAN, state());
1924   Label miss;
1925 
1926   __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
1927   __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
1928   if (!Token::IsEqualityOp(op())) {
1929     __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
1930     __ AssertSmi(r1);
1931     __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
1932     __ AssertSmi(r0);
1933   }
1934   __ sub(r0, r1, r0);
1935   __ Ret();
1936 
1937   __ bind(&miss);
1938   GenerateMiss(masm);
1939 }
1940 
1941 
1942 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
1943   DCHECK(state() == CompareICState::SMI);
1944   Label miss;
1945   __ orr(r2, r1, r0);
1946   __ JumpIfNotSmi(r2, &miss);
1947 
1948   if (GetCondition() == eq) {
1949     // For equality we do not care about the sign of the result.
1950     __ sub(r0, r0, r1, SetCC);
1951   } else {
1952     // Untag before subtracting to avoid handling overflow.
1953     __ SmiUntag(r1);
1954     __ sub(r0, r1, Operand::SmiUntag(r0));
1955   }
1956   __ Ret();
1957 
1958   __ bind(&miss);
1959   GenerateMiss(masm);
1960 }
1961 
1962 
1963 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
1964   DCHECK(state() == CompareICState::NUMBER);
1965 
1966   Label generic_stub;
1967   Label unordered, maybe_undefined1, maybe_undefined2;
1968   Label miss;
1969 
1970   if (left() == CompareICState::SMI) {
1971     __ JumpIfNotSmi(r1, &miss);
1972   }
1973   if (right() == CompareICState::SMI) {
1974     __ JumpIfNotSmi(r0, &miss);
1975   }
1976 
1977   // Inlining the double comparison and falling back to the general compare
1978   // stub if NaN is involved.
1979   // Load left and right operand.
1980   Label done, left, left_smi, right_smi;
1981   __ JumpIfSmi(r0, &right_smi);
1982   __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
1983               DONT_DO_SMI_CHECK);
1984   __ sub(r2, r0, Operand(kHeapObjectTag));
1985   __ vldr(d1, r2, HeapNumber::kValueOffset);
1986   __ b(&left);
1987   __ bind(&right_smi);
1988   __ SmiToDouble(d1, r0);
1989 
1990   __ bind(&left);
1991   __ JumpIfSmi(r1, &left_smi);
1992   __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
1993               DONT_DO_SMI_CHECK);
1994   __ sub(r2, r1, Operand(kHeapObjectTag));
1995   __ vldr(d0, r2, HeapNumber::kValueOffset);
1996   __ b(&done);
1997   __ bind(&left_smi);
1998   __ SmiToDouble(d0, r1);
1999 
2000   __ bind(&done);
2001   // Compare operands.
2002   __ VFPCompareAndSetFlags(d0, d1);
2003 
2004   // Don't base result on status bits when a NaN is involved.
2005   __ b(vs, &unordered);
2006 
2007   // Return a result of -1, 0, or 1, based on status bits.
2008   __ mov(r0, Operand(EQUAL), LeaveCC, eq);
2009   __ mov(r0, Operand(LESS), LeaveCC, lt);
2010   __ mov(r0, Operand(GREATER), LeaveCC, gt);
2011   __ Ret();
2012 
2013   __ bind(&unordered);
2014   __ bind(&generic_stub);
2015   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2016                      CompareICState::GENERIC, CompareICState::GENERIC);
2017   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2018 
2019   __ bind(&maybe_undefined1);
2020   if (Token::IsOrderedRelationalCompareOp(op())) {
2021     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
2022     __ b(ne, &miss);
2023     __ JumpIfSmi(r1, &unordered);
2024     __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
2025     __ b(ne, &maybe_undefined2);
2026     __ jmp(&unordered);
2027   }
2028 
2029   __ bind(&maybe_undefined2);
2030   if (Token::IsOrderedRelationalCompareOp(op())) {
2031     __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
2032     __ b(eq, &unordered);
2033   }
2034 
2035   __ bind(&miss);
2036   GenerateMiss(masm);
2037 }
2038 
2039 
2040 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2041   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2042   Label miss;
2043 
2044   // Registers containing left and right operands respectively.
2045   Register left = r1;
2046   Register right = r0;
2047   Register tmp1 = r2;
2048   Register tmp2 = r3;
2049 
2050   // Check that both operands are heap objects.
2051   __ JumpIfEitherSmi(left, right, &miss);
2052 
2053   // Check that both operands are internalized strings.
2054   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2055   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2056   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2057   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2058   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2059   __ orr(tmp1, tmp1, Operand(tmp2));
2060   __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2061   __ b(ne, &miss);
2062 
2063   // Internalized strings are compared by identity.
2064   __ cmp(left, right);
2065   // Make sure r0 is non-zero. At this point input operands are
2066   // guaranteed to be non-zero.
2067   DCHECK(right.is(r0));
2068   STATIC_ASSERT(EQUAL == 0);
2069   STATIC_ASSERT(kSmiTag == 0);
2070   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2071   __ Ret();
2072 
2073   __ bind(&miss);
2074   GenerateMiss(masm);
2075 }
2076 
2077 
2078 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2079   DCHECK(state() == CompareICState::UNIQUE_NAME);
2080   DCHECK(GetCondition() == eq);
2081   Label miss;
2082 
2083   // Registers containing left and right operands respectively.
2084   Register left = r1;
2085   Register right = r0;
2086   Register tmp1 = r2;
2087   Register tmp2 = r3;
2088 
2089   // Check that both operands are heap objects.
2090   __ JumpIfEitherSmi(left, right, &miss);
2091 
2092   // Check that both operands are unique names. This leaves the instance
2093   // types loaded in tmp1 and tmp2.
2094   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2095   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2096   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2097   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2098 
2099   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2100   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2101 
2102   // Unique names are compared by identity.
2103   __ cmp(left, right);
2104   // Make sure r0 is non-zero. At this point input operands are
2105   // guaranteed to be non-zero.
2106   DCHECK(right.is(r0));
2107   STATIC_ASSERT(EQUAL == 0);
2108   STATIC_ASSERT(kSmiTag == 0);
2109   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2110   __ Ret();
2111 
2112   __ bind(&miss);
2113   GenerateMiss(masm);
2114 }
2115 
2116 
2117 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2118   DCHECK(state() == CompareICState::STRING);
2119   Label miss;
2120 
2121   bool equality = Token::IsEqualityOp(op());
2122 
2123   // Registers containing left and right operands respectively.
2124   Register left = r1;
2125   Register right = r0;
2126   Register tmp1 = r2;
2127   Register tmp2 = r3;
2128   Register tmp3 = r4;
2129   Register tmp4 = r5;
2130 
2131   // Check that both operands are heap objects.
2132   __ JumpIfEitherSmi(left, right, &miss);
2133 
2134   // Check that both operands are strings. This leaves the instance
2135   // types loaded in tmp1 and tmp2.
2136   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2137   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2138   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2139   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2140   STATIC_ASSERT(kNotStringTag != 0);
2141   __ orr(tmp3, tmp1, tmp2);
2142   __ tst(tmp3, Operand(kIsNotStringMask));
2143   __ b(ne, &miss);
2144 
2145   // Fast check for identical strings.
2146   __ cmp(left, right);
2147   STATIC_ASSERT(EQUAL == 0);
2148   STATIC_ASSERT(kSmiTag == 0);
2149   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
2150   __ Ret(eq);
2151 
2152   // Handle not identical strings.
2153 
2154   // Check that both strings are internalized strings. If they are, we're done
2155   // because we already know they are not identical. We know they are both
2156   // strings.
2157   if (equality) {
2158     DCHECK(GetCondition() == eq);
2159     STATIC_ASSERT(kInternalizedTag == 0);
2160     __ orr(tmp3, tmp1, Operand(tmp2));
2161     __ tst(tmp3, Operand(kIsNotInternalizedMask));
2162     // Make sure r0 is non-zero. At this point input operands are
2163     // guaranteed to be non-zero.
2164     DCHECK(right.is(r0));
2165     __ Ret(eq);
2166   }
2167 
2168   // Check that both strings are sequential one-byte.
2169   Label runtime;
2170   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2171                                                     &runtime);
2172 
2173   // Compare flat one-byte strings. Returns when done.
2174   if (equality) {
2175     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2176                                                   tmp3);
2177   } else {
2178     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2179                                                     tmp2, tmp3, tmp4);
2180   }
2181 
2182   // Handle more complex cases in runtime.
2183   __ bind(&runtime);
2184   if (equality) {
2185     {
2186       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2187       __ Push(left, right);
2188       __ CallRuntime(Runtime::kStringEqual);
2189     }
2190     __ LoadRoot(r1, Heap::kTrueValueRootIndex);
2191     __ sub(r0, r0, r1);
2192     __ Ret();
2193   } else {
2194     __ Push(left, right);
2195     __ TailCallRuntime(Runtime::kStringCompare);
2196   }
2197 
2198   __ bind(&miss);
2199   GenerateMiss(masm);
2200 }
2201 
2202 
2203 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2204   DCHECK_EQ(CompareICState::RECEIVER, state());
2205   Label miss;
2206   __ and_(r2, r1, Operand(r0));
2207   __ JumpIfSmi(r2, &miss);
2208 
2209   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2210   __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
2211   __ b(lt, &miss);
2212   __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
2213   __ b(lt, &miss);
2214 
2215   DCHECK(GetCondition() == eq);
2216   __ sub(r0, r0, Operand(r1));
2217   __ Ret();
2218 
2219   __ bind(&miss);
2220   GenerateMiss(masm);
2221 }
2222 
2223 
2224 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2225   Label miss;
2226   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2227   __ and_(r2, r1, Operand(r0));
2228   __ JumpIfSmi(r2, &miss);
2229   __ GetWeakValue(r4, cell);
2230   __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
2231   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2232   __ cmp(r2, r4);
2233   __ b(ne, &miss);
2234   __ cmp(r3, r4);
2235   __ b(ne, &miss);
2236 
2237   if (Token::IsEqualityOp(op())) {
2238     __ sub(r0, r0, Operand(r1));
2239     __ Ret();
2240   } else {
2241     if (op() == Token::LT || op() == Token::LTE) {
2242       __ mov(r2, Operand(Smi::FromInt(GREATER)));
2243     } else {
2244       __ mov(r2, Operand(Smi::FromInt(LESS)));
2245     }
2246     __ Push(r1, r0, r2);
2247     __ TailCallRuntime(Runtime::kCompare);
2248   }
2249 
2250   __ bind(&miss);
2251   GenerateMiss(masm);
2252 }
2253 
2254 
2255 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2256   {
2257     // Call the runtime system in a fresh internal frame.
2258     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2259     __ Push(r1, r0);
2260     __ Push(lr, r1, r0);
2261     __ mov(ip, Operand(Smi::FromInt(op())));
2262     __ push(ip);
2263     __ CallRuntime(Runtime::kCompareIC_Miss);
2264     // Compute the entry point of the rewritten stub.
2265     __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
2266     // Restore registers.
2267     __ pop(lr);
2268     __ Pop(r1, r0);
2269   }
2270 
2271   __ Jump(r2);
2272 }
2273 
2274 
2275 void DirectCEntryStub::Generate(MacroAssembler* masm) {
2276   // Place the return address on the stack, making the call
2277   // GC safe. The RegExp backend also relies on this.
2278   __ str(lr, MemOperand(sp, 0));
2279   __ blx(ip);  // Call the C++ function.
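       // Return by reloading pc from the stack slot rather than through lr, so a
       // GC that moves the calling code during the call can fix up the address.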
2280   __ ldr(pc, MemOperand(sp, 0));
2281 }
2282 
2283 
2284 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2285                                     Register target) {
2286   intptr_t code =
2287       reinterpret_cast<intptr_t>(GetCode().location());
2288   __ Move(ip, target);
2289   __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
2290   __ blx(lr);  // Call the stub.
2291 }
2292 
2293 
2294 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2295                                                       Label* miss,
2296                                                       Label* done,
2297                                                       Register receiver,
2298                                                       Register properties,
2299                                                       Handle<Name> name,
2300                                                       Register scratch0) {
2301   DCHECK(name->IsUniqueName());
2302   // If the names of the slots in the range from 1 to kProbes - 1 for the hash
2303   // value are not equal to the name and the kProbes-th slot is not used (its
2304   // name is the undefined value), the hash table is guaranteed not to contain
2305   // the property. This holds even if some slots represent deleted properties
2306   // (their names are the hole value).
2307   for (int i = 0; i < kInlinedProbes; i++) {
2308     // scratch0 points to properties hash.
2309     // Compute the masked index: (hash + i + i * i) & mask.
2310     Register index = scratch0;
2311     // Capacity is smi 2^n.
2312     __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
2313     __ sub(index, index, Operand(1));
2314     __ and_(index, index, Operand(
2315         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
2316 
2317     // Scale the index by multiplying by the entry size.
2318     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2319     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
2320 
2321     Register entity_name = scratch0;
2322     // Having undefined at this place means the name is not contained.
2323     STATIC_ASSERT(kSmiTagSize == 1);
2324     Register tmp = properties;
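         // index is a Smi (value << 1); one more left shift turns it into the byte
         // offset masked_index * kEntrySize * kPointerSize (kPointerSize is 4).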
2325     __ add(tmp, properties, Operand(index, LSL, 1));
2326     __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2327 
2328     DCHECK(!tmp.is(entity_name));
2329     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2330     __ cmp(entity_name, tmp);
2331     __ b(eq, done);
2332 
2333     // Load the hole ready for use below:
2334     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2335 
2336     // Stop if we found the property.
2337     __ cmp(entity_name, Operand(Handle<Name>(name)));
2338     __ b(eq, miss);
2339 
2340     Label good;
2341     __ cmp(entity_name, tmp);
2342     __ b(eq, &good);
2343 
2344     // Check if the entry name is not a unique name.
2345     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2346     __ ldrb(entity_name,
2347             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2348     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2349     __ bind(&good);
2350 
2351     // Restore the properties.
2352     __ ldr(properties,
2353            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2354   }
2355 
2356   const int spill_mask =
2357       (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
2358        r2.bit() | r1.bit() | r0.bit());
2359 
2360   __ stm(db_w, sp, spill_mask);
2361   __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2362   __ mov(r1, Operand(Handle<Name>(name)));
2363   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2364   __ CallStub(&stub);
2365   __ cmp(r0, Operand::Zero());
2366   __ ldm(ia_w, sp, spill_mask);
2367 
2368   __ b(eq, done);
2369   __ b(ne, miss);
2370 }
2371 
2372 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2373   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
2374   // we cannot call anything that could cause a GC from this stub.
2375   // Registers:
2376   //  result: NameDictionary to probe
2377   //  r1: key
2378   //  dictionary: NameDictionary to probe.
2379   //  index: will hold an index of entry if lookup is successful.
2380   //         might alias with result_.
2381   // Returns:
2382   //  result_ is zero if lookup failed, non-zero otherwise.
2383 
2384   Register result = r0;
2385   Register dictionary = r0;
2386   Register key = r1;
2387   Register index = r2;
2388   Register mask = r3;
2389   Register hash = r4;
2390   Register undefined = r5;
2391   Register entry_key = r6;
2392 
2393   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2394 
2395   __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
2396   __ SmiUntag(mask);
2397   __ sub(mask, mask, Operand(1));
2398 
2399   __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2400 
2401   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2402 
2403   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2404     // Compute the masked index: (hash + i + i * i) & mask.
2405     // Capacity is smi 2^n.
2406     if (i > 0) {
2407       // Add the probe offset (i + i * i) left shifted to avoid right shifting
2408       // the hash in a separate instruction. The value hash + i + i * i is right
2409       // shifted in the following 'and' instruction.
2410       DCHECK(NameDictionary::GetProbeOffset(i) <
2411              1 << (32 - Name::kHashFieldOffset));
2412       __ add(index, hash, Operand(
2413           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2414     } else {
2415       __ mov(index, Operand(hash));
2416     }
2417     __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
2418 
2419     // Scale the index by multiplying by the entry size.
2420     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2421     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
2422 
2423     STATIC_ASSERT(kSmiTagSize == 1);
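         // Unlike the negative-lookup helper above, index is untagged here, so
         // LSL #2 scales it by kPointerSize to get the byte offset.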
2424     __ add(index, dictionary, Operand(index, LSL, 2));
2425     __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
2426 
2427     // Having undefined at this place means the name is not contained.
2428     __ cmp(entry_key, Operand(undefined));
2429     __ b(eq, &not_in_dictionary);
2430 
2431     // Stop if we found the property.
2432     __ cmp(entry_key, Operand(key));
2433     __ b(eq, &in_dictionary);
2434 
2435     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2436       // Check if the entry name is not a unique name.
2437       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2438       __ ldrb(entry_key,
2439               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2440       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2441     }
2442   }
2443 
2444   __ bind(&maybe_in_dictionary);
2445   // If we are doing negative lookup then probing failure should be
2446   // treated as a lookup success. For positive lookup probing failure
2447   // should be treated as lookup failure.
2448   if (mode() == POSITIVE_LOOKUP) {
2449     __ mov(result, Operand::Zero());
2450     __ Ret();
2451   }
2452 
2453   __ bind(&in_dictionary);
2454   __ mov(result, Operand(1));
2455   __ Ret();
2456 
2457   __ bind(&not_in_dictionary);
2458   __ mov(result, Operand::Zero());
2459   __ Ret();
2460 }
2461 
2462 
2463 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
2464     Isolate* isolate) {
2465   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
2466   stub1.GetCode();
2467   // Hydrogen code stubs need stub2 at snapshot time.
2468   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
2469   stub2.GetCode();
2470 }
2471 
2472 
2473 // Takes the input in 3 registers: address_, value_ and object_.  A pointer to
2474 // the value has just been written into the object, now this stub makes sure
2475 // we keep the GC informed.  The word in the object where the value has been
2476 // written is in the address register.
2477 void RecordWriteStub::Generate(MacroAssembler* masm) {
2478   Label skip_to_incremental_noncompacting;
2479   Label skip_to_incremental_compacting;
2480 
2481   // The first two instructions are generated with labels so as to get the
2482   // offset fixed up correctly by the bind(Label*) call.  We patch it back and
2483   // forth between a compare instruction (a nop in this position) and the
2484   // real branch when we start and stop incremental heap marking.
2485   // See RecordWriteStub::Patch for details.
2486   {
2487     // Block literal pool emission, as the position of these two instructions
2488     // is assumed by the patching code.
2489     Assembler::BlockConstPoolScope block_const_pool(masm);
2490     __ b(&skip_to_incremental_noncompacting);
2491     __ b(&skip_to_incremental_compacting);
2492   }
2493 
2494   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2495     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2496                            MacroAssembler::kReturnAtEnd);
2497   }
2498   __ Ret();
2499 
2500   __ bind(&skip_to_incremental_noncompacting);
2501   GenerateIncremental(masm, INCREMENTAL);
2502 
2503   __ bind(&skip_to_incremental_compacting);
2504   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2505 
2506   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
2507   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
2508   DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
2509   DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
2510   PatchBranchIntoNop(masm, 0);
2511   PatchBranchIntoNop(masm, Assembler::kInstrSize);
2512 }
2513 
2514 
2515 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2516   regs_.Save(masm);
2517 
2518   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2519     Label dont_need_remembered_set;
2520 
2521     __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
2522     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
2523                            regs_.scratch0(),
2524                            &dont_need_remembered_set);
2525 
2526     __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
2527                         &dont_need_remembered_set);
2528 
2529     // First notify the incremental marker if necessary, then update the
2530     // remembered set.
2531     CheckNeedsToInformIncrementalMarker(
2532         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2533     InformIncrementalMarker(masm);
2534     regs_.Restore(masm);
2535     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2536                            MacroAssembler::kReturnAtEnd);
2537 
2538     __ bind(&dont_need_remembered_set);
2539   }
2540 
2541   CheckNeedsToInformIncrementalMarker(
2542       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2543   InformIncrementalMarker(masm);
2544   regs_.Restore(masm);
2545   __ Ret();
2546 }
2547 
2548 
2549 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2550   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2551   int argument_count = 3;
2552   __ PrepareCallCFunction(argument_count, regs_.scratch0());
2553   Register address =
2554       r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
2555   DCHECK(!address.is(regs_.object()));
2556   DCHECK(!address.is(r0));
2557   __ Move(address, regs_.address());
2558   __ Move(r0, regs_.object());
2559   __ Move(r1, address);
2560   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
2561 
2562   AllowExternalCallThatCantCauseGC scope(masm);
2563   __ CallCFunction(
2564       ExternalReference::incremental_marking_record_write_function(isolate()),
2565       argument_count);
2566   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
2567 }
2568 
2569 
2570 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
2571     MacroAssembler* masm,
2572     OnNoNeedToInformIncrementalMarker on_no_need,
2573     Mode mode) {
2574   Label on_black;
2575   Label need_incremental;
2576   Label need_incremental_pop_scratch;
2577 
2578   // Let's look at the color of the object:  If it is not black we don't have
2579   // to inform the incremental marker.
2580   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
2581 
2582   regs_.Restore(masm);
2583   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2584     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2585                            MacroAssembler::kReturnAtEnd);
2586   } else {
2587     __ Ret();
2588   }
2589 
2590   __ bind(&on_black);
2591 
2592   // Get the value from the slot.
2593   __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
2594 
2595   if (mode == INCREMENTAL_COMPACTION) {
2596     Label ensure_not_white;
2597 
2598     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
2599                      regs_.scratch1(),  // Scratch.
2600                      MemoryChunk::kEvacuationCandidateMask,
2601                      eq,
2602                      &ensure_not_white);
2603 
2604     __ CheckPageFlag(regs_.object(),
2605                      regs_.scratch1(),  // Scratch.
2606                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
2607                      eq,
2608                      &need_incremental);
2609 
2610     __ bind(&ensure_not_white);
2611   }
2612 
2613   // We need extra registers for this, so we push the object and the address
2614   // register temporarily.
2615   __ Push(regs_.object(), regs_.address());
2616   __ JumpIfWhite(regs_.scratch0(),  // The value.
2617                  regs_.scratch1(),  // Scratch.
2618                  regs_.object(),    // Scratch.
2619                  regs_.address(),   // Scratch.
2620                  &need_incremental_pop_scratch);
2621   __ Pop(regs_.object(), regs_.address());
2622 
2623   regs_.Restore(masm);
2624   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2625     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2626                            MacroAssembler::kReturnAtEnd);
2627   } else {
2628     __ Ret();
2629   }
2630 
2631   __ bind(&need_incremental_pop_scratch);
2632   __ Pop(regs_.object(), regs_.address());
2633 
2634   __ bind(&need_incremental);
2635 
2636   // Fall through when we need to inform the incremental marker.
2637 }
2638 
2639 
2640 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
2641   CEntryStub ces(isolate(), 1, kSaveFPRegs);
2642   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
2643   int parameter_count_offset =
2644       StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
2645   __ ldr(r1, MemOperand(fp, parameter_count_offset));
2646   if (function_mode() == JS_FUNCTION_STUB_MODE) {
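         // In this mode the receiver is also on the stack and is not included in
         // the argument count, so pop one extra slot.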
2647     __ add(r1, r1, Operand(1));
2648   }
2649   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
2650   __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
2651   __ add(sp, sp, r1);
2652   __ Ret();
2653 }
2654 
2655 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
2656   if (masm->isolate()->function_entry_hook() != NULL) {
2657     ProfileEntryHookStub stub(masm->isolate());
2658     PredictableCodeSizeScope predictable(masm);
2659     predictable.ExpectSize(masm->CallStubSize(&stub) +
2660                            2 * Assembler::kInstrSize);
2661     __ push(lr);
2662     __ CallStub(&stub);
2663     __ pop(lr);
2664   }
2665 }
2666 
2667 
2668 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
2669   // The entry hook is a "push lr" instruction, followed by a call.
2670   const int32_t kReturnAddressDistanceFromFunctionStart =
2671       3 * Assembler::kInstrSize;
2672 
2673   // This should contain all kCallerSaved registers.
2674   const RegList kSavedRegs =
2675       1 <<  0 |  // r0
2676       1 <<  1 |  // r1
2677       1 <<  2 |  // r2
2678       1 <<  3 |  // r3
2679       1 <<  5 |  // r5
2680       1 <<  9;   // r9
2681   // We also save lr, so the count here is one higher than the mask indicates.
2682   const int32_t kNumSavedRegs = 7;
2683 
2684   DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
2685 
2686   // Save all caller-save registers as this may be called from anywhere.
2687   __ stm(db_w, sp, kSavedRegs | lr.bit());
2688 
2689   // Compute the function's address for the first argument.
2690   __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
2691 
2692   // The caller's return address is above the saved temporaries.
2693   // Grab that for the second argument to the hook.
2694   __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
2695 
2696   // Align the stack if necessary.
2697   int frame_alignment = masm->ActivationFrameAlignment();
2698   if (frame_alignment > kPointerSize) {
2699     __ mov(r5, sp);
2700     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2701     __ and_(sp, sp, Operand(-frame_alignment));
2702   }
2703 
2704 #if V8_HOST_ARCH_ARM
2705   int32_t entry_hook =
2706       reinterpret_cast<int32_t>(isolate()->function_entry_hook());
2707   __ mov(ip, Operand(entry_hook));
2708 #else
2709   // Under the simulator we need to indirect the entry hook through a
2710   // trampoline function at a known address.
2711   // It additionally takes an isolate as a third parameter.
2712   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
2713 
2714   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
2715   __ mov(ip, Operand(ExternalReference(&dispatcher,
2716                                        ExternalReference::BUILTIN_CALL,
2717                                        isolate())));
2718 #endif
2719   __ Call(ip);
2720 
2721   // Restore the stack pointer if needed.
2722   if (frame_alignment > kPointerSize) {
2723     __ mov(sp, r5);
2724   }
2725 
2726   // Also pop pc to get Ret(0).
2727   __ ldm(ia_w, sp, kSavedRegs | pc.bit());
2728 }
2729 
2730 
2731 template<class T>
CreateArrayDispatch(MacroAssembler * masm,AllocationSiteOverrideMode mode)2732 static void CreateArrayDispatch(MacroAssembler* masm,
2733                                 AllocationSiteOverrideMode mode) {
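  // With DISABLE_ALLOCATION_SITES a single stub specialized for the initial
  // fast elements kind is enough; with DONT_OVERRIDE the kind in r3 is
  // dispatched through a compare/conditional-tail-call chain over every fast
  // elements kind.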
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r0 - number of arguments
  // r1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
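    // The asserts above pin the layout: each packed kind is immediately
    // followed by its holey variant, so a set low bit means the kind is
    // already holey and kind + 1 turns a packed kind into the holey one.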

    // Is the low bit set? If so, we are holey and that is good.
    __ tst(r3, Operand(1));
    __ b(ne, &normal_sequence);
  }

  // Look at the first argument.
  __ ldr(r5, MemOperand(sp, 0));
  __ cmp(r5, Operand::Zero());
  __ b(eq, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ add(r3, r3, Operand(1));

    if (FLAG_debug_code) {
      __ ldr(r5, FieldMemOperand(r2, 0));
      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field; the upper bits need to be left
    // alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
    __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ tst(r0, r0);
  __ b(ne, &not_zero_case);
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ cmp(r0, Operand(1));
  __ b(gt, &not_one_case);
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc (only if argument_count() == ANY)
  //  -- r1 : constructor
  //  -- r2 : AllocationSite or undefined
  //  -- r3 : new target
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // The smi-tag check below catches both a NULL and a Smi.
    __ tst(r4, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r4, r4, r5, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(r2, r4);
  }

  // Enter the context of the Array function.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  Label subclassing;
  __ cmp(r3, r1);
  __ b(ne, &subclassing);

  Label no_info;
  // Get the elements kind and dispatch on that.
  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);

  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

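  // Subclassing case (new.target != the Array function itself): everything is
  // handed over to the runtime. The constructor is stored on the stack just
  // above the arguments, the new target and AllocationSite are pushed, and
  // argc is bumped by 3 to cover the three extra values before tail-calling
  // Runtime::kNewArray.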
  __ bind(&subclassing);
  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
  __ add(r0, r0, Operand(3));
  __ Push(r3, r2);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  __ cmp(r0, Operand(1));
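  // One comparison feeds all three cases below: lo (argc < 1) takes the
  // no-argument stub, hi (argc > 1) takes the N-arguments stub, and argc == 1
  // falls through to the single-argument stubs.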

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo);

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, hi);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ ldr(r3, MemOperand(sp, 0));
    __ cmp(r3, Operand::Zero());

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc
  //  -- r1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // The smi-tag check below catches both a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r3);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(r3, Operand(FAST_ELEMENTS));
    __ b(eq, &done);
    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(r3, Operand(FAST_ELEMENTS));
  __ b(eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates HandleScope, extracts returned value
// from handle, and propagates exceptions. Restores context. stack_space is
// the space to be unwound on exit (includes the JS call arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand,
                                     MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(r1) || function_address.is(r2));

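  // When the profiler is active, the call is routed through the profiling
  // thunk (thunk_ref), which receives the real callback address as an extra
  // parameter in r3; otherwise the callback is called directly.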
  Label profiler_disabled;
  Label end_profiler_check;
  __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ ldrb(r9, MemOperand(r9, 0));
  __ cmp(r9, Operand(0));
  __ b(eq, &profiler_disabled);

  // Additional parameter is the address of the actual callback.
  __ mov(r3, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ Move(r3, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
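  // r4/r5/r6 cache handle_scope.next, .limit and .level across the call
  // (they are callee-saved); the level is bumped here and checked and
  // decremented again after the callback returns.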
  __ mov(r9, Operand(next_address));
  __ ldr(r4, MemOperand(r9, kNextOffset));
  __ ldr(r5, MemOperand(r9, kLimitOffset));
  __ ldr(r6, MemOperand(r9, kLevelOffset));
  __ add(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, r3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ ldr(r0, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ str(r4, MemOperand(r9, kNextOffset));
  if (__ emit_debug_code()) {
    __ ldr(r1, MemOperand(r9, kLevelOffset));
    __ cmp(r1, r6);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ sub(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));
  __ ldr(ip, MemOperand(r9, kLimitOffset));
  __ cmp(r5, ip);
  __ b(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ ldr(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ ldr(r4, *stack_space_operand);
  } else {
    __ mov(r4, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ ldr(r5, MemOperand(ip));
  __ cmp(r4, r5);
  __ b(ne, &promote_scheduled_exception);

  __ mov(pc, lr);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ str(r5, MemOperand(r9, kLimitOffset));
  __ mov(r4, r0);
  __ PrepareCallCFunction(1, r5);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(r0, r4);
  __ jmp(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0                  : callee
  //  -- r4                  : call_data
  //  -- r2                  : holder
  //  -- r1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = r0;
  Register call_data = r4;
  Register holder = r2;
  Register api_function_address = r1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);
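  // The pushes below build the FunctionCallbackArguments array from the
  // highest index down: new target, context, callee, call data, return value,
  // return value default, isolate, holder; the holder (index 0) ends up
  // closest to sp, matching the indices asserted above.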

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context save
  __ push(context);
  if (!is_lazy()) {
    // load context from callee
    __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space
  // since it's not controlled by GC.
  const int kApiStackSpace = 3;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
  // r0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ str(scratch, MemOperand(r0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ mov(ip, Operand(argc()));
  __ str(ip, MemOperand(r0, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand length_operand = MemOperand(sp, 3 * kPointerSize);
  MemOperand* stack_space_operand = &length_operand;
  stack_space = argc() + FCA::kArgsLength + 1;
  stack_space_operand = NULL;
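  // argc() is a compile-time constant here, so the unwind space is the
  // constant argc() + FCA::kArgsLength + 1 (presumably the receiver); the
  // length MemOperand set up above ends up unused because stack_space_operand
  // is reset to NULL.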

  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
  // name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
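  // The pushes below lay the args_ array out from kThisIndex down to
  // kShouldThrowOnErrorIndex (receiver, data, return value, return value
  // default, isolate, holder, should_throw_on_error), with the property name
  // handle pushed last, below the array.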

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r4;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r2;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Push(scratch, scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ Push(scratch, holder);
  __ Push(Smi::kZero);  // should_throw_on_error -> false
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);
  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ mov(r0, sp);                             // r0 = Handle<Name>
  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = v8::PCI::args_

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ str(r1, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ ldr(api_function_address,
         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM