• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_MIPS
6 
7 #include "src/base/bits.h"
8 #include "src/bootstrapper.h"
9 #include "src/code-stubs.h"
10 #include "src/codegen.h"
11 #include "src/ic/handler-compiler.h"
12 #include "src/ic/ic.h"
13 #include "src/ic/stub-cache.h"
14 #include "src/isolate.h"
15 #include "src/mips/code-stubs-mips.h"
16 #include "src/regexp/jsregexp.h"
17 #include "src/regexp/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
19 
20 namespace v8 {
21 namespace internal {
22 
23 
InitializeArrayConstructorDescriptor(Isolate * isolate,CodeStubDescriptor * descriptor,int constant_stack_parameter_count)24 static void InitializeArrayConstructorDescriptor(
25     Isolate* isolate, CodeStubDescriptor* descriptor,
26     int constant_stack_parameter_count) {
27   Address deopt_handler = Runtime::FunctionForId(
28       Runtime::kArrayConstructor)->entry;
29 
30   if (constant_stack_parameter_count == 0) {
31     descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32                            JS_FUNCTION_STUB_MODE);
33   } else {
34     descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
35                            JS_FUNCTION_STUB_MODE);
36   }
37 }
38 
39 
InitializeInternalArrayConstructorDescriptor(Isolate * isolate,CodeStubDescriptor * descriptor,int constant_stack_parameter_count)40 static void InitializeInternalArrayConstructorDescriptor(
41     Isolate* isolate, CodeStubDescriptor* descriptor,
42     int constant_stack_parameter_count) {
43   Address deopt_handler = Runtime::FunctionForId(
44       Runtime::kInternalArrayConstructor)->entry;
45 
46   if (constant_stack_parameter_count == 0) {
47     descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48                            JS_FUNCTION_STUB_MODE);
49   } else {
50     descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
51                            JS_FUNCTION_STUB_MODE);
52   }
53 }
54 
55 
InitializeDescriptor(CodeStubDescriptor * descriptor)56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57     CodeStubDescriptor* descriptor) {
58   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
59 }
60 
61 
InitializeDescriptor(CodeStubDescriptor * descriptor)62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63     CodeStubDescriptor* descriptor) {
64   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
65 }
66 
67 
InitializeDescriptor(CodeStubDescriptor * descriptor)68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69     CodeStubDescriptor* descriptor) {
70   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
71 }
72 
73 
InitializeDescriptor(CodeStubDescriptor * descriptor)74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75     CodeStubDescriptor* descriptor) {
76   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
77 }
78 
79 
InitializeDescriptor(CodeStubDescriptor * descriptor)80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81     CodeStubDescriptor* descriptor) {
82   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
83 }
84 
85 
InitializeDescriptor(CodeStubDescriptor * descriptor)86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87     CodeStubDescriptor* descriptor) {
88   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
89 }
90 
91 
92 #define __ ACCESS_MASM(masm)
93 
94 
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
96                                           Condition cc, Strength strength);
97 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
98                                     Register lhs,
99                                     Register rhs,
100                                     Label* rhs_not_nan,
101                                     Label* slow,
102                                     bool strict);
103 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
104                                            Register lhs,
105                                            Register rhs);
106 
107 
GenerateLightweightMiss(MacroAssembler * masm,ExternalReference miss)108 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
109                                                ExternalReference miss) {
110   // Update the static counter each time a new code stub is generated.
111   isolate()->counters()->code_stubs()->Increment();
112 
113   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
114   int param_count = descriptor.GetRegisterParameterCount();
115   {
116     // Call the runtime system in a fresh internal frame.
117     FrameScope scope(masm, StackFrame::INTERNAL);
118     DCHECK(param_count == 0 ||
119            a0.is(descriptor.GetRegisterParameter(param_count - 1)));
120     // Push arguments, adjust sp.
121     __ Subu(sp, sp, Operand(param_count * kPointerSize));
122     for (int i = 0; i < param_count; ++i) {
123       // Store argument to stack.
124       __ sw(descriptor.GetRegisterParameter(i),
125             MemOperand(sp, (param_count - 1 - i) * kPointerSize));
126     }
127     __ CallExternalReference(miss, param_count);
128   }
129 
130   __ Ret();
131 }
132 
133 
Generate(MacroAssembler * masm)134 void DoubleToIStub::Generate(MacroAssembler* masm) {
135   Label out_of_range, only_low, negate, done;
136   Register input_reg = source();
137   Register result_reg = destination();
138 
139   int double_offset = offset();
140   // Account for saved regs if input is sp.
141   if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
142 
143   Register scratch =
144       GetRegisterThatIsNotOneOf(input_reg, result_reg);
145   Register scratch2 =
146       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
147   Register scratch3 =
148       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
149   DoubleRegister double_scratch = kLithiumScratchDouble;
150 
151   __ Push(scratch, scratch2, scratch3);
152 
153   if (!skip_fastpath()) {
154     // Load double input.
155     __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
156 
157     // Clear cumulative exception flags and save the FCSR.
158     __ cfc1(scratch2, FCSR);
159     __ ctc1(zero_reg, FCSR);
160 
161     // Try a conversion to a signed integer.
162     __ Trunc_w_d(double_scratch, double_scratch);
163     // Move the converted value into the result register.
164     __ mfc1(scratch3, double_scratch);
165 
166     // Retrieve and restore the FCSR.
167     __ cfc1(scratch, FCSR);
168     __ ctc1(scratch2, FCSR);
169 
170     // Check for overflow and NaNs.
171     __ And(
172         scratch, scratch,
173         kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
174            | kFCSRInvalidOpFlagMask);
175     // If we had no exceptions then set result_reg and we are done.
176     Label error;
177     __ Branch(&error, ne, scratch, Operand(zero_reg));
178     __ Move(result_reg, scratch3);
179     __ Branch(&done);
180     __ bind(&error);
181   }
182 
183   // Load the double value and perform a manual truncation.
184   Register input_high = scratch2;
185   Register input_low = scratch3;
186 
187   __ lw(input_low,
188       MemOperand(input_reg, double_offset + Register::kMantissaOffset));
189   __ lw(input_high,
190       MemOperand(input_reg, double_offset + Register::kExponentOffset));
191 
192   Label normal_exponent, restore_sign;
193   // Extract the biased exponent in result.
194   __ Ext(result_reg,
195          input_high,
196          HeapNumber::kExponentShift,
197          HeapNumber::kExponentBits);
198 
199   // Check for Infinity and NaNs, which should return 0.
200   __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
201   __ Movz(result_reg, zero_reg, scratch);
202   __ Branch(&done, eq, scratch, Operand(zero_reg));
203 
204   // Express exponent as delta to (number of mantissa bits + 31).
205   __ Subu(result_reg,
206           result_reg,
207           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
208 
209   // If the delta is strictly positive, all bits would be shifted away,
210   // which means that we can return 0.
211   __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
212   __ mov(result_reg, zero_reg);
213   __ Branch(&done);
214 
215   __ bind(&normal_exponent);
216   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
217   // Calculate shift.
218   __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
219 
220   // Save the sign.
221   Register sign = result_reg;
222   result_reg = no_reg;
223   __ And(sign, input_high, Operand(HeapNumber::kSignMask));
224 
225   // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
226   // to check for this specific case.
227   Label high_shift_needed, high_shift_done;
228   __ Branch(&high_shift_needed, lt, scratch, Operand(32));
229   __ mov(input_high, zero_reg);
230   __ Branch(&high_shift_done);
231   __ bind(&high_shift_needed);
232 
233   // Set the implicit 1 before the mantissa part in input_high.
234   __ Or(input_high,
235         input_high,
236         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
237   // Shift the mantissa bits to the correct position.
238   // We don't need to clear non-mantissa bits as they will be shifted away.
239   // If they weren't, it would mean that the answer is in the 32bit range.
240   __ sllv(input_high, input_high, scratch);
241 
242   __ bind(&high_shift_done);
243 
244   // Replace the shifted bits with bits from the lower mantissa word.
245   Label pos_shift, shift_done;
246   __ li(at, 32);
247   __ subu(scratch, at, scratch);
248   __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
249 
250   // Negate scratch.
251   __ Subu(scratch, zero_reg, scratch);
252   __ sllv(input_low, input_low, scratch);
253   __ Branch(&shift_done);
254 
255   __ bind(&pos_shift);
256   __ srlv(input_low, input_low, scratch);
257 
258   __ bind(&shift_done);
259   __ Or(input_high, input_high, Operand(input_low));
260   // Restore sign if necessary.
261   __ mov(scratch, sign);
262   result_reg = sign;
263   sign = no_reg;
264   __ Subu(result_reg, zero_reg, input_high);
265   __ Movz(result_reg, input_high, scratch);
266 
267   __ bind(&done);
268 
269   __ Pop(scratch, scratch2, scratch3);
270   __ Ret();
271 }
272 
273 
274 // Handle the case where the lhs and rhs are the same object.
275 // Equality is almost reflexive (everything but NaN), so this is a test
276 // for "identity and not NaN".
EmitIdenticalObjectComparison(MacroAssembler * masm,Label * slow,Condition cc,Strength strength)277 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
278                                           Condition cc, Strength strength) {
279   Label not_identical;
280   Label heap_number, return_equal;
281   Register exp_mask_reg = t5;
282 
283   __ Branch(&not_identical, ne, a0, Operand(a1));
284 
285   __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
286 
287   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
288   // so we do the second best thing - test it ourselves.
289   // They are both equal and they are not both Smis so both of them are not
290   // Smis. If it's not a heap number, then return equal.
291   __ GetObjectType(a0, t4, t4);
292   if (cc == less || cc == greater) {
293     // Call runtime on identical JSObjects.
294     __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
295     // Call runtime on identical symbols since we need to throw a TypeError.
296     __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
297     // Call runtime on identical SIMD values since we must throw a TypeError.
298     __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
299     if (is_strong(strength)) {
300       // Call the runtime on anything that is converted in the semantics, since
301       // we need to throw a TypeError. Smis have already been ruled out.
302       __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
303       __ And(t4, t4, Operand(kIsNotStringMask));
304       __ Branch(slow, ne, t4, Operand(zero_reg));
305     }
306   } else {
307     __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
308     // Comparing JS objects with <=, >= is complicated.
309     if (cc != eq) {
310     __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
311     // Call runtime on identical symbols since we need to throw a TypeError.
312     __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
313     // Call runtime on identical SIMD values since we must throw a TypeError.
314     __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
315     if (is_strong(strength)) {
316       // Call the runtime on anything that is converted in the semantics,
317       // since we need to throw a TypeError. Smis and heap numbers have
318       // already been ruled out.
319       __ And(t4, t4, Operand(kIsNotStringMask));
320       __ Branch(slow, ne, t4, Operand(zero_reg));
321     }
322       // Normally here we fall through to return_equal, but undefined is
323       // special: (undefined == undefined) == true, but
324       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
325       if (cc == less_equal || cc == greater_equal) {
326         __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
327         __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
328         __ Branch(&return_equal, ne, a0, Operand(t2));
329         DCHECK(is_int16(GREATER) && is_int16(LESS));
330         __ Ret(USE_DELAY_SLOT);
331         if (cc == le) {
332           // undefined <= undefined should fail.
333           __ li(v0, Operand(GREATER));
334         } else  {
335           // undefined >= undefined should fail.
336           __ li(v0, Operand(LESS));
337         }
338       }
339     }
340   }
341 
342   __ bind(&return_equal);
343   DCHECK(is_int16(GREATER) && is_int16(LESS));
344   __ Ret(USE_DELAY_SLOT);
345   if (cc == less) {
346     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
347   } else if (cc == greater) {
348     __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
349   } else {
350     __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
351   }
352 
353   // For less and greater we don't have to check for NaN since the result of
354   // x < x is false regardless.  For the others here is some code to check
355   // for NaN.
356   if (cc != lt && cc != gt) {
357     __ bind(&heap_number);
358     // It is a heap number, so return non-equal if it's NaN and equal if it's
359     // not NaN.
360 
361     // The representation of NaN values has all exponent bits (52..62) set,
362     // and not all mantissa bits (0..51) clear.
363     // Read top bits of double representation (second word of value).
364     __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
365     // Test that exponent bits are all set.
366     __ And(t3, t2, Operand(exp_mask_reg));
367     // If all bits not set (ne cond), then not a NaN, objects are equal.
368     __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
369 
370     // Shift out flag and all exponent bits, retaining only mantissa.
371     __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
372     // Or with all low-bits of mantissa.
373     __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
374     __ Or(v0, t3, Operand(t2));
375     // For equal we already have the right value in v0:  Return zero (equal)
376     // if all bits in mantissa are zero (it's an Infinity) and non-zero if
377     // not (it's a NaN).  For <= and >= we need to load v0 with the failing
378     // value if it's a NaN.
379     if (cc != eq) {
380       // All-zero means Infinity means equal.
381       __ Ret(eq, v0, Operand(zero_reg));
382       DCHECK(is_int16(GREATER) && is_int16(LESS));
383       __ Ret(USE_DELAY_SLOT);
384       if (cc == le) {
385         __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
386       } else {
387         __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
388       }
389     }
390   }
391   // No fall through here.
392 
393   __ bind(&not_identical);
394 }
395 
396 
EmitSmiNonsmiComparison(MacroAssembler * masm,Register lhs,Register rhs,Label * both_loaded_as_doubles,Label * slow,bool strict)397 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
398                                     Register lhs,
399                                     Register rhs,
400                                     Label* both_loaded_as_doubles,
401                                     Label* slow,
402                                     bool strict) {
403   DCHECK((lhs.is(a0) && rhs.is(a1)) ||
404          (lhs.is(a1) && rhs.is(a0)));
405 
406   Label lhs_is_smi;
407   __ JumpIfSmi(lhs, &lhs_is_smi);
408   // Rhs is a Smi.
409   // Check whether the non-smi is a heap number.
410   __ GetObjectType(lhs, t4, t4);
411   if (strict) {
412     // If lhs was not a number and rhs was a Smi then strict equality cannot
413     // succeed. Return non-equal (lhs is already not zero).
414     __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
415     __ mov(v0, lhs);
416   } else {
417     // Smi compared non-strictly with a non-Smi non-heap-number. Call
418     // the runtime.
419     __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
420   }
421 
422   // Rhs is a smi, lhs is a number.
423   // Convert smi rhs to double.
424   __ sra(at, rhs, kSmiTagSize);
425   __ mtc1(at, f14);
426   __ cvt_d_w(f14, f14);
427   __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
428 
429   // We now have both loaded as doubles.
430   __ jmp(both_loaded_as_doubles);
431 
432   __ bind(&lhs_is_smi);
433   // Lhs is a Smi.  Check whether the non-smi is a heap number.
434   __ GetObjectType(rhs, t4, t4);
435   if (strict) {
436     // If lhs was not a number and rhs was a Smi then strict equality cannot
437     // succeed. Return non-equal.
438     __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
439     __ li(v0, Operand(1));
440   } else {
441     // Smi compared non-strictly with a non-Smi non-heap-number. Call
442     // the runtime.
443     __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
444   }
445 
446   // Lhs is a smi, rhs is a number.
447   // Convert smi lhs to double.
448   __ sra(at, lhs, kSmiTagSize);
449   __ mtc1(at, f12);
450   __ cvt_d_w(f12, f12);
451   __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
452   // Fall through to both_loaded_as_doubles.
453 }
454 
455 
EmitStrictTwoHeapObjectCompare(MacroAssembler * masm,Register lhs,Register rhs)456 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
457                                            Register lhs,
458                                            Register rhs) {
459     // If either operand is a JS object or an oddball value, then they are
460     // not equal since their pointers are different.
461     // There is no test for undetectability in strict equality.
462     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
463     Label first_non_object;
464     // Get the type of the first operand into a2 and compare it with
465     // FIRST_JS_RECEIVER_TYPE.
466     __ GetObjectType(lhs, a2, a2);
467     __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
468 
469     // Return non-zero.
470     Label return_not_equal;
471     __ bind(&return_not_equal);
472     __ Ret(USE_DELAY_SLOT);
473     __ li(v0, Operand(1));
474 
475     __ bind(&first_non_object);
476     // Check for oddballs: true, false, null, undefined.
477     __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
478 
479     __ GetObjectType(rhs, a3, a3);
480     __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
481 
482     // Check for oddballs: true, false, null, undefined.
483     __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
484 
485     // Now that we have the types we might as well check for
486     // internalized-internalized.
487     STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
488     __ Or(a2, a2, Operand(a3));
489     __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
490     __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
491 }
492 
493 
EmitCheckForTwoHeapNumbers(MacroAssembler * masm,Register lhs,Register rhs,Label * both_loaded_as_doubles,Label * not_heap_numbers,Label * slow)494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
495                                        Register lhs,
496                                        Register rhs,
497                                        Label* both_loaded_as_doubles,
498                                        Label* not_heap_numbers,
499                                        Label* slow) {
500   __ GetObjectType(lhs, a3, a2);
501   __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
502   __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
503   // If first was a heap number & second wasn't, go to slow case.
504   __ Branch(slow, ne, a3, Operand(a2));
505 
506   // Both are heap numbers. Load them up then jump to the code we have
507   // for that.
508   __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
509   __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
510 
511   __ jmp(both_loaded_as_doubles);
512 }
513 
514 
515 // Fast negative check for internalized-to-internalized equality.
EmitCheckForInternalizedStringsOrObjects(MacroAssembler * masm,Register lhs,Register rhs,Label * possible_strings,Label * not_both_strings)516 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
517                                                      Register lhs,
518                                                      Register rhs,
519                                                      Label* possible_strings,
520                                                      Label* not_both_strings) {
521   DCHECK((lhs.is(a0) && rhs.is(a1)) ||
522          (lhs.is(a1) && rhs.is(a0)));
523 
524   // a2 is object type of rhs.
525   Label object_test;
526   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
527   __ And(at, a2, Operand(kIsNotStringMask));
528   __ Branch(&object_test, ne, at, Operand(zero_reg));
529   __ And(at, a2, Operand(kIsNotInternalizedMask));
530   __ Branch(possible_strings, ne, at, Operand(zero_reg));
531   __ GetObjectType(rhs, a3, a3);
532   __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
533   __ And(at, a3, Operand(kIsNotInternalizedMask));
534   __ Branch(possible_strings, ne, at, Operand(zero_reg));
535 
536   // Both are internalized strings. We already checked they weren't the same
537   // pointer so they are not equal.
538   __ Ret(USE_DELAY_SLOT);
539   __ li(v0, Operand(1));   // Non-zero indicates not equal.
540 
541   __ bind(&object_test);
542   __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
543   __ GetObjectType(rhs, a2, a3);
544   __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
545 
546   // If both objects are undetectable, they are equal.  Otherwise, they
547   // are not equal, since they are different objects and an object is not
548   // equal to undefined.
549   __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
550   __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
551   __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
552   __ and_(a0, a2, a3);
553   __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
554   __ Ret(USE_DELAY_SLOT);
555   __ xori(v0, a0, 1 << Map::kIsUndetectable);
556 }
557 
558 
CompareICStub_CheckInputType(MacroAssembler * masm,Register input,Register scratch,CompareICState::State expected,Label * fail)559 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
560                                          Register scratch,
561                                          CompareICState::State expected,
562                                          Label* fail) {
563   Label ok;
564   if (expected == CompareICState::SMI) {
565     __ JumpIfNotSmi(input, fail);
566   } else if (expected == CompareICState::NUMBER) {
567     __ JumpIfSmi(input, &ok);
568     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
569                 DONT_DO_SMI_CHECK);
570   }
571   // We could be strict about internalized/string here, but as long as
572   // hydrogen doesn't care, the stub doesn't have to care either.
573   __ bind(&ok);
574 }
575 
576 
577 // On entry a1 and a2 are the values to be compared.
578 // On exit a0 is 0, positive or negative to indicate the result of
579 // the comparison.
GenerateGeneric(MacroAssembler * masm)580 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
581   Register lhs = a1;
582   Register rhs = a0;
583   Condition cc = GetCondition();
584 
585   Label miss;
586   CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
587   CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
588 
589   Label slow;  // Call builtin.
590   Label not_smis, both_loaded_as_doubles;
591 
592   Label not_two_smis, smi_done;
593   __ Or(a2, a1, a0);
594   __ JumpIfNotSmi(a2, &not_two_smis);
595   __ sra(a1, a1, 1);
596   __ sra(a0, a0, 1);
597   __ Ret(USE_DELAY_SLOT);
598   __ subu(v0, a1, a0);
599   __ bind(&not_two_smis);
600 
601   // NOTICE! This code is only reached after a smi-fast-case check, so
602   // it is certain that at least one operand isn't a smi.
603 
604   // Handle the case where the objects are identical.  Either returns the answer
605   // or goes to slow.  Only falls through if the objects were not identical.
606   EmitIdenticalObjectComparison(masm, &slow, cc, strength());
607 
608   // If either is a Smi (we know that not both are), then they can only
609   // be strictly equal if the other is a HeapNumber.
610   STATIC_ASSERT(kSmiTag == 0);
611   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
612   __ And(t2, lhs, Operand(rhs));
613   __ JumpIfNotSmi(t2, &not_smis, t0);
614   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
615   // 1) Return the answer.
616   // 2) Go to slow.
617   // 3) Fall through to both_loaded_as_doubles.
618   // 4) Jump to rhs_not_nan.
619   // In cases 3 and 4 we have found out we were dealing with a number-number
620   // comparison and the numbers have been loaded into f12 and f14 as doubles,
621   // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
622   EmitSmiNonsmiComparison(masm, lhs, rhs,
623                           &both_loaded_as_doubles, &slow, strict());
624 
625   __ bind(&both_loaded_as_doubles);
626   // f12, f14 are the double representations of the left hand side
627   // and the right hand side if we have FPU. Otherwise a2, a3 represent
628   // left hand side and a0, a1 represent right hand side.
629   Label nan;
630   __ li(t0, Operand(LESS));
631   __ li(t1, Operand(GREATER));
632   __ li(t2, Operand(EQUAL));
633 
634   // Check if either rhs or lhs is NaN.
635   __ BranchF(NULL, &nan, eq, f12, f14);
636 
637   // Check if LESS condition is satisfied. If true, move conditionally
638   // result to v0.
639   if (!IsMipsArchVariant(kMips32r6)) {
640     __ c(OLT, D, f12, f14);
641     __ Movt(v0, t0);
642     // Use previous check to store conditionally to v0 oposite condition
643     // (GREATER). If rhs is equal to lhs, this will be corrected in next
644     // check.
645     __ Movf(v0, t1);
646     // Check if EQUAL condition is satisfied. If true, move conditionally
647     // result to v0.
648     __ c(EQ, D, f12, f14);
649     __ Movt(v0, t2);
650   } else {
651     Label skip;
652     __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
653     __ mov(v0, t0);  // Return LESS as result.
654 
655     __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
656     __ mov(v0, t2);  // Return EQUAL as result.
657 
658     __ mov(v0, t1);  // Return GREATER as result.
659     __ bind(&skip);
660   }
661 
662   __ Ret();
663 
664   __ bind(&nan);
665   // NaN comparisons always fail.
666   // Load whatever we need in v0 to make the comparison fail.
667   DCHECK(is_int16(GREATER) && is_int16(LESS));
668   __ Ret(USE_DELAY_SLOT);
669   if (cc == lt || cc == le) {
670     __ li(v0, Operand(GREATER));
671   } else {
672     __ li(v0, Operand(LESS));
673   }
674 
675 
676   __ bind(&not_smis);
677   // At this point we know we are dealing with two different objects,
678   // and neither of them is a Smi. The objects are in lhs_ and rhs_.
679   if (strict()) {
680     // This returns non-equal for some object types, or falls through if it
681     // was not lucky.
682     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
683   }
684 
685   Label check_for_internalized_strings;
686   Label flat_string_check;
687   // Check for heap-number-heap-number comparison. Can jump to slow case,
688   // or load both doubles and jump to the code that handles
689   // that case. If the inputs are not doubles then jumps to
690   // check_for_internalized_strings.
691   // In this case a2 will contain the type of lhs_.
692   EmitCheckForTwoHeapNumbers(masm,
693                              lhs,
694                              rhs,
695                              &both_loaded_as_doubles,
696                              &check_for_internalized_strings,
697                              &flat_string_check);
698 
699   __ bind(&check_for_internalized_strings);
700   if (cc == eq && !strict()) {
701     // Returns an answer for two internalized strings or two
702     // detectable objects.
703     // Otherwise jumps to string case or not both strings case.
704     // Assumes that a2 is the type of lhs_ on entry.
705     EmitCheckForInternalizedStringsOrObjects(
706         masm, lhs, rhs, &flat_string_check, &slow);
707   }
708 
709   // Check for both being sequential one-byte strings,
710   // and inline if that is the case.
711   __ bind(&flat_string_check);
712 
713   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
714 
715   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
716                       a3);
717   if (cc == eq) {
718     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
719   } else {
720     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
721                                                     t1);
722   }
723   // Never falls through to here.
724 
725   __ bind(&slow);
726   // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
727   // a1 (rhs) second.
728   __ Push(lhs, rhs);
729   // Figure out which native to call and setup the arguments.
730   if (cc == eq) {
731     __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
732   } else {
733     int ncr;  // NaN compare result.
734     if (cc == lt || cc == le) {
735       ncr = GREATER;
736     } else {
737       DCHECK(cc == gt || cc == ge);  // Remaining cases.
738       ncr = LESS;
739     }
740     __ li(a0, Operand(Smi::FromInt(ncr)));
741     __ push(a0);
742 
743     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
744     // tagged as a small integer.
745     __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
746                                              : Runtime::kCompare);
747   }
748 
749   __ bind(&miss);
750   GenerateMiss(masm);
751 }
752 
753 
Generate(MacroAssembler * masm)754 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
755   __ mov(t9, ra);
756   __ pop(ra);
757   __ PushSafepointRegisters();
758   __ Jump(t9);
759 }
760 
761 
Generate(MacroAssembler * masm)762 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
763   __ mov(t9, ra);
764   __ pop(ra);
765   __ PopSafepointRegisters();
766   __ Jump(t9);
767 }
768 
769 
Generate(MacroAssembler * masm)770 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
771   // We don't allow a GC during a store buffer overflow so there is no need to
772   // store the registers in any particular way, but we do have to store and
773   // restore them.
774   __ MultiPush(kJSCallerSaved | ra.bit());
775   if (save_doubles()) {
776     __ MultiPushFPU(kCallerSavedFPU);
777   }
778   const int argument_count = 1;
779   const int fp_argument_count = 0;
780   const Register scratch = a1;
781 
782   AllowExternalCallThatCantCauseGC scope(masm);
783   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
784   __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
785   __ CallCFunction(
786       ExternalReference::store_buffer_overflow_function(isolate()),
787       argument_count);
788   if (save_doubles()) {
789     __ MultiPopFPU(kCallerSavedFPU);
790   }
791 
792   __ MultiPop(kJSCallerSaved | ra.bit());
793   __ Ret();
794 }
795 
796 
Generate(MacroAssembler * masm)797 void MathPowStub::Generate(MacroAssembler* masm) {
798   const Register base = a1;
799   const Register exponent = MathPowTaggedDescriptor::exponent();
800   DCHECK(exponent.is(a2));
801   const Register heapnumbermap = t1;
802   const Register heapnumber = v0;
803   const DoubleRegister double_base = f2;
804   const DoubleRegister double_exponent = f4;
805   const DoubleRegister double_result = f0;
806   const DoubleRegister double_scratch = f6;
807   const FPURegister single_scratch = f8;
808   const Register scratch = t5;
809   const Register scratch2 = t3;
810 
811   Label call_runtime, done, int_exponent;
812   if (exponent_type() == ON_STACK) {
813     Label base_is_smi, unpack_exponent;
814     // The exponent and base are supplied as arguments on the stack.
815     // This can only happen if the stub is called from non-optimized code.
816     // Load input parameters from stack to double registers.
817     __ lw(base, MemOperand(sp, 1 * kPointerSize));
818     __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
819 
820     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
821 
822     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
823     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
824     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
825 
826     __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
827     __ jmp(&unpack_exponent);
828 
829     __ bind(&base_is_smi);
830     __ mtc1(scratch, single_scratch);
831     __ cvt_d_w(double_base, single_scratch);
832     __ bind(&unpack_exponent);
833 
834     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
835 
836     __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
837     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
838     __ ldc1(double_exponent,
839             FieldMemOperand(exponent, HeapNumber::kValueOffset));
840   } else if (exponent_type() == TAGGED) {
841     // Base is already in double_base.
842     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
843 
844     __ ldc1(double_exponent,
845             FieldMemOperand(exponent, HeapNumber::kValueOffset));
846   }
847 
848   if (exponent_type() != INTEGER) {
849     Label int_exponent_convert;
850     // Detect integer exponents stored as double.
851     __ EmitFPUTruncate(kRoundToMinusInf,
852                        scratch,
853                        double_exponent,
854                        at,
855                        double_scratch,
856                        scratch2,
857                        kCheckForInexactConversion);
858     // scratch2 == 0 means there was no conversion error.
859     __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
860 
861     if (exponent_type() == ON_STACK) {
862       // Detect square root case.  Crankshaft detects constant +/-0.5 at
863       // compile time and uses DoMathPowHalf instead.  We then skip this check
864       // for non-constant cases of +/-0.5 as these hardly occur.
865       Label not_plus_half;
866       // Test for 0.5.
867       __ Move(double_scratch, 0.5);
868       __ BranchF(USE_DELAY_SLOT,
869                  &not_plus_half,
870                  NULL,
871                  ne,
872                  double_exponent,
873                  double_scratch);
874       // double_scratch can be overwritten in the delay slot.
875       // Calculates square root of base.  Check for the special case of
876       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
877       __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
878       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
879       __ neg_d(double_result, double_scratch);
880 
881       // Add +0 to convert -0 to +0.
882       __ add_d(double_scratch, double_base, kDoubleRegZero);
883       __ sqrt_d(double_result, double_scratch);
884       __ jmp(&done);
885 
886       __ bind(&not_plus_half);
887       __ Move(double_scratch, -0.5);
888       __ BranchF(USE_DELAY_SLOT,
889                  &call_runtime,
890                  NULL,
891                  ne,
892                  double_exponent,
893                  double_scratch);
894       // double_scratch can be overwritten in the delay slot.
895       // Calculates square root of base.  Check for the special case of
896       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
897       __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
898       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
899       __ Move(double_result, kDoubleRegZero);
900 
901       // Add +0 to convert -0 to +0.
902       __ add_d(double_scratch, double_base, kDoubleRegZero);
903       __ Move(double_result, 1.);
904       __ sqrt_d(double_scratch, double_scratch);
905       __ div_d(double_result, double_result, double_scratch);
906       __ jmp(&done);
907     }
908 
909     __ push(ra);
910     {
911       AllowExternalCallThatCantCauseGC scope(masm);
912       __ PrepareCallCFunction(0, 2, scratch2);
913       __ MovToFloatParameters(double_base, double_exponent);
914       __ CallCFunction(
915           ExternalReference::power_double_double_function(isolate()),
916           0, 2);
917     }
918     __ pop(ra);
919     __ MovFromFloatResult(double_result);
920     __ jmp(&done);
921 
922     __ bind(&int_exponent_convert);
923   }
924 
925   // Calculate power with integer exponent.
926   __ bind(&int_exponent);
927 
928   // Get two copies of exponent in the registers scratch and exponent.
929   if (exponent_type() == INTEGER) {
930     __ mov(scratch, exponent);
931   } else {
932     // Exponent has previously been stored into scratch as untagged integer.
933     __ mov(exponent, scratch);
934   }
935 
936   __ mov_d(double_scratch, double_base);  // Back up base.
937   __ Move(double_result, 1.0);
938 
939   // Get absolute value of exponent.
940   Label positive_exponent;
941   __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
942   __ Subu(scratch, zero_reg, scratch);
943   __ bind(&positive_exponent);
944 
945   Label while_true, no_carry, loop_end;
946   __ bind(&while_true);
947 
948   __ And(scratch2, scratch, 1);
949 
950   __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
951   __ mul_d(double_result, double_result, double_scratch);
952   __ bind(&no_carry);
953 
954   __ sra(scratch, scratch, 1);
955 
956   __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
957   __ mul_d(double_scratch, double_scratch, double_scratch);
958 
959   __ Branch(&while_true);
960 
961   __ bind(&loop_end);
962 
963   __ Branch(&done, ge, exponent, Operand(zero_reg));
964   __ Move(double_scratch, 1.0);
965   __ div_d(double_result, double_scratch, double_result);
966   // Test whether result is zero.  Bail out to check for subnormal result.
967   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
968   __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
969 
970   // double_exponent may not contain the exponent value if the input was a
971   // smi.  We set it with exponent value before bailing out.
972   __ mtc1(exponent, single_scratch);
973   __ cvt_d_w(double_exponent, single_scratch);
974 
975   // Returning or bailing out.
976   Counters* counters = isolate()->counters();
977   if (exponent_type() == ON_STACK) {
978     // The arguments are still on the stack.
979     __ bind(&call_runtime);
980     __ TailCallRuntime(Runtime::kMathPowRT);
981 
982     // The stub is called from non-optimized code, which expects the result
983     // as heap number in exponent.
984     __ bind(&done);
985     __ AllocateHeapNumber(
986         heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
987     __ sdc1(double_result,
988             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
989     DCHECK(heapnumber.is(v0));
990     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
991     __ DropAndRet(2);
992   } else {
993     __ push(ra);
994     {
995       AllowExternalCallThatCantCauseGC scope(masm);
996       __ PrepareCallCFunction(0, 2, scratch);
997       __ MovToFloatParameters(double_base, double_exponent);
998       __ CallCFunction(
999           ExternalReference::power_double_double_function(isolate()),
1000           0, 2);
1001     }
1002     __ pop(ra);
1003     __ MovFromFloatResult(double_result);
1004 
1005     __ bind(&done);
1006     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1007     __ Ret();
1008   }
1009 }
1010 
1011 
NeedsImmovableCode()1012 bool CEntryStub::NeedsImmovableCode() {
1013   return true;
1014 }
1015 
1016 
GenerateStubsAheadOfTime(Isolate * isolate)1017 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1018   CEntryStub::GenerateAheadOfTime(isolate);
1019   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1020   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1021   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1022   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1023   CreateWeakCellStub::GenerateAheadOfTime(isolate);
1024   BinaryOpICStub::GenerateAheadOfTime(isolate);
1025   StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1026   RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1027   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1028   StoreFastElementStub::GenerateAheadOfTime(isolate);
1029   TypeofStub::GenerateAheadOfTime(isolate);
1030 }
1031 
1032 
GenerateAheadOfTime(Isolate * isolate)1033 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1034   StoreRegistersStateStub stub(isolate);
1035   stub.GetCode();
1036 }
1037 
1038 
GenerateAheadOfTime(Isolate * isolate)1039 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1040   RestoreRegistersStateStub stub(isolate);
1041   stub.GetCode();
1042 }
1043 
1044 
GenerateFPStubs(Isolate * isolate)1045 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1046   // Generate if not already in cache.
1047   SaveFPRegsMode mode = kSaveFPRegs;
1048   CEntryStub(isolate, 1, mode).GetCode();
1049   StoreBufferOverflowStub(isolate, mode).GetCode();
1050   isolate->set_fp_stubs_generated(true);
1051 }
1052 
1053 
GenerateAheadOfTime(Isolate * isolate)1054 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1055   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1056   stub.GetCode();
1057 }
1058 
1059 
Generate(MacroAssembler * masm)1060 void CEntryStub::Generate(MacroAssembler* masm) {
1061   // Called from JavaScript; parameters are on stack as if calling JS function
1062   // a0: number of arguments including receiver
1063   // a1: pointer to builtin function
1064   // fp: frame pointer    (restored after C call)
1065   // sp: stack pointer    (restored as callee's sp after C call)
1066   // cp: current context  (C callee-saved)
1067   //
1068   // If argv_in_register():
1069   // a2: pointer to the first argument
1070 
1071   ProfileEntryHookStub::MaybeCallEntryHook(masm);
1072 
1073   if (argv_in_register()) {
1074     // Move argv into the correct register.
1075     __ mov(s1, a2);
1076   } else {
1077     // Compute the argv pointer in a callee-saved register.
1078     __ sll(s1, a0, kPointerSizeLog2);
1079     __ Addu(s1, sp, s1);
1080     __ Subu(s1, s1, kPointerSize);
1081   }
1082 
1083   // Enter the exit frame that transitions from JavaScript to C++.
1084   FrameScope scope(masm, StackFrame::MANUAL);
1085   __ EnterExitFrame(save_doubles());
1086 
1087   // s0: number of arguments  including receiver (C callee-saved)
1088   // s1: pointer to first argument (C callee-saved)
1089   // s2: pointer to builtin function (C callee-saved)
1090 
1091   // Prepare arguments for C routine.
1092   // a0 = argc
1093   __ mov(s0, a0);
1094   __ mov(s2, a1);
1095   // a1 = argv (set in the delay slot after find_ra below).
1096 
1097   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1098   // also need to reserve the 4 argument slots on the stack.
1099 
1100   __ AssertStackIsAligned();
1101 
1102   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1103 
1104   // To let the GC traverse the return address of the exit frames, we need to
1105   // know where the return address is. The CEntryStub is unmovable, so
1106   // we can store the address on the stack to be able to find it again and
1107   // we never have to restore it, because it will not change.
1108   { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1109     // This branch-and-link sequence is needed to find the current PC on mips,
1110     // saved to the ra register.
1111     // Use masm-> here instead of the double-underscore macro since extra
1112     // coverage code can interfere with the proper calculation of ra.
1113     Label find_ra;
1114     masm->bal(&find_ra);  // bal exposes branch delay slot.
1115     masm->mov(a1, s1);
1116     masm->bind(&find_ra);
1117 
1118     // Adjust the value in ra to point to the correct return location, 2nd
1119     // instruction past the real call into C code (the jalr(t9)), and push it.
1120     // This is the return address of the exit frame.
1121     const int kNumInstructionsToJump = 5;
1122     masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1123     masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
1124     // Stack space reservation moved to the branch delay slot below.
1125     // Stack is still aligned.
1126 
1127     // Call the C routine.
1128     masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
1129     masm->jalr(t9);
1130     // Set up sp in the delay slot.
1131     masm->addiu(sp, sp, -kCArgsSlotsSize);
1132     // Make sure the stored 'ra' points to this position.
1133     DCHECK_EQ(kNumInstructionsToJump,
1134               masm->InstructionsGeneratedSince(&find_ra));
1135   }
1136 
1137 
1138   // Check result for exception sentinel.
1139   Label exception_returned;
1140   __ LoadRoot(t0, Heap::kExceptionRootIndex);
1141   __ Branch(&exception_returned, eq, t0, Operand(v0));
1142 
1143   // Check that there is no pending exception, otherwise we
1144   // should have returned the exception sentinel.
1145   if (FLAG_debug_code) {
1146     Label okay;
1147     ExternalReference pending_exception_address(
1148         Isolate::kPendingExceptionAddress, isolate());
1149     __ li(a2, Operand(pending_exception_address));
1150     __ lw(a2, MemOperand(a2));
1151     __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1152     // Cannot use check here as it attempts to generate call into runtime.
1153     __ Branch(&okay, eq, t0, Operand(a2));
1154     __ stop("Unexpected pending exception");
1155     __ bind(&okay);
1156   }
1157 
1158   // Exit C frame and return.
1159   // v0:v1: result
1160   // sp: stack pointer
1161   // fp: frame pointer
1162   Register argc;
1163   if (argv_in_register()) {
1164     // We don't want to pop arguments so set argc to no_reg.
1165     argc = no_reg;
1166   } else {
1167     // s0: still holds argc (callee-saved).
1168     argc = s0;
1169   }
1170   __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
1171 
1172   // Handling of exception.
1173   __ bind(&exception_returned);
1174 
1175   ExternalReference pending_handler_context_address(
1176       Isolate::kPendingHandlerContextAddress, isolate());
1177   ExternalReference pending_handler_code_address(
1178       Isolate::kPendingHandlerCodeAddress, isolate());
1179   ExternalReference pending_handler_offset_address(
1180       Isolate::kPendingHandlerOffsetAddress, isolate());
1181   ExternalReference pending_handler_fp_address(
1182       Isolate::kPendingHandlerFPAddress, isolate());
1183   ExternalReference pending_handler_sp_address(
1184       Isolate::kPendingHandlerSPAddress, isolate());
1185 
1186   // Ask the runtime for help to determine the handler. This will set v0 to
1187   // contain the current pending exception, don't clobber it.
1188   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1189                                  isolate());
1190   {
1191     FrameScope scope(masm, StackFrame::MANUAL);
1192     __ PrepareCallCFunction(3, 0, a0);
1193     __ mov(a0, zero_reg);
1194     __ mov(a1, zero_reg);
1195     __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1196     __ CallCFunction(find_handler, 3);
1197   }
1198 
1199   // Retrieve the handler context, SP and FP.
1200   __ li(cp, Operand(pending_handler_context_address));
1201   __ lw(cp, MemOperand(cp));
1202   __ li(sp, Operand(pending_handler_sp_address));
1203   __ lw(sp, MemOperand(sp));
1204   __ li(fp, Operand(pending_handler_fp_address));
1205   __ lw(fp, MemOperand(fp));
1206 
1207   // If the handler is a JS frame, restore the context to the frame. Note that
1208   // the context will be set to (cp == 0) for non-JS frames.
1209   Label zero;
1210   __ Branch(&zero, eq, cp, Operand(zero_reg));
1211   __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1212   __ bind(&zero);
1213 
1214   // Compute the handler entry address and jump to it.
1215   __ li(a1, Operand(pending_handler_code_address));
1216   __ lw(a1, MemOperand(a1));
1217   __ li(a2, Operand(pending_handler_offset_address));
1218   __ lw(a2, MemOperand(a2));
1219   __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
1220   __ Addu(t9, a1, a2);
1221   __ Jump(t9);
1222 }
1223 
1224 
Generate(MacroAssembler * masm)1225 void JSEntryStub::Generate(MacroAssembler* masm) {
1226   Label invoke, handler_entry, exit;
1227   Isolate* isolate = masm->isolate();
1228 
1229   // Registers:
1230   // a0: entry address
1231   // a1: function
1232   // a2: receiver
1233   // a3: argc
1234   //
1235   // Stack:
1236   // 4 args slots
1237   // args
1238 
1239   ProfileEntryHookStub::MaybeCallEntryHook(masm);
1240 
1241   // Save callee saved registers on the stack.
1242   __ MultiPush(kCalleeSaved | ra.bit());
1243 
1244   // Save callee-saved FPU registers.
1245   __ MultiPushFPU(kCalleeSavedFPU);
1246   // Set up the reserved register for 0.0.
1247   __ Move(kDoubleRegZero, 0.0);
1248 
1249 
1250   // Load argv in s0 register.
1251   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1252   offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1253 
1254   __ InitializeRootRegister();
1255   __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1256 
1257   // We build an EntryFrame.
1258   __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
1259   int marker = type();
1260   __ li(t2, Operand(Smi::FromInt(marker)));
1261   __ li(t1, Operand(Smi::FromInt(marker)));
1262   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1263                                       isolate)));
1264   __ lw(t0, MemOperand(t0));
1265   __ Push(t3, t2, t1, t0);
1266   // Set up frame pointer for the frame to be pushed.
1267   __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1268 
1269   // Registers:
1270   // a0: entry_address
1271   // a1: function
1272   // a2: receiver_pointer
1273   // a3: argc
1274   // s0: argv
1275   //
1276   // Stack:
1277   // caller fp          |
1278   // function slot      | entry frame
1279   // context slot       |
1280   // bad fp (0xff...f)  |
1281   // callee saved registers + ra
1282   // 4 args slots
1283   // args
1284 
1285   // If this is the outermost JS call, set js_entry_sp value.
1286   Label non_outermost_js;
1287   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1288   __ li(t1, Operand(ExternalReference(js_entry_sp)));
1289   __ lw(t2, MemOperand(t1));
1290   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1291   __ sw(fp, MemOperand(t1));
1292   __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1293   Label cont;
1294   __ b(&cont);
1295   __ nop();   // Branch delay slot nop.
1296   __ bind(&non_outermost_js);
1297   __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1298   __ bind(&cont);
1299   __ push(t0);
1300 
1301   // Jump to a faked try block that does the invoke, with a faked catch
1302   // block that sets the pending exception.
1303   __ jmp(&invoke);
1304   __ bind(&handler_entry);
1305   handler_offset_ = handler_entry.pos();
1306   // Caught exception: Store result (exception) in the pending exception
1307   // field in the JSEnv and return a failure sentinel.  Coming in here the
1308   // fp will be invalid because the PushStackHandler below sets it to 0 to
1309   // signal the existence of the JSEntry frame.
1310   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1311                                       isolate)));
1312   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
1313   __ LoadRoot(v0, Heap::kExceptionRootIndex);
1314   __ b(&exit);  // b exposes branch delay slot.
1315   __ nop();   // Branch delay slot nop.
1316 
1317   // Invoke: Link this frame into the handler chain.
1318   __ bind(&invoke);
1319   __ PushStackHandler();
1320   // If an exception not caught by another handler occurs, this handler
1321   // returns control to the code after the bal(&invoke) above, which
1322   // restores all kCalleeSaved registers (including cp and fp) to their
1323   // saved values before returning a failure to C.
1324 
1325   // Clear any pending exceptions.
1326   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1327   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1328                                       isolate)));
1329   __ sw(t1, MemOperand(t0));
1330 
1331   // Invoke the function by calling through JS entry trampoline builtin.
1332   // Notice that we cannot store a reference to the trampoline code directly in
1333   // this stub, because runtime stubs are not traversed when doing GC.
1334 
1335   // Registers:
1336   // a0: entry_address
1337   // a1: function
1338   // a2: receiver_pointer
1339   // a3: argc
1340   // s0: argv
1341   //
1342   // Stack:
1343   // handler frame
1344   // entry frame
1345   // callee saved registers + ra
1346   // 4 args slots
1347   // args
1348 
1349   if (type() == StackFrame::ENTRY_CONSTRUCT) {
1350     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1351                                       isolate);
1352     __ li(t0, Operand(construct_entry));
1353   } else {
1354     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1355     __ li(t0, Operand(entry));
1356   }
1357   __ lw(t9, MemOperand(t0));  // Deref address.
1358 
1359   // Call JSEntryTrampoline.
1360   __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1361   __ Call(t9);
1362 
1363   // Unlink this frame from the handler chain.
1364   __ PopStackHandler();
1365 
1366   __ bind(&exit);  // v0 holds result
1367   // Check if the current stack frame is marked as the outermost JS frame.
1368   Label non_outermost_js_2;
1369   __ pop(t1);
1370   __ Branch(&non_outermost_js_2,
1371             ne,
1372             t1,
1373             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1374   __ li(t1, Operand(ExternalReference(js_entry_sp)));
1375   __ sw(zero_reg, MemOperand(t1));
1376   __ bind(&non_outermost_js_2);
1377 
1378   // Restore the top frame descriptors from the stack.
1379   __ pop(t1);
1380   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1381                                       isolate)));
1382   __ sw(t1, MemOperand(t0));
1383 
1384   // Reset the stack to the callee saved registers.
1385   __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1386 
1387   // Restore callee-saved fpu registers.
1388   __ MultiPopFPU(kCalleeSavedFPU);
1389 
1390   // Restore callee saved registers from the stack.
1391   __ MultiPop(kCalleeSaved | ra.bit());
1392   // Return.
1393   __ Jump(ra);
1394 }
1395 
1396 
Generate(MacroAssembler * masm)1397 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1398   // Return address is in ra.
1399   Label miss;
1400 
1401   Register receiver = LoadDescriptor::ReceiverRegister();
1402   Register index = LoadDescriptor::NameRegister();
1403   Register scratch = t1;
1404   Register result = v0;
1405   DCHECK(!scratch.is(receiver) && !scratch.is(index));
1406   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
1407 
1408   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1409                                           &miss,  // When not a string.
1410                                           &miss,  // When not a number.
1411                                           &miss,  // When index out of range.
1412                                           STRING_INDEX_IS_ARRAY_INDEX,
1413                                           RECEIVER_IS_STRING);
1414   char_at_generator.GenerateFast(masm);
1415   __ Ret();
1416 
1417   StubRuntimeCallHelper call_helper;
1418   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1419 
1420   __ bind(&miss);
1421   PropertyAccessCompiler::TailCallBuiltin(
1422       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1423 }
1424 
1425 
Generate(MacroAssembler * masm)1426 void InstanceOfStub::Generate(MacroAssembler* masm) {
1427   Register const object = a1;              // Object (lhs).
1428   Register const function = a0;            // Function (rhs).
1429   Register const object_map = a2;          // Map of {object}.
1430   Register const function_map = a3;        // Map of {function}.
1431   Register const function_prototype = t0;  // Prototype of {function}.
1432   Register const scratch = t1;
1433 
1434   DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
1435   DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
1436 
1437   // Check if {object} is a smi.
1438   Label object_is_smi;
1439   __ JumpIfSmi(object, &object_is_smi);
1440 
1441   // Lookup the {function} and the {object} map in the global instanceof cache.
1442   // Note: This is safe because we clear the global instanceof cache whenever
1443   // we change the prototype of any object.
1444   Label fast_case, slow_case;
1445   __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
1446   __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1447   __ Branch(&fast_case, ne, function, Operand(at));
1448   __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1449   __ Branch(&fast_case, ne, object_map, Operand(at));
1450   __ Ret(USE_DELAY_SLOT);
1451   __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
1452 
1453   // If {object} is a smi we can safely return false if {function} is a JS
1454   // function, otherwise we have to miss to the runtime and throw an exception.
1455   __ bind(&object_is_smi);
1456   __ JumpIfSmi(function, &slow_case);
1457   __ GetObjectType(function, function_map, scratch);
1458   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
1459   __ Ret(USE_DELAY_SLOT);
1460   __ LoadRoot(v0, Heap::kFalseValueRootIndex);  // In delay slot.
1461 
1462   // Fast-case: The {function} must be a valid JSFunction.
1463   __ bind(&fast_case);
1464   __ JumpIfSmi(function, &slow_case);
1465   __ GetObjectType(function, function_map, scratch);
1466   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
1467 
1468   // Ensure that {function} has an instance prototype.
1469   __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
1470   __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
1471   __ Branch(&slow_case, ne, at, Operand(zero_reg));
1472 
1473   // Get the "prototype" (or initial map) of the {function}.
1474   __ lw(function_prototype,
1475         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1476   __ AssertNotSmi(function_prototype);
1477 
1478   // Resolve the prototype if the {function} has an initial map.  Afterwards the
1479   // {function_prototype} will be either the JSReceiver prototype object or the
1480   // hole value, which means that no instances of the {function} were created so
1481   // far and hence we should return false.
1482   Label function_prototype_valid;
1483   __ GetObjectType(function_prototype, scratch, scratch);
1484   __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
1485   __ lw(function_prototype,
1486         FieldMemOperand(function_prototype, Map::kPrototypeOffset));
1487   __ bind(&function_prototype_valid);
1488   __ AssertNotSmi(function_prototype);
1489 
1490   // Update the global instanceof cache with the current {object} map and
1491   // {function}.  The cached answer will be set when it is known below.
1492   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1493   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
1494 
1495   // Loop through the prototype chain looking for the {function} prototype.
1496   // Assume true, and change to false if not found.
1497   Register const object_instance_type = function_map;
1498   Register const map_bit_field = function_map;
1499   Register const null = scratch;
1500   Register const result = v0;
1501 
1502   Label done, loop, fast_runtime_fallback;
1503   __ LoadRoot(result, Heap::kTrueValueRootIndex);
1504   __ LoadRoot(null, Heap::kNullValueRootIndex);
1505   __ bind(&loop);
1506 
1507   // Check if the object needs to be access checked.
1508   __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
1509   __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
1510   __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
1511   // Check if the current object is a Proxy.
1512   __ lbu(object_instance_type,
1513          FieldMemOperand(object_map, Map::kInstanceTypeOffset));
1514   __ Branch(&fast_runtime_fallback, eq, object_instance_type,
1515             Operand(JS_PROXY_TYPE));
1516 
1517   __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
1518   __ Branch(&done, eq, object, Operand(function_prototype));
1519   __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
1520   __ lw(object_map,
1521         FieldMemOperand(object, HeapObject::kMapOffset));  // In delay slot.
1522   __ LoadRoot(result, Heap::kFalseValueRootIndex);
1523   __ bind(&done);
1524   __ Ret(USE_DELAY_SLOT);
1525   __ StoreRoot(result,
1526                Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
1527 
1528   // Found Proxy or access check needed: Call the runtime
1529   __ bind(&fast_runtime_fallback);
1530   __ Push(object, function_prototype);
1531   // Invalidate the instanceof cache.
1532   DCHECK(Smi::FromInt(0) == 0);
1533   __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
1534   __ TailCallRuntime(Runtime::kHasInPrototypeChain);
1535 
1536   // Slow-case: Call the %InstanceOf runtime function.
1537   __ bind(&slow_case);
1538   __ Push(object, function);
1539   __ TailCallRuntime(Runtime::kInstanceOf);
1540 }
1541 
1542 
Generate(MacroAssembler * masm)1543 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1544   Label miss;
1545   Register receiver = LoadDescriptor::ReceiverRegister();
1546   // Ensure that the vector and slot registers won't be clobbered before
1547   // calling the miss handler.
1548   DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
1549                      LoadWithVectorDescriptor::SlotRegister()));
1550 
1551   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
1552                                                           t1, &miss);
1553   __ bind(&miss);
1554   PropertyAccessCompiler::TailCallBuiltin(
1555       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1556 }
1557 
1558 
GenerateReadElement(MacroAssembler * masm)1559 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1560   // The displacement is the offset of the last parameter (if any)
1561   // relative to the frame pointer.
1562   const int kDisplacement =
1563       StandardFrameConstants::kCallerSPOffset - kPointerSize;
1564   DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1565   DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1566 
1567   // Check that the key is a smiGenerateReadElement.
1568   Label slow;
1569   __ JumpIfNotSmi(a1, &slow);
1570 
1571   // Check if the calling frame is an arguments adaptor frame.
1572   Label adaptor;
1573   __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1574   __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1575   __ Branch(&adaptor,
1576             eq,
1577             a3,
1578             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1579 
1580   // Check index (a1) against formal parameters count limit passed in
1581   // through register a0. Use unsigned comparison to get negative
1582   // check for free.
1583   __ Branch(&slow, hs, a1, Operand(a0));
1584 
1585   // Read the argument from the stack and return it.
1586   __ subu(a3, a0, a1);
1587   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1588   __ Addu(a3, fp, Operand(t3));
1589   __ Ret(USE_DELAY_SLOT);
1590   __ lw(v0, MemOperand(a3, kDisplacement));
1591 
1592   // Arguments adaptor case: Check index (a1) against actual arguments
1593   // limit found in the arguments adaptor frame. Use unsigned
1594   // comparison to get negative check for free.
1595   __ bind(&adaptor);
1596   __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1597   __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1598 
1599   // Read the argument from the adaptor frame and return it.
1600   __ subu(a3, a0, a1);
1601   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1602   __ Addu(a3, a2, Operand(t3));
1603   __ Ret(USE_DELAY_SLOT);
1604   __ lw(v0, MemOperand(a3, kDisplacement));
1605 
1606   // Slow-case: Handle non-smi or out-of-bounds access to arguments
1607   // by calling the runtime system.
1608   __ bind(&slow);
1609   __ push(a1);
1610   __ TailCallRuntime(Runtime::kArguments);
1611 }
1612 
1613 
GenerateNewSloppySlow(MacroAssembler * masm)1614 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1615   // a1 : function
1616   // a2 : number of parameters (tagged)
1617   // a3 : parameters pointer
1618 
1619   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
1620   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
1621   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
1622 
1623   // Check if the calling frame is an arguments adaptor frame.
1624   Label runtime;
1625   __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1626   __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
1627   __ Branch(&runtime, ne, a0,
1628             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1629 
1630   // Patch the arguments.length and the parameters pointer in the current frame.
1631   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
1632   __ sll(t3, a2, 1);
1633   __ Addu(t0, t0, Operand(t3));
1634   __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);
1635 
1636   __ bind(&runtime);
1637   __ Push(a1, a3, a2);
1638   __ TailCallRuntime(Runtime::kNewSloppyArguments);
1639 }
1640 
1641 
GenerateNewSloppyFast(MacroAssembler * masm)1642 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1643   // a1 : function
1644   // a2 : number of parameters (tagged)
1645   // a3 : parameters pointer
1646   // Registers used over whole function:
1647   //  t1 : arguments count (tagged)
1648   //  t2 : mapped parameter count (tagged)
1649 
1650   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
1651   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
1652   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
1653 
1654   // Check if the calling frame is an arguments adaptor frame.
1655   Label adaptor_frame, try_allocate, runtime;
1656   __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1657   __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
1658   __ Branch(&adaptor_frame, eq, a0,
1659             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1660 
1661   // No adaptor, parameter count = argument count.
1662   __ mov(t1, a2);
1663   __ Branch(USE_DELAY_SLOT, &try_allocate);
1664   __ mov(t2, a2);  // In delay slot.
1665 
1666   // We have an adaptor frame. Patch the parameters pointer.
1667   __ bind(&adaptor_frame);
1668   __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
1669   __ sll(t6, t1, 1);
1670   __ Addu(t0, t0, Operand(t6));
1671   __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
1672 
1673   // t1 = argument count (tagged)
1674   // t2 = parameter count (tagged)
1675   // Compute the mapped parameter count = min(t2, t1) in t2.
1676   __ mov(t2, a2);
1677   __ Branch(&try_allocate, le, t2, Operand(t1));
1678   __ mov(t2, t1);
1679 
1680   __ bind(&try_allocate);
1681 
1682   // Compute the sizes of backing store, parameter map, and arguments object.
1683   // 1. Parameter map, has 2 extra words containing context and backing store.
1684   const int kParameterMapHeaderSize =
1685       FixedArray::kHeaderSize + 2 * kPointerSize;
1686   // If there are no mapped parameters, we do not need the parameter_map.
1687   Label param_map_size;
1688   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1689   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
1690   __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when t2 == 0.
1691   __ sll(t5, t2, 1);
1692   __ addiu(t5, t5, kParameterMapHeaderSize);
1693   __ bind(&param_map_size);
1694 
1695   // 2. Backing store.
1696   __ sll(t6, t1, 1);
1697   __ Addu(t5, t5, Operand(t6));
1698   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1699 
1700   // 3. Arguments object.
1701   __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
1702 
1703   // Do the allocation of all three objects in one go.
1704   __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
1705 
1706   // v0 = address of new object(s) (tagged)
1707   // a2 = argument count (smi-tagged)
1708   // Get the arguments boilerplate from the current native context into t0.
1709   const int kNormalOffset =
1710       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1711   const int kAliasedOffset =
1712       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
1713 
1714   __ lw(t0, NativeContextMemOperand());
1715   Label skip2_ne, skip2_eq;
1716   __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
1717   __ lw(t0, MemOperand(t0, kNormalOffset));
1718   __ bind(&skip2_ne);
1719 
1720   __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
1721   __ lw(t0, MemOperand(t0, kAliasedOffset));
1722   __ bind(&skip2_eq);
1723 
1724   // v0 = address of new object (tagged)
1725   // a2 = argument count (smi-tagged)
1726   // t0 = address of arguments map (tagged)
1727   // t2 = mapped parameter count (tagged)
1728   __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1729   __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
1730   __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1731   __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
1732 
1733   // Set up the callee in-object property.
1734   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1735   __ AssertNotSmi(a1);
1736   const int kCalleeOffset = JSObject::kHeaderSize +
1737       Heap::kArgumentsCalleeIndex * kPointerSize;
1738   __ sw(a1, FieldMemOperand(v0, kCalleeOffset));
1739 
1740   // Use the length (smi tagged) and set that as an in-object property too.
1741   __ AssertSmi(t1);
1742   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1743   const int kLengthOffset = JSObject::kHeaderSize +
1744       Heap::kArgumentsLengthIndex * kPointerSize;
1745   __ sw(t1, FieldMemOperand(v0, kLengthOffset));
1746 
1747   // Set up the elements pointer in the allocated arguments object.
1748   // If we allocated a parameter map, t0 will point there, otherwise
1749   // it will point to the backing store.
1750   __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1751   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1752 
1753   // v0 = address of new object (tagged)
1754   // a2 = argument count (tagged)
1755   // t0 = address of parameter map or backing store (tagged)
1756   // t2 = mapped parameter count (tagged)
1757   // Initialize parameter map. If there are no mapped arguments, we're done.
1758   Label skip_parameter_map;
1759   Label skip3;
1760   __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
1761   // Move backing store address to a1, because it is
1762   // expected there when filling in the unmapped arguments.
1763   __ mov(a1, t0);
1764   __ bind(&skip3);
1765 
1766   __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
1767 
1768   __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
1769   __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
1770   __ Addu(t1, t2, Operand(Smi::FromInt(2)));
1771   __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
1772   __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
1773   __ sll(t6, t2, 1);
1774   __ Addu(t1, t0, Operand(t6));
1775   __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
1776   __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
1777 
1778   // Copy the parameter slots and the holes in the arguments.
1779   // We need to fill in mapped_parameter_count slots. They index the context,
1780   // where parameters are stored in reverse order, at
1781   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1782   // The mapped parameter thus need to get indices
1783   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
1784   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1785   // We loop from right to left.
1786   Label parameters_loop, parameters_test;
1787   __ mov(t1, t2);
1788   __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1789   __ Subu(t5, t5, Operand(t2));
1790   __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
1791   __ sll(t6, t1, 1);
1792   __ Addu(a1, t0, Operand(t6));
1793   __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
1794 
1795   // a1 = address of backing store (tagged)
1796   // t0 = address of parameter map (tagged)
1797   // a0 = temporary scratch (a.o., for address calculation)
1798   // t1 = loop variable (tagged)
1799   // t3 = the hole value
1800   __ jmp(&parameters_test);
1801 
1802   __ bind(&parameters_loop);
1803   __ Subu(t1, t1, Operand(Smi::FromInt(1)));
1804   __ sll(a0, t1, 1);
1805   __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1806   __ Addu(t6, t0, a0);
1807   __ sw(t5, MemOperand(t6));
1808   __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1809   __ Addu(t6, a1, a0);
1810   __ sw(t3, MemOperand(t6));
1811   __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1812   __ bind(&parameters_test);
1813   __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
1814 
1815   // t1 = argument count (tagged).
1816   __ lw(t1, FieldMemOperand(v0, kLengthOffset));
1817 
1818   __ bind(&skip_parameter_map);
1819   // v0 = address of new object (tagged)
1820   // a1 = address of backing store (tagged)
1821   // t1 = argument count (tagged)
1822   // t2 = mapped parameter count (tagged)
1823   // t5 = scratch
1824   // Copy arguments header and remaining slots (if there are any).
1825   __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
1826   __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
1827   __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
1828 
1829   Label arguments_loop, arguments_test;
1830   __ sll(t6, t2, 1);
1831   __ Subu(a3, a3, Operand(t6));
1832   __ jmp(&arguments_test);
1833 
1834   __ bind(&arguments_loop);
1835   __ Subu(a3, a3, Operand(kPointerSize));
1836   __ lw(t0, MemOperand(a3, 0));
1837   __ sll(t6, t2, 1);
1838   __ Addu(t5, a1, Operand(t6));
1839   __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
1840   __ Addu(t2, t2, Operand(Smi::FromInt(1)));
1841 
1842   __ bind(&arguments_test);
1843   __ Branch(&arguments_loop, lt, t2, Operand(t1));
1844 
1845   // Return.
1846   __ Ret();
1847 
1848   // Do the runtime call to allocate the arguments object.
1849   // t1 = argument count (tagged)
1850   __ bind(&runtime);
1851   __ Push(a1, a3, t1);
1852   __ TailCallRuntime(Runtime::kNewSloppyArguments);
1853 }
1854 
1855 
Generate(MacroAssembler * masm)1856 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1857   // Return address is in ra.
1858   Label slow;
1859 
1860   Register receiver = LoadDescriptor::ReceiverRegister();
1861   Register key = LoadDescriptor::NameRegister();
1862 
1863   // Check that the key is an array index, that is Uint32.
1864   __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1865   __ Branch(&slow, ne, t0, Operand(zero_reg));
1866 
1867   // Everything is fine, call runtime.
1868   __ Push(receiver, key);  // Receiver, key.
1869 
1870   // Perform tail call to the entry.
1871   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
1872 
1873   __ bind(&slow);
1874   PropertyAccessCompiler::TailCallBuiltin(
1875       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1876 }
1877 
1878 
GenerateNewStrict(MacroAssembler * masm)1879 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1880   // a1 : function
1881   // a2 : number of parameters (tagged)
1882   // a3 : parameters pointer
1883 
1884   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
1885   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
1886   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
1887 
1888   // Check if the calling frame is an arguments adaptor frame.
1889   Label try_allocate, runtime;
1890   __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1891   __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
1892   __ Branch(&try_allocate, ne, a0,
1893             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1894 
1895   // Patch the arguments.length and the parameters pointer.
1896   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
1897   __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
1898   __ Addu(t0, t0, Operand(at));
1899   __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
1900 
1901   // Try the new space allocation. Start out with computing the size
1902   // of the arguments object and the elements array in words.
1903   Label add_arguments_object;
1904   __ bind(&try_allocate);
1905   __ SmiUntag(t5, a2);
1906   __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
1907 
1908   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
1909   __ bind(&add_arguments_object);
1910   __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1911 
1912   // Do the allocation of both objects in one go.
1913   __ Allocate(t5, v0, t0, t1, &runtime,
1914               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1915 
1916   // Get the arguments boilerplate from the current native context.
1917   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
1918 
1919   __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1920   __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
1921   __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1922   __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
1923 
1924   // Get the length (smi tagged) and set that as an in-object property too.
1925   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1926   __ AssertSmi(a2);
1927   __ sw(a2,
1928         FieldMemOperand(v0, JSObject::kHeaderSize +
1929                                 Heap::kArgumentsLengthIndex * kPointerSize));
1930 
1931   Label done;
1932   __ Branch(&done, eq, a2, Operand(zero_reg));
1933 
1934   // Set up the elements pointer in the allocated arguments object and
1935   // initialize the header in the elements fixed array.
1936   __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
1937   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1938   __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1939   __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
1940   __ sw(a2, FieldMemOperand(t0, FixedArray::kLengthOffset));
1941   __ SmiUntag(a2);
1942 
1943   // Copy the fixed array slots.
1944   Label loop;
1945   // Set up t0 to point to the first array slot.
1946   __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1947   __ bind(&loop);
1948   // Pre-decrement a3 with kPointerSize on each iteration.
1949   // Pre-decrement in order to skip receiver.
1950   __ Addu(a3, a3, Operand(-kPointerSize));
1951   __ lw(t1, MemOperand(a3));
1952   // Post-increment t0 with kPointerSize on each iteration.
1953   __ sw(t1, MemOperand(t0));
1954   __ Addu(t0, t0, Operand(kPointerSize));
1955   __ Subu(a2, a2, Operand(1));
1956   __ Branch(&loop, ne, a2, Operand(zero_reg));
1957 
1958   // Return.
1959   __ bind(&done);
1960   __ Ret();
1961 
1962   // Do the runtime call to allocate the arguments object.
1963   __ bind(&runtime);
1964   __ Push(a1, a3, a2);
1965   __ TailCallRuntime(Runtime::kNewStrictArguments);
1966 }
1967 
1968 
GenerateNew(MacroAssembler * masm)1969 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
1970   // a2 : number of parameters (tagged)
1971   // a3 : parameters pointer
1972   // a1 : rest parameter index (tagged)
1973   // Check if the calling frame is an arguments adaptor frame.
1974 
1975   Label runtime;
1976   __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1977   __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
1978   __ Branch(&runtime, ne, t1,
1979             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1980 
1981   // Patch the arguments.length and the parameters pointer.
1982   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
1983   __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
1984   __ Addu(a3, t0, Operand(t1));
1985   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1986 
1987   // Do the runtime call to allocate the arguments object.
1988   __ bind(&runtime);
1989   __ Push(a2, a3, a1);
1990   __ TailCallRuntime(Runtime::kNewRestParam);
1991 }
1992 
1993 
Generate(MacroAssembler * masm)1994 void RegExpExecStub::Generate(MacroAssembler* masm) {
1995   // Just jump directly to runtime if native RegExp is not selected at compile
1996   // time or if regexp entry in generated code is turned off runtime switch or
1997   // at compilation.
1998 #ifdef V8_INTERPRETED_REGEXP
1999   __ TailCallRuntime(Runtime::kRegExpExec);
2000 #else  // V8_INTERPRETED_REGEXP
2001 
2002   // Stack frame on entry.
2003   //  sp[0]: last_match_info (expected JSArray)
2004   //  sp[4]: previous index
2005   //  sp[8]: subject string
2006   //  sp[12]: JSRegExp object
2007 
2008   const int kLastMatchInfoOffset = 0 * kPointerSize;
2009   const int kPreviousIndexOffset = 1 * kPointerSize;
2010   const int kSubjectOffset = 2 * kPointerSize;
2011   const int kJSRegExpOffset = 3 * kPointerSize;
2012 
2013   Label runtime;
2014   // Allocation of registers for this function. These are in callee save
2015   // registers and will be preserved by the call to the native RegExp code, as
2016   // this code is called using the normal C calling convention. When calling
2017   // directly from generated code the native RegExp code will not do a GC and
2018   // therefore the content of these registers are safe to use after the call.
2019   // MIPS - using s0..s2, since we are not using CEntry Stub.
2020   Register subject = s0;
2021   Register regexp_data = s1;
2022   Register last_match_info_elements = s2;
2023 
2024   // Ensure that a RegExp stack is allocated.
2025   ExternalReference address_of_regexp_stack_memory_address =
2026       ExternalReference::address_of_regexp_stack_memory_address(
2027           isolate());
2028   ExternalReference address_of_regexp_stack_memory_size =
2029       ExternalReference::address_of_regexp_stack_memory_size(isolate());
2030   __ li(a0, Operand(address_of_regexp_stack_memory_size));
2031   __ lw(a0, MemOperand(a0, 0));
2032   __ Branch(&runtime, eq, a0, Operand(zero_reg));
2033 
2034   // Check that the first argument is a JSRegExp object.
2035   __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2036   STATIC_ASSERT(kSmiTag == 0);
2037   __ JumpIfSmi(a0, &runtime);
2038   __ GetObjectType(a0, a1, a1);
2039   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2040 
2041   // Check that the RegExp has been compiled (data contains a fixed array).
2042   __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2043   if (FLAG_debug_code) {
2044     __ SmiTst(regexp_data, t0);
2045     __ Check(nz,
2046              kUnexpectedTypeForRegExpDataFixedArrayExpected,
2047              t0,
2048              Operand(zero_reg));
2049     __ GetObjectType(regexp_data, a0, a0);
2050     __ Check(eq,
2051              kUnexpectedTypeForRegExpDataFixedArrayExpected,
2052              a0,
2053              Operand(FIXED_ARRAY_TYPE));
2054   }
2055 
2056   // regexp_data: RegExp data (FixedArray)
2057   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2058   __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2059   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2060 
2061   // regexp_data: RegExp data (FixedArray)
2062   // Check that the number of captures fit in the static offsets vector buffer.
2063   __ lw(a2,
2064          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2065   // Check (number_of_captures + 1) * 2 <= offsets vector size
2066   // Or          number_of_captures * 2 <= offsets vector size - 2
2067   // Multiplying by 2 comes for free since a2 is smi-tagged.
2068   STATIC_ASSERT(kSmiTag == 0);
2069   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2070   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2071   __ Branch(
2072       &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2073 
2074   // Reset offset for possibly sliced string.
2075   __ mov(t0, zero_reg);
2076   __ lw(subject, MemOperand(sp, kSubjectOffset));
2077   __ JumpIfSmi(subject, &runtime);
2078   __ mov(a3, subject);  // Make a copy of the original subject string.
2079   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2080   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2081   // subject: subject string
2082   // a3: subject string
2083   // a0: subject string instance type
2084   // regexp_data: RegExp data (FixedArray)
2085   // Handle subject string according to its encoding and representation:
2086   // (1) Sequential string?  If yes, go to (5).
2087   // (2) Anything but sequential or cons?  If yes, go to (6).
2088   // (3) Cons string.  If the string is flat, replace subject with first string.
2089   //     Otherwise bailout.
2090   // (4) Is subject external?  If yes, go to (7).
2091   // (5) Sequential string.  Load regexp code according to encoding.
2092   // (E) Carry on.
2093   /// [...]
2094 
2095   // Deferred code at the end of the stub:
2096   // (6) Not a long external string?  If yes, go to (8).
2097   // (7) External string.  Make it, offset-wise, look like a sequential string.
2098   //     Go to (5).
2099   // (8) Short external string or not a string?  If yes, bail out to runtime.
2100   // (9) Sliced string.  Replace subject with parent.  Go to (4).
2101 
2102   Label seq_string /* 5 */, external_string /* 7 */,
2103         check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2104         not_long_external /* 8 */;
2105 
2106   // (1) Sequential string?  If yes, go to (5).
2107   __ And(a1,
2108          a0,
2109          Operand(kIsNotStringMask |
2110                  kStringRepresentationMask |
2111                  kShortExternalStringMask));
2112   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2113   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
2114 
2115   // (2) Anything but sequential or cons?  If yes, go to (6).
2116   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2117   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2118   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2119   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2120   // Go to (6).
2121   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2122 
2123   // (3) Cons string.  Check that it's flat.
2124   // Replace subject with first string and reload instance type.
2125   __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2126   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2127   __ Branch(&runtime, ne, a0, Operand(a1));
2128   __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2129 
2130   // (4) Is subject external?  If yes, go to (7).
2131   __ bind(&check_underlying);
2132   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2133   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2134   STATIC_ASSERT(kSeqStringTag == 0);
2135   __ And(at, a0, Operand(kStringRepresentationMask));
2136   // The underlying external string is never a short external string.
2137   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2138   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2139   __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
2140 
2141   // (5) Sequential string.  Load regexp code according to encoding.
2142   __ bind(&seq_string);
2143   // subject: sequential subject string (or look-alike, external string)
2144   // a3: original subject string
2145   // Load previous index and check range before a3 is overwritten.  We have to
2146   // use a3 instead of subject here because subject might have been only made
2147   // to look like a sequential string when it actually is an external string.
2148   __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2149   __ JumpIfNotSmi(a1, &runtime);
2150   __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2151   __ Branch(&runtime, ls, a3, Operand(a1));
2152   __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
2153 
2154   STATIC_ASSERT(kStringEncodingMask == 4);
2155   STATIC_ASSERT(kOneByteStringTag == 4);
2156   STATIC_ASSERT(kTwoByteStringTag == 0);
2157   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one-byte.
2158   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2159   __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
2160   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2161   __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2162 
2163   // (E) Carry on.  String handling is done.
2164   // t9: irregexp code
2165   // Check that the irregexp code has been generated for the actual string
2166   // encoding. If it has, the field contains a code object otherwise it contains
2167   // a smi (code flushing support).
2168   __ JumpIfSmi(t9, &runtime);
2169 
2170   // a1: previous index
2171   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2172   // t9: code
2173   // subject: Subject string
2174   // regexp_data: RegExp data (FixedArray)
2175   // All checks done. Now push arguments for native regexp code.
2176   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2177                       1, a0, a2);
2178 
2179   // Isolates: note we add an additional parameter here (isolate pointer).
2180   const int kRegExpExecuteArguments = 9;
2181   const int kParameterRegisters = 4;
2182   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2183 
2184   // Stack pointer now points to cell where return address is to be written.
2185   // Arguments are before that on the stack or in registers, meaning we
2186   // treat the return address as argument 5. Thus every argument after that
2187   // needs to be shifted back by 1. Since DirectCEntryStub will handle
2188   // allocating space for the c argument slots, we don't need to calculate
2189   // that into the argument positions on the stack. This is how the stack will
2190   // look (sp meaning the value of sp at this moment):
2191   // [sp + 5] - Argument 9
2192   // [sp + 4] - Argument 8
2193   // [sp + 3] - Argument 7
2194   // [sp + 2] - Argument 6
2195   // [sp + 1] - Argument 5
2196   // [sp + 0] - saved ra
2197 
2198   // Argument 9: Pass current isolate address.
2199   // CFunctionArgumentOperand handles MIPS stack argument slots.
2200   __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2201   __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2202 
2203   // Argument 8: Indicate that this is a direct call from JavaScript.
2204   __ li(a0, Operand(1));
2205   __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2206 
2207   // Argument 7: Start (high end) of backtracking stack memory area.
2208   __ li(a0, Operand(address_of_regexp_stack_memory_address));
2209   __ lw(a0, MemOperand(a0, 0));
2210   __ li(a2, Operand(address_of_regexp_stack_memory_size));
2211   __ lw(a2, MemOperand(a2, 0));
2212   __ addu(a0, a0, a2);
2213   __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2214 
2215   // Argument 6: Set the number of capture registers to zero to force global
2216   // regexps to behave as non-global.  This does not affect non-global regexps.
2217   __ mov(a0, zero_reg);
2218   __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2219 
2220   // Argument 5: static offsets vector buffer.
2221   __ li(a0, Operand(
2222         ExternalReference::address_of_static_offsets_vector(isolate())));
2223   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2224 
2225   // For arguments 4 and 3 get string length, calculate start of string data
2226   // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2227   __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2228   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
2229   // Load the length from the original subject string from the previous stack
2230   // frame. Therefore we have to use fp, which points exactly to two pointer
2231   // sizes below the previous sp. (Because creating a new stack frame pushes
2232   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2233   __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2234   // If slice offset is not 0, load the length from the original sliced string.
2235   // Argument 4, a3: End of string data
2236   // Argument 3, a2: Start of string data
2237   // Prepare start and end index of the input.
2238   __ sllv(t1, t0, a3);
2239   __ addu(t0, t2, t1);
2240   __ sllv(t1, a1, a3);
2241   __ addu(a2, t0, t1);
2242 
2243   __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2244   __ sra(t2, t2, kSmiTagSize);
2245   __ sllv(t1, t2, a3);
2246   __ addu(a3, t0, t1);
2247   // Argument 2 (a1): Previous index.
2248   // Already there
2249 
2250   // Argument 1 (a0): Subject string.
2251   __ mov(a0, subject);
2252 
2253   // Locate the code entry and call it.
2254   __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2255   DirectCEntryStub stub(isolate());
2256   stub.GenerateCall(masm, t9);
2257 
2258   __ LeaveExitFrame(false, no_reg, true);
2259 
2260   // v0: result
2261   // subject: subject string (callee saved)
2262   // regexp_data: RegExp data (callee saved)
2263   // last_match_info_elements: Last match info elements (callee saved)
2264   // Check the result.
2265   Label success;
2266   __ Branch(&success, eq, v0, Operand(1));
2267   // We expect exactly one result since we force the called regexp to behave
2268   // as non-global.
2269   Label failure;
2270   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2271   // If not exception it can only be retry. Handle that in the runtime system.
2272   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2273   // Result must now be exception. If there is no pending exception already a
2274   // stack overflow (on the backtrack stack) was detected in RegExp code but
2275   // haven't created the exception yet. Handle that in the runtime system.
2276   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2277   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2278   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2279                                       isolate())));
2280   __ lw(v0, MemOperand(a2, 0));
2281   __ Branch(&runtime, eq, v0, Operand(a1));
2282 
2283   // For exception, throw the exception again.
2284   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
2285 
2286   __ bind(&failure);
2287   // For failure and exception return null.
2288   __ li(v0, Operand(isolate()->factory()->null_value()));
2289   __ DropAndRet(4);
2290 
2291   // Process the result from the native regexp code.
2292   __ bind(&success);
2293   __ lw(a1,
2294          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2295   // Calculate number of capture registers (number_of_captures + 1) * 2.
2296   // Multiplying by 2 comes for free since r1 is smi-tagged.
2297   STATIC_ASSERT(kSmiTag == 0);
2298   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2299   __ Addu(a1, a1, Operand(2));  // a1 was a smi.
2300 
2301   __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2302   __ JumpIfSmi(a0, &runtime);
2303   __ GetObjectType(a0, a2, a2);
2304   __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2305   // Check that the JSArray is in fast case.
2306   __ lw(last_match_info_elements,
2307         FieldMemOperand(a0, JSArray::kElementsOffset));
2308   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2309   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2310   __ Branch(&runtime, ne, a0, Operand(at));
2311   // Check that the last match info has space for the capture registers and the
2312   // additional information.
2313   __ lw(a0,
2314         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2315   __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2316   __ sra(at, a0, kSmiTagSize);
2317   __ Branch(&runtime, gt, a2, Operand(at));
2318 
2319   // a1: number of capture registers
2320   // subject: subject string
2321   // Store the capture count.
2322   __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
2323   __ sw(a2, FieldMemOperand(last_match_info_elements,
2324                              RegExpImpl::kLastCaptureCountOffset));
2325   // Store last subject and last input.
2326   __ sw(subject,
2327          FieldMemOperand(last_match_info_elements,
2328                          RegExpImpl::kLastSubjectOffset));
2329   __ mov(a2, subject);
2330   __ RecordWriteField(last_match_info_elements,
2331                       RegExpImpl::kLastSubjectOffset,
2332                       subject,
2333                       t3,
2334                       kRAHasNotBeenSaved,
2335                       kDontSaveFPRegs);
2336   __ mov(subject, a2);
2337   __ sw(subject,
2338          FieldMemOperand(last_match_info_elements,
2339                          RegExpImpl::kLastInputOffset));
2340   __ RecordWriteField(last_match_info_elements,
2341                       RegExpImpl::kLastInputOffset,
2342                       subject,
2343                       t3,
2344                       kRAHasNotBeenSaved,
2345                       kDontSaveFPRegs);
2346 
2347   // Get the static offsets vector filled by the native regexp code.
2348   ExternalReference address_of_static_offsets_vector =
2349       ExternalReference::address_of_static_offsets_vector(isolate());
2350   __ li(a2, Operand(address_of_static_offsets_vector));
2351 
2352   // a1: number of capture registers
2353   // a2: offsets vector
2354   Label next_capture, done;
2355   // Capture register counter starts from number of capture registers and
2356   // counts down until wrapping after zero.
2357   __ Addu(a0,
2358          last_match_info_elements,
2359          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2360   __ bind(&next_capture);
2361   __ Subu(a1, a1, Operand(1));
2362   __ Branch(&done, lt, a1, Operand(zero_reg));
2363   // Read the value from the static offsets vector buffer.
2364   __ lw(a3, MemOperand(a2, 0));
2365   __ addiu(a2, a2, kPointerSize);
2366   // Store the smi value in the last match info.
2367   __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
2368   __ sw(a3, MemOperand(a0, 0));
2369   __ Branch(&next_capture, USE_DELAY_SLOT);
2370   __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
2371 
2372   __ bind(&done);
2373 
2374   // Return last match info.
2375   __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2376   __ DropAndRet(4);
2377 
2378   // Do the runtime call to execute the regexp.
2379   __ bind(&runtime);
2380   __ TailCallRuntime(Runtime::kRegExpExec);
2381 
2382   // Deferred code for string handling.
2383   // (6) Not a long external string?  If yes, go to (8).
2384   __ bind(&not_seq_nor_cons);
2385   // Go to (8).
2386   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2387 
2388   // (7) External string.  Make it, offset-wise, look like a sequential string.
2389   __ bind(&external_string);
2390   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2391   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2392   if (FLAG_debug_code) {
2393     // Assert that we do not have a cons or slice (indirect strings) here.
2394     // Sequential strings have already been ruled out.
2395     __ And(at, a0, Operand(kIsIndirectStringMask));
2396     __ Assert(eq,
2397               kExternalStringExpectedButNotFound,
2398               at,
2399               Operand(zero_reg));
2400   }
2401   __ lw(subject,
2402         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2403   // Move the pointer so that offset-wise, it looks like a sequential string.
2404   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2405   __ Subu(subject,
2406           subject,
2407           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2408   __ jmp(&seq_string);    // Go to (5).
2409 
2410   // (8) Short external string or not a string?  If yes, bail out to runtime.
2411   __ bind(&not_long_external);
2412   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2413   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2414   __ Branch(&runtime, ne, at, Operand(zero_reg));
2415 
2416   // (9) Sliced string.  Replace subject with parent.  Go to (4).
2417   // Load offset into t0 and replace subject string with parent.
2418   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2419   __ sra(t0, t0, kSmiTagSize);
2420   __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2421   __ jmp(&check_underlying);  // Go to (4).
2422 #endif  // V8_INTERPRETED_REGEXP
2423 }
2424 
2425 
CallStubInRecordCallTarget(MacroAssembler * masm,CodeStub * stub)2426 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
2427   // a0 : number of arguments to the construct function
2428   // a2 : feedback vector
2429   // a3 : slot in feedback vector (Smi)
2430   // a1 : the function to call
2431   FrameScope scope(masm, StackFrame::INTERNAL);
2432   const RegList kSavedRegs = 1 << 4 |  // a0
2433                              1 << 5 |  // a1
2434                              1 << 6 |  // a2
2435                              1 << 7;   // a3
2436 
2437   // Number-of-arguments register must be smi-tagged to call out.
2438   __ SmiTag(a0);
2439   __ MultiPush(kSavedRegs);
2440 
2441   __ CallStub(stub);
2442 
2443   __ MultiPop(kSavedRegs);
2444   __ SmiUntag(a0);
2445 }
2446 
2447 
GenerateRecordCallTarget(MacroAssembler * masm)2448 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2449   // Cache the called function in a feedback vector slot.  Cache states
2450   // are uninitialized, monomorphic (indicated by a JSFunction), and
2451   // megamorphic.
2452   // a0 : number of arguments to the construct function
2453   // a1 : the function to call
2454   // a2 : feedback vector
2455   // a3 : slot in feedback vector (Smi)
2456   Label initialize, done, miss, megamorphic, not_array_function;
2457 
2458   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2459             masm->isolate()->heap()->megamorphic_symbol());
2460   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2461             masm->isolate()->heap()->uninitialized_symbol());
2462 
2463   // Load the cache state into t2.
2464   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
2465   __ Addu(t2, a2, Operand(t2));
2466   __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
2467 
2468   // A monomorphic cache hit or an already megamorphic state: invoke the
2469   // function without changing the state.
2470   // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
2471   // this position in a symbol (see static asserts in type-feedback-vector.h).
2472   Label check_allocation_site;
2473   Register feedback_map = t1;
2474   Register weak_value = t4;
2475   __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
2476   __ Branch(&done, eq, a1, Operand(weak_value));
2477   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2478   __ Branch(&done, eq, t2, Operand(at));
2479   __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
2480   __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
2481   __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
2482 
2483   // If the weak cell is cleared, we have a new chance to become monomorphic.
2484   __ JumpIfSmi(weak_value, &initialize);
2485   __ jmp(&megamorphic);
2486 
2487   __ bind(&check_allocation_site);
2488   // If we came here, we need to see if we are the array function.
2489   // If we didn't have a matching function, and we didn't find the megamorph
2490   // sentinel, then we have in the slot either some other function or an
2491   // AllocationSite.
2492   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2493   __ Branch(&miss, ne, feedback_map, Operand(at));
2494 
2495   // Make sure the function is the Array() function
2496   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
2497   __ Branch(&megamorphic, ne, a1, Operand(t2));
2498   __ jmp(&done);
2499 
2500   __ bind(&miss);
2501 
2502   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2503   // megamorphic.
2504   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2505   __ Branch(&initialize, eq, t2, Operand(at));
2506   // MegamorphicSentinel is an immortal immovable object (undefined) so no
2507   // write-barrier is needed.
2508   __ bind(&megamorphic);
2509   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
2510   __ Addu(t2, a2, Operand(t2));
2511   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2512   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
2513   __ jmp(&done);
2514 
2515   // An uninitialized cache is patched with the function.
2516   __ bind(&initialize);
2517   // Make sure the function is the Array() function.
2518   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
2519   __ Branch(&not_array_function, ne, a1, Operand(t2));
2520 
2521   // The target function is the Array constructor,
2522   // Create an AllocationSite if we don't already have it, store it in the
2523   // slot.
2524   CreateAllocationSiteStub create_stub(masm->isolate());
2525   CallStubInRecordCallTarget(masm, &create_stub);
2526   __ Branch(&done);
2527 
2528   __ bind(&not_array_function);
2529   CreateWeakCellStub weak_cell_stub(masm->isolate());
2530   CallStubInRecordCallTarget(masm, &weak_cell_stub);
2531   __ bind(&done);
2532 }
2533 
2534 
Generate(MacroAssembler * masm)2535 void CallConstructStub::Generate(MacroAssembler* masm) {
2536   // a0 : number of arguments
2537   // a1 : the function to call
2538   // a2 : feedback vector
2539   // a3 : slot in feedback vector (Smi, for RecordCallTarget)
2540 
2541   Label non_function;
2542   // Check that the function is not a smi.
2543   __ JumpIfSmi(a1, &non_function);
2544   // Check that the function is a JSFunction.
2545   __ GetObjectType(a1, t1, t1);
2546   __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
2547 
2548   GenerateRecordCallTarget(masm);
2549 
2550   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2551   __ Addu(t1, a2, at);
2552   Label feedback_register_initialized;
2553   // Put the AllocationSite from the feedback vector into a2, or undefined.
2554   __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2555   __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
2556   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2557   __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2558   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2559   __ bind(&feedback_register_initialized);
2560 
2561   __ AssertUndefinedOrAllocationSite(a2, t1);
2562 
2563   // Pass function as new target.
2564   __ mov(a3, a1);
2565 
2566   // Tail call to the function-specific construct stub (still in the caller
2567   // context at this point).
2568   __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2569   __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
2570   __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
2571   __ Jump(at);
2572 
2573   __ bind(&non_function);
2574   __ mov(a3, a1);
2575   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
2576 }
2577 
2578 
HandleArrayCase(MacroAssembler * masm,Label * miss)2579 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
2580   // a1 - function
2581   // a3 - slot id
2582   // a2 - vector
2583   // t0 - loaded from vector[slot]
2584   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
2585   __ Branch(miss, ne, a1, Operand(at));
2586 
2587   __ li(a0, Operand(arg_count()));
2588 
2589   // Increment the call count for monomorphic function calls.
2590   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2591   __ Addu(at, a2, Operand(at));
2592   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2593   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2594   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2595 
2596   __ mov(a2, t0);
2597   __ mov(a3, a1);
2598   ArrayConstructorStub stub(masm->isolate(), arg_count());
2599   __ TailCallStub(&stub);
2600 }
2601 
2602 
Generate(MacroAssembler * masm)2603 void CallICStub::Generate(MacroAssembler* masm) {
2604   // a1 - function
2605   // a3 - slot id (Smi)
2606   // a2 - vector
2607   Label extra_checks_or_miss, call, call_function;
2608   int argc = arg_count();
2609   ParameterCount actual(argc);
2610 
2611   // The checks. First, does r1 match the recorded monomorphic target?
2612   __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2613   __ Addu(t0, a2, Operand(t0));
2614   __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2615 
2616   // We don't know that we have a weak cell. We might have a private symbol
2617   // or an AllocationSite, but the memory is safe to examine.
2618   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2619   // FixedArray.
2620   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2621   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2622   // computed, meaning that it can't appear to be a pointer. If the low bit is
2623   // 0, then hash is computed, but the 0 bit prevents the field from appearing
2624   // to be a pointer.
2625   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2626   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2627                     WeakCell::kValueOffset &&
2628                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2629 
2630   __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
2631   __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
2632 
2633   // The compare above could have been a SMI/SMI comparison. Guard against this
2634   // convincing us that we have a monomorphic JSFunction.
2635   __ JumpIfSmi(a1, &extra_checks_or_miss);
2636 
2637   // Increment the call count for monomorphic function calls.
2638   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2639   __ Addu(at, a2, Operand(at));
2640   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2641   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2642   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2643 
2644   __ bind(&call_function);
2645   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
2646           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
2647           USE_DELAY_SLOT);
2648   __ li(a0, Operand(argc));  // In delay slot.
2649 
2650   __ bind(&extra_checks_or_miss);
2651   Label uninitialized, miss, not_allocation_site;
2652 
2653   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2654   __ Branch(&call, eq, t0, Operand(at));
2655 
2656   // Verify that t0 contains an AllocationSite
2657   __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
2658   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2659   __ Branch(&not_allocation_site, ne, t1, Operand(at));
2660 
2661   HandleArrayCase(masm, &miss);
2662 
2663   __ bind(&not_allocation_site);
2664 
2665   // The following cases attempt to handle MISS cases without going to the
2666   // runtime.
2667   if (FLAG_trace_ic) {
2668     __ Branch(&miss);
2669   }
2670 
2671   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2672   __ Branch(&uninitialized, eq, t0, Operand(at));
2673 
2674   // We are going megamorphic. If the feedback is a JSFunction, it is fine
2675   // to handle it here. More complex cases are dealt with in the runtime.
2676   __ AssertNotSmi(t0);
2677   __ GetObjectType(t0, t1, t1);
2678   __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2679   __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2680   __ Addu(t0, a2, Operand(t0));
2681   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2682   __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2683 
2684   __ bind(&call);
2685   __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
2686           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
2687           USE_DELAY_SLOT);
2688   __ li(a0, Operand(argc));  // In delay slot.
2689 
2690   __ bind(&uninitialized);
2691 
2692   // We are going monomorphic, provided we actually have a JSFunction.
2693   __ JumpIfSmi(a1, &miss);
2694 
2695   // Goto miss case if we do not have a function.
2696   __ GetObjectType(a1, t0, t0);
2697   __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
2698 
2699   // Make sure the function is not the Array() function, which requires special
2700   // behavior on MISS.
2701   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
2702   __ Branch(&miss, eq, a1, Operand(t0));
2703 
2704   // Make sure the function belongs to the same native context.
2705   __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
2706   __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
2707   __ lw(t1, NativeContextMemOperand());
2708   __ Branch(&miss, ne, t0, Operand(t1));
2709 
2710   // Initialize the call counter.
2711   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2712   __ Addu(at, a2, Operand(at));
2713   __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2714   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2715 
2716   // Store the function. Use a stub since we need a frame for allocation.
2717   // a2 - vector
2718   // a3 - slot
2719   // a1 - function
2720   {
2721     FrameScope scope(masm, StackFrame::INTERNAL);
2722     CreateWeakCellStub create_stub(masm->isolate());
2723     __ Push(a1);
2724     __ CallStub(&create_stub);
2725     __ Pop(a1);
2726   }
2727 
2728   __ Branch(&call_function);
2729 
2730   // We are here because tracing is on or we encountered a MISS case we can't
2731   // handle here.
2732   __ bind(&miss);
2733   GenerateMiss(masm);
2734 
2735   __ Branch(&call);
2736 }
2737 
2738 
GenerateMiss(MacroAssembler * masm)2739 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2740   FrameScope scope(masm, StackFrame::INTERNAL);
2741 
2742   // Push the receiver and the function and feedback info.
2743   __ Push(a1, a2, a3);
2744 
2745   // Call the entry.
2746   __ CallRuntime(Runtime::kCallIC_Miss);
2747 
2748   // Move result to a1 and exit the internal frame.
2749   __ mov(a1, v0);
2750 }
2751 
2752 
2753 // StringCharCodeAtGenerator.
GenerateFast(MacroAssembler * masm)2754 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2755   DCHECK(!t0.is(index_));
2756   DCHECK(!t0.is(result_));
2757   DCHECK(!t0.is(object_));
2758   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2759     // If the receiver is a smi trigger the non-string case.
2760     __ JumpIfSmi(object_, receiver_not_string_);
2761 
2762     // Fetch the instance type of the receiver into result register.
2763     __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2764     __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2765     // If the receiver is not a string trigger the non-string case.
2766     __ And(t0, result_, Operand(kIsNotStringMask));
2767     __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
2768   }
2769 
2770   // If the index is non-smi trigger the non-smi case.
2771   __ JumpIfNotSmi(index_, &index_not_smi_);
2772 
2773   __ bind(&got_smi_index_);
2774 
2775   // Check for index out of range.
2776   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
2777   __ Branch(index_out_of_range_, ls, t0, Operand(index_));
2778 
2779   __ sra(index_, index_, kSmiTagSize);
2780 
2781   StringCharLoadGenerator::Generate(masm,
2782                                     object_,
2783                                     index_,
2784                                     result_,
2785                                     &call_runtime_);
2786 
2787   __ sll(result_, result_, kSmiTagSize);
2788   __ bind(&exit_);
2789 }
2790 
2791 
GenerateSlow(MacroAssembler * masm,EmbedMode embed_mode,const RuntimeCallHelper & call_helper)2792 void StringCharCodeAtGenerator::GenerateSlow(
2793     MacroAssembler* masm, EmbedMode embed_mode,
2794     const RuntimeCallHelper& call_helper) {
2795   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2796 
2797   // Index is not a smi.
2798   __ bind(&index_not_smi_);
2799   // If index is a heap number, try converting it to an integer.
2800   __ CheckMap(index_,
2801               result_,
2802               Heap::kHeapNumberMapRootIndex,
2803               index_not_number_,
2804               DONT_DO_SMI_CHECK);
2805   call_helper.BeforeCall(masm);
2806   // Consumed by runtime conversion function:
2807   if (embed_mode == PART_OF_IC_HANDLER) {
2808     __ Push(LoadWithVectorDescriptor::VectorRegister(),
2809             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2810   } else {
2811     __ Push(object_, index_);
2812   }
2813   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2814     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
2815   } else {
2816     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2817     // NumberToSmi discards numbers that are not exact integers.
2818     __ CallRuntime(Runtime::kNumberToSmi);
2819   }
2820 
2821   // Save the conversion result before the pop instructions below
2822   // have a chance to overwrite it.
2823   __ Move(index_, v0);
2824   if (embed_mode == PART_OF_IC_HANDLER) {
2825     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2826            LoadWithVectorDescriptor::SlotRegister(), object_);
2827   } else {
2828     __ pop(object_);
2829   }
2830   // Reload the instance type.
2831   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2832   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2833   call_helper.AfterCall(masm);
2834   // If index is still not a smi, it must be out of range.
2835   __ JumpIfNotSmi(index_, index_out_of_range_);
2836   // Otherwise, return to the fast path.
2837   __ Branch(&got_smi_index_);
2838 
2839   // Call runtime. We get here when the receiver is a string and the
2840   // index is a number, but the code of getting the actual character
2841   // is too complex (e.g., when the string needs to be flattened).
2842   __ bind(&call_runtime_);
2843   call_helper.BeforeCall(masm);
2844   __ sll(index_, index_, kSmiTagSize);
2845   __ Push(object_, index_);
2846   __ CallRuntime(Runtime::kStringCharCodeAtRT);
2847 
2848   __ Move(result_, v0);
2849 
2850   call_helper.AfterCall(masm);
2851   __ jmp(&exit_);
2852 
2853   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2854 }
2855 
2856 
2857 // -------------------------------------------------------------------------
2858 // StringCharFromCodeGenerator
2859 
GenerateFast(MacroAssembler * masm)2860 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2861   // Fast case of Heap::LookupSingleCharacterStringFromCode.
2862 
2863   DCHECK(!t0.is(result_));
2864   DCHECK(!t0.is(code_));
2865 
2866   STATIC_ASSERT(kSmiTag == 0);
2867   STATIC_ASSERT(kSmiShiftSize == 0);
2868   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2869   __ And(t0, code_, Operand(kSmiTagMask |
2870                             ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
2871   __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
2872 
2873   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2874   // At this point code register contains smi tagged one-byte char code.
2875   STATIC_ASSERT(kSmiTag == 0);
2876   __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
2877   __ Addu(result_, result_, t0);
2878   __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2879   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2880   __ Branch(&slow_case_, eq, result_, Operand(t0));
2881   __ bind(&exit_);
2882 }
2883 
2884 
GenerateSlow(MacroAssembler * masm,const RuntimeCallHelper & call_helper)2885 void StringCharFromCodeGenerator::GenerateSlow(
2886     MacroAssembler* masm,
2887     const RuntimeCallHelper& call_helper) {
2888   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2889 
2890   __ bind(&slow_case_);
2891   call_helper.BeforeCall(masm);
2892   __ push(code_);
2893   __ CallRuntime(Runtime::kStringCharFromCode);
2894   __ Move(result_, v0);
2895 
2896   call_helper.AfterCall(masm);
2897   __ Branch(&exit_);
2898 
2899   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2900 }
2901 
2902 
2903 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2904 
2905 
GenerateCopyCharacters(MacroAssembler * masm,Register dest,Register src,Register count,Register scratch,String::Encoding encoding)2906 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2907                                           Register dest,
2908                                           Register src,
2909                                           Register count,
2910                                           Register scratch,
2911                                           String::Encoding encoding) {
2912   if (FLAG_debug_code) {
2913     // Check that destination is word aligned.
2914     __ And(scratch, dest, Operand(kPointerAlignmentMask));
2915     __ Check(eq,
2916              kDestinationOfCopyNotAligned,
2917              scratch,
2918              Operand(zero_reg));
2919   }
2920 
2921   // Assumes word reads and writes are little endian.
2922   // Nothing to do for zero characters.
2923   Label done;
2924 
2925   if (encoding == String::TWO_BYTE_ENCODING) {
2926     __ Addu(count, count, count);
2927   }
2928 
2929   Register limit = count;  // Read until dest equals this.
2930   __ Addu(limit, dest, Operand(count));
2931 
2932   Label loop_entry, loop;
2933   // Copy bytes from src to dest until dest hits limit.
2934   __ Branch(&loop_entry);
2935   __ bind(&loop);
2936   __ lbu(scratch, MemOperand(src));
2937   __ Addu(src, src, Operand(1));
2938   __ sb(scratch, MemOperand(dest));
2939   __ Addu(dest, dest, Operand(1));
2940   __ bind(&loop_entry);
2941   __ Branch(&loop, lt, dest, Operand(limit));
2942 
2943   __ bind(&done);
2944 }
2945 
2946 
Generate(MacroAssembler * masm)2947 void SubStringStub::Generate(MacroAssembler* masm) {
2948   Label runtime;
2949   // Stack frame on entry.
2950   //  ra: return address
2951   //  sp[0]: to
2952   //  sp[4]: from
2953   //  sp[8]: string
2954 
2955   // This stub is called from the native-call %_SubString(...), so
2956   // nothing can be assumed about the arguments. It is tested that:
2957   //  "string" is a sequential string,
2958   //  both "from" and "to" are smis, and
2959   //  0 <= from <= to <= string.length.
2960   // If any of these assumptions fail, we call the runtime system.
2961 
2962   const int kToOffset = 0 * kPointerSize;
2963   const int kFromOffset = 1 * kPointerSize;
2964   const int kStringOffset = 2 * kPointerSize;
2965 
2966   __ lw(a2, MemOperand(sp, kToOffset));
2967   __ lw(a3, MemOperand(sp, kFromOffset));
2968   STATIC_ASSERT(kFromOffset == kToOffset + 4);
2969   STATIC_ASSERT(kSmiTag == 0);
2970   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2971 
2972   // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
2973   // safe in this case.
2974   __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
2975   __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
2976   // Both a2 and a3 are untagged integers.
2977 
2978   __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
2979 
2980   __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
2981   __ Subu(a2, a2, a3);
2982 
2983   // Make sure first argument is a string.
2984   __ lw(v0, MemOperand(sp, kStringOffset));
2985   __ JumpIfSmi(v0, &runtime);
2986   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
2987   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
2988   __ And(t0, a1, Operand(kIsNotStringMask));
2989 
2990   __ Branch(&runtime, ne, t0, Operand(zero_reg));
2991 
2992   Label single_char;
2993   __ Branch(&single_char, eq, a2, Operand(1));
2994 
2995   // Short-cut for the case of trivial substring.
2996   Label return_v0;
2997   // v0: original string
2998   // a2: result string length
2999   __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3000   __ sra(t0, t0, 1);
3001   // Return original string.
3002   __ Branch(&return_v0, eq, a2, Operand(t0));
3003   // Longer than original string's length or negative: unsafe arguments.
3004   __ Branch(&runtime, hi, a2, Operand(t0));
3005   // Shorter than original string's length: an actual substring.
3006 
3007   // Deal with different string types: update the index if necessary
3008   // and put the underlying string into t1.
3009   // v0: original string
3010   // a1: instance type
3011   // a2: length
3012   // a3: from index (untagged)
3013   Label underlying_unpacked, sliced_string, seq_or_external_string;
3014   // If the string is not indirect, it can only be sequential or external.
3015   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3016   STATIC_ASSERT(kIsIndirectStringMask != 0);
3017   __ And(t0, a1, Operand(kIsIndirectStringMask));
3018   __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3019   // t0 is used as a scratch register and can be overwritten in either case.
3020   __ And(t0, a1, Operand(kSlicedNotConsMask));
3021   __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3022   // Cons string.  Check whether it is flat, then fetch first part.
3023   __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3024   __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3025   __ Branch(&runtime, ne, t1, Operand(t0));
3026   __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3027   // Update instance type.
3028   __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3029   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3030   __ jmp(&underlying_unpacked);
3031 
3032   __ bind(&sliced_string);
3033   // Sliced string.  Fetch parent and correct start index by offset.
3034   __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3035   __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3036   __ sra(t0, t0, 1);  // Add offset to index.
3037   __ Addu(a3, a3, t0);
3038   // Update instance type.
3039   __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3040   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3041   __ jmp(&underlying_unpacked);
3042 
3043   __ bind(&seq_or_external_string);
3044   // Sequential or external string.  Just move string to the expected register.
3045   __ mov(t1, v0);
3046 
3047   __ bind(&underlying_unpacked);
3048 
3049   if (FLAG_string_slices) {
3050     Label copy_routine;
3051     // t1: underlying subject string
3052     // a1: instance type of underlying subject string
3053     // a2: length
3054     // a3: adjusted start index (untagged)
3055     // Short slice.  Copy instead of slicing.
3056     __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3057     // Allocate new sliced string.  At this point we do not reload the instance
3058     // type including the string encoding because we simply rely on the info
3059     // provided by the original string.  It does not matter if the original
3060     // string's encoding is wrong because we always have to recheck encoding of
3061     // the newly created string's parent anyways due to externalized strings.
3062     Label two_byte_slice, set_slice_header;
3063     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3064     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3065     __ And(t0, a1, Operand(kStringEncodingMask));
3066     __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3067     __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3068     __ jmp(&set_slice_header);
3069     __ bind(&two_byte_slice);
3070     __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3071     __ bind(&set_slice_header);
3072     __ sll(a3, a3, 1);
3073     __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3074     __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3075     __ jmp(&return_v0);
3076 
3077     __ bind(&copy_routine);
3078   }
3079 
3080   // t1: underlying subject string
3081   // a1: instance type of underlying subject string
3082   // a2: length
3083   // a3: adjusted start index (untagged)
3084   Label two_byte_sequential, sequential_string, allocate_result;
3085   STATIC_ASSERT(kExternalStringTag != 0);
3086   STATIC_ASSERT(kSeqStringTag == 0);
3087   __ And(t0, a1, Operand(kExternalStringTag));
3088   __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3089 
3090   // Handle external string.
3091   // Rule out short external strings.
3092   STATIC_ASSERT(kShortExternalStringTag != 0);
3093   __ And(t0, a1, Operand(kShortExternalStringTag));
3094   __ Branch(&runtime, ne, t0, Operand(zero_reg));
3095   __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3096   // t1 already points to the first character of underlying string.
3097   __ jmp(&allocate_result);
3098 
3099   __ bind(&sequential_string);
3100   // Locate first character of underlying subject string.
3101   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3102   __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3103 
3104   __ bind(&allocate_result);
3105   // Sequential acii string.  Allocate the result.
3106   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3107   __ And(t0, a1, Operand(kStringEncodingMask));
3108   __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3109 
3110   // Allocate and copy the resulting ASCII string.
3111   __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3112 
3113   // Locate first character of substring to copy.
3114   __ Addu(t1, t1, a3);
3115 
3116   // Locate first character of result.
3117   __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3118 
3119   // v0: result string
3120   // a1: first character of result string
3121   // a2: result string length
3122   // t1: first character of substring to copy
3123   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3124   StringHelper::GenerateCopyCharacters(
3125       masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3126   __ jmp(&return_v0);
3127 
3128   // Allocate and copy the resulting two-byte string.
3129   __ bind(&two_byte_sequential);
3130   __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3131 
3132   // Locate first character of substring to copy.
3133   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3134   __ sll(t0, a3, 1);
3135   __ Addu(t1, t1, t0);
3136   // Locate first character of result.
3137   __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3138 
3139   // v0: result string.
3140   // a1: first character of result.
3141   // a2: result length.
3142   // t1: first character of substring to copy.
3143   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3144   StringHelper::GenerateCopyCharacters(
3145       masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3146 
3147   __ bind(&return_v0);
3148   Counters* counters = isolate()->counters();
3149   __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3150   __ DropAndRet(3);
3151 
3152   // Just jump to runtime to create the sub string.
3153   __ bind(&runtime);
3154   __ TailCallRuntime(Runtime::kSubString);
3155 
3156   __ bind(&single_char);
3157   // v0: original string
3158   // a1: instance type
3159   // a2: length
3160   // a3: from index (untagged)
3161   __ SmiTag(a3, a3);
3162   StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
3163                                   STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3164   generator.GenerateFast(masm);
3165   __ DropAndRet(3);
3166   generator.SkipSlow(masm, &runtime);
3167 }
3168 
3169 
Generate(MacroAssembler * masm)3170 void ToNumberStub::Generate(MacroAssembler* masm) {
3171   // The ToNumber stub takes one argument in a0.
3172   Label not_smi;
3173   __ JumpIfNotSmi(a0, &not_smi);
3174   __ Ret(USE_DELAY_SLOT);
3175   __ mov(v0, a0);
3176   __ bind(&not_smi);
3177 
3178   Label not_heap_number;
3179   __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
3180   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3181   // a0: object
3182   // a1: instance type.
3183   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
3184   __ Ret(USE_DELAY_SLOT);
3185   __ mov(v0, a0);
3186   __ bind(&not_heap_number);
3187 
3188   Label not_string, slow_string;
3189   __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
3190   // Check if string has a cached array index.
3191   __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
3192   __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
3193   __ Branch(&slow_string, ne, at, Operand(zero_reg));
3194   __ IndexFromHash(a2, a0);
3195   __ Ret(USE_DELAY_SLOT);
3196   __ mov(v0, a0);
3197   __ bind(&slow_string);
3198   __ push(a0);  // Push argument.
3199   __ TailCallRuntime(Runtime::kStringToNumber);
3200   __ bind(&not_string);
3201 
3202   Label not_oddball;
3203   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
3204   __ Ret(USE_DELAY_SLOT);
3205   __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
3206   __ bind(&not_oddball);
3207 
3208   __ push(a0);  // Push argument.
3209   __ TailCallRuntime(Runtime::kToNumber);
3210 }
3211 
3212 
Generate(MacroAssembler * masm)3213 void ToLengthStub::Generate(MacroAssembler* masm) {
3214   // The ToLength stub takes on argument in a0.
3215   Label not_smi, positive_smi;
3216   __ JumpIfNotSmi(a0, &not_smi);
3217   STATIC_ASSERT(kSmiTag == 0);
3218   __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
3219   __ mov(a0, zero_reg);
3220   __ bind(&positive_smi);
3221   __ Ret(USE_DELAY_SLOT);
3222   __ mov(v0, a0);
3223   __ bind(&not_smi);
3224 
3225   __ push(a0);  // Push argument.
3226   __ TailCallRuntime(Runtime::kToLength);
3227 }
3228 
3229 
Generate(MacroAssembler * masm)3230 void ToStringStub::Generate(MacroAssembler* masm) {
3231   // The ToString stub takes on argument in a0.
3232   Label is_number;
3233   __ JumpIfSmi(a0, &is_number);
3234 
3235   Label not_string;
3236   __ GetObjectType(a0, a1, a1);
3237   // a0: receiver
3238   // a1: receiver instance type
3239   __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
3240   __ Ret(USE_DELAY_SLOT);
3241   __ mov(v0, a0);
3242   __ bind(&not_string);
3243 
3244   Label not_heap_number;
3245   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
3246   __ bind(&is_number);
3247   NumberToStringStub stub(isolate());
3248   __ TailCallStub(&stub);
3249   __ bind(&not_heap_number);
3250 
3251   Label not_oddball;
3252   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
3253   __ Ret(USE_DELAY_SLOT);
3254   __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
3255   __ bind(&not_oddball);
3256 
3257   __ push(a0);  // Push argument.
3258   __ TailCallRuntime(Runtime::kToString);
3259 }
3260 
3261 
GenerateFlatOneByteStringEquals(MacroAssembler * masm,Register left,Register right,Register scratch1,Register scratch2,Register scratch3)3262 void StringHelper::GenerateFlatOneByteStringEquals(
3263     MacroAssembler* masm, Register left, Register right, Register scratch1,
3264     Register scratch2, Register scratch3) {
3265   Register length = scratch1;
3266 
3267   // Compare lengths.
3268   Label strings_not_equal, check_zero_length;
3269   __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3270   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3271   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3272   __ bind(&strings_not_equal);
3273   DCHECK(is_int16(NOT_EQUAL));
3274   __ Ret(USE_DELAY_SLOT);
3275   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3276 
3277   // Check if the length is zero.
3278   Label compare_chars;
3279   __ bind(&check_zero_length);
3280   STATIC_ASSERT(kSmiTag == 0);
3281   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3282   DCHECK(is_int16(EQUAL));
3283   __ Ret(USE_DELAY_SLOT);
3284   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3285 
3286   // Compare characters.
3287   __ bind(&compare_chars);
3288 
3289   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3290                                   v0, &strings_not_equal);
3291 
3292   // Characters are equal.
3293   __ Ret(USE_DELAY_SLOT);
3294   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3295 }
3296 
3297 
GenerateCompareFlatOneByteStrings(MacroAssembler * masm,Register left,Register right,Register scratch1,Register scratch2,Register scratch3,Register scratch4)3298 void StringHelper::GenerateCompareFlatOneByteStrings(
3299     MacroAssembler* masm, Register left, Register right, Register scratch1,
3300     Register scratch2, Register scratch3, Register scratch4) {
3301   Label result_not_equal, compare_lengths;
3302   // Find minimum length and length difference.
3303   __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3304   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3305   __ Subu(scratch3, scratch1, Operand(scratch2));
3306   Register length_delta = scratch3;
3307   __ slt(scratch4, scratch2, scratch1);
3308   __ Movn(scratch1, scratch2, scratch4);
3309   Register min_length = scratch1;
3310   STATIC_ASSERT(kSmiTag == 0);
3311   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3312 
3313   // Compare loop.
3314   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3315                                   scratch4, v0, &result_not_equal);
3316 
3317   // Compare lengths - strings up to min-length are equal.
3318   __ bind(&compare_lengths);
3319   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3320   // Use length_delta as result if it's zero.
3321   __ mov(scratch2, length_delta);
3322   __ mov(scratch4, zero_reg);
3323   __ mov(v0, zero_reg);
3324 
3325   __ bind(&result_not_equal);
3326   // Conditionally update the result based either on length_delta or
3327   // the last comparion performed in the loop above.
3328   Label ret;
3329   __ Branch(&ret, eq, scratch2, Operand(scratch4));
3330   __ li(v0, Operand(Smi::FromInt(GREATER)));
3331   __ Branch(&ret, gt, scratch2, Operand(scratch4));
3332   __ li(v0, Operand(Smi::FromInt(LESS)));
3333   __ bind(&ret);
3334   __ Ret();
3335 }
3336 
3337 
GenerateOneByteCharsCompareLoop(MacroAssembler * masm,Register left,Register right,Register length,Register scratch1,Register scratch2,Register scratch3,Label * chars_not_equal)3338 void StringHelper::GenerateOneByteCharsCompareLoop(
3339     MacroAssembler* masm, Register left, Register right, Register length,
3340     Register scratch1, Register scratch2, Register scratch3,
3341     Label* chars_not_equal) {
3342   // Change index to run from -length to -1 by adding length to string
3343   // start. This means that loop ends when index reaches zero, which
3344   // doesn't need an additional compare.
3345   __ SmiUntag(length);
3346   __ Addu(scratch1, length,
3347           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3348   __ Addu(left, left, Operand(scratch1));
3349   __ Addu(right, right, Operand(scratch1));
3350   __ Subu(length, zero_reg, length);
3351   Register index = length;  // index = -length;
3352 
3353 
3354   // Compare loop.
3355   Label loop;
3356   __ bind(&loop);
3357   __ Addu(scratch3, left, index);
3358   __ lbu(scratch1, MemOperand(scratch3));
3359   __ Addu(scratch3, right, index);
3360   __ lbu(scratch2, MemOperand(scratch3));
3361   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3362   __ Addu(index, index, 1);
3363   __ Branch(&loop, ne, index, Operand(zero_reg));
3364 }
3365 
3366 
Generate(MacroAssembler * masm)3367 void StringCompareStub::Generate(MacroAssembler* masm) {
3368   // ----------- S t a t e -------------
3369   //  -- a1    : left
3370   //  -- a0    : right
3371   //  -- ra    : return address
3372   // -----------------------------------
3373   __ AssertString(a1);
3374   __ AssertString(a0);
3375 
3376   Label not_same;
3377   __ Branch(&not_same, ne, a0, Operand(a1));
3378   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3379   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
3380                       a2);
3381   __ Ret();
3382 
3383   __ bind(&not_same);
3384 
3385   // Check that both objects are sequential one-byte strings.
3386   Label runtime;
3387   __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3388 
3389   // Compare flat ASCII strings natively.
3390   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
3391                       a3);
3392   StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
3393 
3394   __ bind(&runtime);
3395   __ Push(a1, a0);
3396   __ TailCallRuntime(Runtime::kStringCompare);
3397 }
3398 
3399 
Generate(MacroAssembler * masm)3400 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3401   // ----------- S t a t e -------------
3402   //  -- a1    : left
3403   //  -- a0    : right
3404   //  -- ra    : return address
3405   // -----------------------------------
3406 
3407   // Load a2 with the allocation site. We stick an undefined dummy value here
3408   // and replace it with the real allocation site later when we instantiate this
3409   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3410   __ li(a2, handle(isolate()->heap()->undefined_value()));
3411 
3412   // Make sure that we actually patched the allocation site.
3413   if (FLAG_debug_code) {
3414     __ And(at, a2, Operand(kSmiTagMask));
3415     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3416     __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
3417     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3418     __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3419   }
3420 
3421   // Tail call into the stub that handles binary operations with allocation
3422   // sites.
3423   BinaryOpWithAllocationSiteStub stub(isolate(), state());
3424   __ TailCallStub(&stub);
3425 }
3426 
3427 
GenerateBooleans(MacroAssembler * masm)3428 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
3429   DCHECK_EQ(CompareICState::BOOLEAN, state());
3430   Label miss;
3431 
3432   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
3433   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
3434   if (op() != Token::EQ_STRICT && is_strong(strength())) {
3435     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
3436   } else {
3437     if (!Token::IsEqualityOp(op())) {
3438       __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
3439       __ AssertSmi(a1);
3440       __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
3441       __ AssertSmi(a0);
3442     }
3443     __ Ret(USE_DELAY_SLOT);
3444     __ Subu(v0, a1, a0);
3445   }
3446 
3447   __ bind(&miss);
3448   GenerateMiss(masm);
3449 }
3450 
3451 
GenerateSmis(MacroAssembler * masm)3452 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3453   DCHECK(state() == CompareICState::SMI);
3454   Label miss;
3455   __ Or(a2, a1, a0);
3456   __ JumpIfNotSmi(a2, &miss);
3457 
3458   if (GetCondition() == eq) {
3459     // For equality we do not care about the sign of the result.
3460     __ Ret(USE_DELAY_SLOT);
3461     __ Subu(v0, a0, a1);
3462   } else {
3463     // Untag before subtracting to avoid handling overflow.
3464     __ SmiUntag(a1);
3465     __ SmiUntag(a0);
3466     __ Ret(USE_DELAY_SLOT);
3467     __ Subu(v0, a1, a0);
3468   }
3469 
3470   __ bind(&miss);
3471   GenerateMiss(masm);
3472 }
3473 
3474 
GenerateNumbers(MacroAssembler * masm)3475 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3476   DCHECK(state() == CompareICState::NUMBER);
3477 
3478   Label generic_stub;
3479   Label unordered, maybe_undefined1, maybe_undefined2;
3480   Label miss;
3481 
3482   if (left() == CompareICState::SMI) {
3483     __ JumpIfNotSmi(a1, &miss);
3484   }
3485   if (right() == CompareICState::SMI) {
3486     __ JumpIfNotSmi(a0, &miss);
3487   }
3488 
3489   // Inlining the double comparison and falling back to the general compare
3490   // stub if NaN is involved.
3491   // Load left and right operand.
3492   Label done, left, left_smi, right_smi;
3493   __ JumpIfSmi(a0, &right_smi);
3494   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3495               DONT_DO_SMI_CHECK);
3496   __ Subu(a2, a0, Operand(kHeapObjectTag));
3497   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3498   __ Branch(&left);
3499   __ bind(&right_smi);
3500   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
3501   FPURegister single_scratch = f6;
3502   __ mtc1(a2, single_scratch);
3503   __ cvt_d_w(f2, single_scratch);
3504 
3505   __ bind(&left);
3506   __ JumpIfSmi(a1, &left_smi);
3507   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3508               DONT_DO_SMI_CHECK);
3509   __ Subu(a2, a1, Operand(kHeapObjectTag));
3510   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3511   __ Branch(&done);
3512   __ bind(&left_smi);
3513   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
3514   single_scratch = f8;
3515   __ mtc1(a2, single_scratch);
3516   __ cvt_d_w(f0, single_scratch);
3517 
3518   __ bind(&done);
3519 
3520   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3521   Label fpu_eq, fpu_lt;
3522   // Test if equal, and also handle the unordered/NaN case.
3523   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3524 
3525   // Test if less (unordered case is already handled).
3526   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3527 
3528   // Otherwise it's greater, so just fall thru, and return.
3529   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3530   __ Ret(USE_DELAY_SLOT);
3531   __ li(v0, Operand(GREATER));
3532 
3533   __ bind(&fpu_eq);
3534   __ Ret(USE_DELAY_SLOT);
3535   __ li(v0, Operand(EQUAL));
3536 
3537   __ bind(&fpu_lt);
3538   __ Ret(USE_DELAY_SLOT);
3539   __ li(v0, Operand(LESS));
3540 
3541   __ bind(&unordered);
3542   __ bind(&generic_stub);
3543   CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
3544                      CompareICState::GENERIC, CompareICState::GENERIC);
3545   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3546 
3547   __ bind(&maybe_undefined1);
3548   if (Token::IsOrderedRelationalCompareOp(op())) {
3549     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3550     __ Branch(&miss, ne, a0, Operand(at));
3551     __ JumpIfSmi(a1, &unordered);
3552     __ GetObjectType(a1, a2, a2);
3553     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3554     __ jmp(&unordered);
3555   }
3556 
3557   __ bind(&maybe_undefined2);
3558   if (Token::IsOrderedRelationalCompareOp(op())) {
3559     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3560     __ Branch(&unordered, eq, a1, Operand(at));
3561   }
3562 
3563   __ bind(&miss);
3564   GenerateMiss(masm);
3565 }
3566 
3567 
GenerateInternalizedStrings(MacroAssembler * masm)3568 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3569   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3570   Label miss;
3571 
3572   // Registers containing left and right operands respectively.
3573   Register left = a1;
3574   Register right = a0;
3575   Register tmp1 = a2;
3576   Register tmp2 = a3;
3577 
3578   // Check that both operands are heap objects.
3579   __ JumpIfEitherSmi(left, right, &miss);
3580 
3581   // Check that both operands are internalized strings.
3582   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3583   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3584   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3585   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3586   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3587   __ Or(tmp1, tmp1, Operand(tmp2));
3588   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3589   __ Branch(&miss, ne, at, Operand(zero_reg));
3590 
3591   // Make sure a0 is non-zero. At this point input operands are
3592   // guaranteed to be non-zero.
3593   DCHECK(right.is(a0));
3594   STATIC_ASSERT(EQUAL == 0);
3595   STATIC_ASSERT(kSmiTag == 0);
3596   __ mov(v0, right);
3597   // Internalized strings are compared by identity.
3598   __ Ret(ne, left, Operand(right));
3599   DCHECK(is_int16(EQUAL));
3600   __ Ret(USE_DELAY_SLOT);
3601   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3602 
3603   __ bind(&miss);
3604   GenerateMiss(masm);
3605 }
3606 
3607 
GenerateUniqueNames(MacroAssembler * masm)3608 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3609   DCHECK(state() == CompareICState::UNIQUE_NAME);
3610   DCHECK(GetCondition() == eq);
3611   Label miss;
3612 
3613   // Registers containing left and right operands respectively.
3614   Register left = a1;
3615   Register right = a0;
3616   Register tmp1 = a2;
3617   Register tmp2 = a3;
3618 
3619   // Check that both operands are heap objects.
3620   __ JumpIfEitherSmi(left, right, &miss);
3621 
3622   // Check that both operands are unique names. This leaves the instance
3623   // types loaded in tmp1 and tmp2.
3624   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3625   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3626   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3627   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3628 
3629   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3630   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3631 
3632   // Use a0 as result
3633   __ mov(v0, a0);
3634 
3635   // Unique names are compared by identity.
3636   Label done;
3637   __ Branch(&done, ne, left, Operand(right));
3638   // Make sure a0 is non-zero. At this point input operands are
3639   // guaranteed to be non-zero.
3640   DCHECK(right.is(a0));
3641   STATIC_ASSERT(EQUAL == 0);
3642   STATIC_ASSERT(kSmiTag == 0);
3643   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3644   __ bind(&done);
3645   __ Ret();
3646 
3647   __ bind(&miss);
3648   GenerateMiss(masm);
3649 }
3650 
3651 
GenerateStrings(MacroAssembler * masm)3652 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3653   DCHECK(state() == CompareICState::STRING);
3654   Label miss;
3655 
3656   bool equality = Token::IsEqualityOp(op());
3657 
3658   // Registers containing left and right operands respectively.
3659   Register left = a1;
3660   Register right = a0;
3661   Register tmp1 = a2;
3662   Register tmp2 = a3;
3663   Register tmp3 = t0;
3664   Register tmp4 = t1;
3665   Register tmp5 = t2;
3666 
3667   // Check that both operands are heap objects.
3668   __ JumpIfEitherSmi(left, right, &miss);
3669 
3670   // Check that both operands are strings. This leaves the instance
3671   // types loaded in tmp1 and tmp2.
3672   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3673   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3674   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3675   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3676   STATIC_ASSERT(kNotStringTag != 0);
3677   __ Or(tmp3, tmp1, tmp2);
3678   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3679   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3680 
3681   // Fast check for identical strings.
3682   Label left_ne_right;
3683   STATIC_ASSERT(EQUAL == 0);
3684   STATIC_ASSERT(kSmiTag == 0);
3685   __ Branch(&left_ne_right, ne, left, Operand(right));
3686   __ Ret(USE_DELAY_SLOT);
3687   __ mov(v0, zero_reg);  // In the delay slot.
3688   __ bind(&left_ne_right);
3689 
3690   // Handle not identical strings.
3691 
3692   // Check that both strings are internalized strings. If they are, we're done
3693   // because we already know they are not identical. We know they are both
3694   // strings.
3695   if (equality) {
3696     DCHECK(GetCondition() == eq);
3697     STATIC_ASSERT(kInternalizedTag == 0);
3698     __ Or(tmp3, tmp1, Operand(tmp2));
3699     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3700     Label is_symbol;
3701     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3702     // Make sure a0 is non-zero. At this point input operands are
3703     // guaranteed to be non-zero.
3704     DCHECK(right.is(a0));
3705     __ Ret(USE_DELAY_SLOT);
3706     __ mov(v0, a0);  // In the delay slot.
3707     __ bind(&is_symbol);
3708   }
3709 
3710   // Check that both strings are sequential one-byte.
3711   Label runtime;
3712   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3713                                                     &runtime);
3714 
3715   // Compare flat one-byte strings. Returns when done.
3716   if (equality) {
3717     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3718                                                   tmp3);
3719   } else {
3720     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3721                                                     tmp2, tmp3, tmp4);
3722   }
3723 
3724   // Handle more complex cases in runtime.
3725   __ bind(&runtime);
3726   __ Push(left, right);
3727   if (equality) {
3728     __ TailCallRuntime(Runtime::kStringEquals);
3729   } else {
3730     __ TailCallRuntime(Runtime::kStringCompare);
3731   }
3732 
3733   __ bind(&miss);
3734   GenerateMiss(masm);
3735 }
3736 
3737 
GenerateReceivers(MacroAssembler * masm)3738 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
3739   DCHECK_EQ(CompareICState::RECEIVER, state());
3740   Label miss;
3741   __ And(a2, a1, Operand(a0));
3742   __ JumpIfSmi(a2, &miss);
3743 
3744   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3745   __ GetObjectType(a0, a2, a2);
3746   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
3747   __ GetObjectType(a1, a2, a2);
3748   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
3749 
3750   DCHECK_EQ(eq, GetCondition());
3751   __ Ret(USE_DELAY_SLOT);
3752   __ subu(v0, a0, a1);
3753 
3754   __ bind(&miss);
3755   GenerateMiss(masm);
3756 }
3757 
3758 
GenerateKnownReceivers(MacroAssembler * masm)3759 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
3760   Label miss;
3761   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3762   __ And(a2, a1, a0);
3763   __ JumpIfSmi(a2, &miss);
3764   __ GetWeakValue(t0, cell);
3765   __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3766   __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3767   __ Branch(&miss, ne, a2, Operand(t0));
3768   __ Branch(&miss, ne, a3, Operand(t0));
3769 
3770   if (Token::IsEqualityOp(op())) {
3771     __ Ret(USE_DELAY_SLOT);
3772     __ subu(v0, a0, a1);
3773   } else if (is_strong(strength())) {
3774     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
3775   } else {
3776     if (op() == Token::LT || op() == Token::LTE) {
3777       __ li(a2, Operand(Smi::FromInt(GREATER)));
3778     } else {
3779       __ li(a2, Operand(Smi::FromInt(LESS)));
3780     }
3781     __ Push(a1, a0, a2);
3782     __ TailCallRuntime(Runtime::kCompare);
3783   }
3784 
3785   __ bind(&miss);
3786   GenerateMiss(masm);
3787 }
3788 
3789 
GenerateMiss(MacroAssembler * masm)3790 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3791   {
3792     // Call the runtime system in a fresh internal frame.
3793     FrameScope scope(masm, StackFrame::INTERNAL);
3794     __ Push(a1, a0);
3795     __ Push(ra, a1, a0);
3796     __ li(t0, Operand(Smi::FromInt(op())));
3797     __ addiu(sp, sp, -kPointerSize);
3798     __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
3799                    USE_DELAY_SLOT);
3800     __ sw(t0, MemOperand(sp));  // In the delay slot.
3801     // Compute the entry point of the rewritten stub.
3802     __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3803     // Restore registers.
3804     __ Pop(a1, a0, ra);
3805   }
3806   __ Jump(a2);
3807 }
3808 
3809 
Generate(MacroAssembler * masm)3810 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3811   // Make place for arguments to fit C calling convention. Most of the callers
3812   // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
3813   // so they handle stack restoring and we don't have to do that here.
3814   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3815   // kCArgsSlotsSize stack space after the call.
3816   __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3817   // Place the return address on the stack, making the call
3818   // GC safe. The RegExp backend also relies on this.
3819   __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
3820   __ Call(t9);  // Call the C++ function.
3821   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
3822 
3823   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3824     // In case of an error the return address may point to a memory area
3825     // filled with kZapValue by the GC.
3826     // Dereference the address and check for this.
3827     __ lw(t0, MemOperand(t9));
3828     __ Assert(ne, kReceivedInvalidReturnAddress, t0,
3829         Operand(reinterpret_cast<uint32_t>(kZapValue)));
3830   }
3831   __ Jump(t9);
3832 }
3833 
3834 
GenerateCall(MacroAssembler * masm,Register target)3835 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3836                                     Register target) {
3837   intptr_t loc =
3838       reinterpret_cast<intptr_t>(GetCode().location());
3839   __ Move(t9, target);
3840   __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3841   __ Call(at);
3842 }
3843 
3844 
GenerateNegativeLookup(MacroAssembler * masm,Label * miss,Label * done,Register receiver,Register properties,Handle<Name> name,Register scratch0)3845 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3846                                                       Label* miss,
3847                                                       Label* done,
3848                                                       Register receiver,
3849                                                       Register properties,
3850                                                       Handle<Name> name,
3851                                                       Register scratch0) {
3852   DCHECK(name->IsUniqueName());
3853   // If names of slots in range from 1 to kProbes - 1 for the hash value are
3854   // not equal to the name and kProbes-th slot is not used (its name is the
3855   // undefined value), it guarantees the hash table doesn't contain the
3856   // property. It's true even if some slots represent deleted properties
3857   // (their names are the hole value).
3858   for (int i = 0; i < kInlinedProbes; i++) {
3859     // scratch0 points to properties hash.
3860     // Compute the masked index: (hash + i + i * i) & mask.
3861     Register index = scratch0;
3862     // Capacity is smi 2^n.
3863     __ lw(index, FieldMemOperand(properties, kCapacityOffset));
3864     __ Subu(index, index, Operand(1));
3865     __ And(index, index, Operand(
3866         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3867 
3868     // Scale the index by multiplying by the entry size.
3869     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3870     __ sll(at, index, 1);
3871     __ Addu(index, index, at);
3872 
3873     Register entity_name = scratch0;
3874     // Having undefined at this place means the name is not contained.
3875     STATIC_ASSERT(kSmiTagSize == 1);
3876     Register tmp = properties;
3877     __ sll(scratch0, index, 1);
3878     __ Addu(tmp, properties, scratch0);
3879     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3880 
3881     DCHECK(!tmp.is(entity_name));
3882     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3883     __ Branch(done, eq, entity_name, Operand(tmp));
3884 
3885     // Load the hole ready for use below:
3886     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3887 
3888     // Stop if found the property.
3889     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3890 
3891     Label good;
3892     __ Branch(&good, eq, entity_name, Operand(tmp));
3893 
3894     // Check if the entry name is not a unique name.
3895     __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3896     __ lbu(entity_name,
3897            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3898     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3899     __ bind(&good);
3900 
3901     // Restore the properties.
3902     __ lw(properties,
3903           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3904   }
3905 
3906   const int spill_mask =
3907       (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
3908        a2.bit() | a1.bit() | a0.bit() | v0.bit());
3909 
3910   __ MultiPush(spill_mask);
3911   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3912   __ li(a1, Operand(Handle<Name>(name)));
3913   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3914   __ CallStub(&stub);
3915   __ mov(at, v0);
3916   __ MultiPop(spill_mask);
3917 
3918   __ Branch(done, eq, at, Operand(zero_reg));
3919   __ Branch(miss, ne, at, Operand(zero_reg));
3920 }
3921 
3922 
3923 // Probe the name dictionary in the |elements| register. Jump to the
3924 // |done| label if a property with the given name is found. Jump to
3925 // the |miss| label otherwise.
3926 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
GeneratePositiveLookup(MacroAssembler * masm,Label * miss,Label * done,Register elements,Register name,Register scratch1,Register scratch2)3927 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3928                                                       Label* miss,
3929                                                       Label* done,
3930                                                       Register elements,
3931                                                       Register name,
3932                                                       Register scratch1,
3933                                                       Register scratch2) {
3934   DCHECK(!elements.is(scratch1));
3935   DCHECK(!elements.is(scratch2));
3936   DCHECK(!name.is(scratch1));
3937   DCHECK(!name.is(scratch2));
3938 
3939   __ AssertName(name);
3940 
3941   // Compute the capacity mask.
3942   __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
3943   __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
3944   __ Subu(scratch1, scratch1, Operand(1));
3945 
3946   // Generate an unrolled loop that performs a few probes before
3947   // giving up. Measurements done on Gmail indicate that 2 probes
3948   // cover ~93% of loads from dictionaries.
3949   for (int i = 0; i < kInlinedProbes; i++) {
3950     // Compute the masked index: (hash + i + i * i) & mask.
3951     __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3952     if (i > 0) {
3953       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3954       // the hash in a separate instruction. The value hash + i + i * i is right
3955       // shifted in the following and instruction.
3956       DCHECK(NameDictionary::GetProbeOffset(i) <
3957              1 << (32 - Name::kHashFieldOffset));
3958       __ Addu(scratch2, scratch2, Operand(
3959           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3960     }
3961     __ srl(scratch2, scratch2, Name::kHashShift);
3962     __ And(scratch2, scratch1, scratch2);
3963 
3964     // Scale the index by multiplying by the element size.
3965     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3966     // scratch2 = scratch2 * 3.
3967 
3968     __ sll(at, scratch2, 1);
3969     __ Addu(scratch2, scratch2, at);
3970 
3971     // Check if the key is identical to the name.
3972     __ sll(at, scratch2, 2);
3973     __ Addu(scratch2, elements, at);
3974     __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
3975     __ Branch(done, eq, name, Operand(at));
3976   }
3977 
3978   const int spill_mask =
3979       (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
3980        a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
3981       ~(scratch1.bit() | scratch2.bit());
3982 
3983   __ MultiPush(spill_mask);
3984   if (name.is(a0)) {
3985     DCHECK(!elements.is(a1));
3986     __ Move(a1, name);
3987     __ Move(a0, elements);
3988   } else {
3989     __ Move(a0, elements);
3990     __ Move(a1, name);
3991   }
3992   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3993   __ CallStub(&stub);
3994   __ mov(scratch2, a2);
3995   __ mov(at, v0);
3996   __ MultiPop(spill_mask);
3997 
3998   __ Branch(done, ne, at, Operand(zero_reg));
3999   __ Branch(miss, eq, at, Operand(zero_reg));
4000 }
4001 
4002 
Generate(MacroAssembler * masm)4003 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4004   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
4005   // we cannot call anything that could cause a GC from this stub.
4006   // Registers:
4007   //  result: NameDictionary to probe
4008   //  a1: key
4009   //  dictionary: NameDictionary to probe.
4010   //  index: will hold an index of entry if lookup is successful.
4011   //         might alias with result_.
4012   // Returns:
4013   //  result_ is zero if lookup failed, non zero otherwise.
4014 
4015   Register result = v0;
4016   Register dictionary = a0;
4017   Register key = a1;
4018   Register index = a2;
4019   Register mask = a3;
4020   Register hash = t0;
4021   Register undefined = t1;
4022   Register entry_key = t2;
4023 
4024   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4025 
4026   __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4027   __ sra(mask, mask, kSmiTagSize);
4028   __ Subu(mask, mask, Operand(1));
4029 
4030   __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4031 
4032   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4033 
4034   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4035     // Compute the masked index: (hash + i + i * i) & mask.
4036     // Capacity is smi 2^n.
4037     if (i > 0) {
4038       // Add the probe offset (i + i * i) left shifted to avoid right shifting
4039       // the hash in a separate instruction. The value hash + i + i * i is right
4040       // shifted in the following and instruction.
4041       DCHECK(NameDictionary::GetProbeOffset(i) <
4042              1 << (32 - Name::kHashFieldOffset));
4043       __ Addu(index, hash, Operand(
4044           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4045     } else {
4046       __ mov(index, hash);
4047     }
4048     __ srl(index, index, Name::kHashShift);
4049     __ And(index, mask, index);
4050 
4051     // Scale the index by multiplying by the entry size.
4052     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
4053     // index *= 3.
4054     __ mov(at, index);
4055     __ sll(index, index, 1);
4056     __ Addu(index, index, at);
4057 
4058 
4059     STATIC_ASSERT(kSmiTagSize == 1);
4060     __ sll(index, index, 2);
4061     __ Addu(index, index, dictionary);
4062     __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4063 
4064     // Having undefined at this place means the name is not contained.
4065     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4066 
4067     // Stop if found the property.
4068     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4069 
4070     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4071       // Check if the entry name is not a unique name.
4072       __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4073       __ lbu(entry_key,
4074              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4075       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4076     }
4077   }
4078 
4079   __ bind(&maybe_in_dictionary);
4080   // If we are doing negative lookup then probing failure should be
4081   // treated as a lookup success. For positive lookup probing failure
4082   // should be treated as lookup failure.
4083   if (mode() == POSITIVE_LOOKUP) {
4084     __ Ret(USE_DELAY_SLOT);
4085     __ mov(result, zero_reg);
4086   }
4087 
4088   __ bind(&in_dictionary);
4089   __ Ret(USE_DELAY_SLOT);
4090   __ li(result, 1);
4091 
4092   __ bind(&not_in_dictionary);
4093   __ Ret(USE_DELAY_SLOT);
4094   __ mov(result, zero_reg);
4095 }
4096 
4097 
GenerateFixedRegStubsAheadOfTime(Isolate * isolate)4098 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4099     Isolate* isolate) {
4100   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4101   stub1.GetCode();
4102   // Hydrogen code stubs need stub2 at snapshot time.
4103   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4104   stub2.GetCode();
4105 }
4106 
4107 
4108 // Takes the input in 3 registers: address_ value_ and object_.  A pointer to
4109 // the value has just been written into the object, now this stub makes sure
4110 // we keep the GC informed.  The word in the object where the value has been
4111 // written is in the address register.
Generate(MacroAssembler * masm)4112 void RecordWriteStub::Generate(MacroAssembler* masm) {
4113   Label skip_to_incremental_noncompacting;
4114   Label skip_to_incremental_compacting;
4115 
4116   // The first two branch+nop instructions are generated with labels so as to
4117   // get the offset fixed up correctly by the bind(Label*) call.  We patch it
4118   // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4119   // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4120   // incremental heap marking.
4121   // See RecordWriteStub::Patch for details.
4122   __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4123   __ nop();
4124   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4125   __ nop();
4126 
4127   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4128     __ RememberedSetHelper(object(),
4129                            address(),
4130                            value(),
4131                            save_fp_regs_mode(),
4132                            MacroAssembler::kReturnAtEnd);
4133   }
4134   __ Ret();
4135 
4136   __ bind(&skip_to_incremental_noncompacting);
4137   GenerateIncremental(masm, INCREMENTAL);
4138 
4139   __ bind(&skip_to_incremental_compacting);
4140   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4141 
4142   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4143   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4144 
4145   PatchBranchIntoNop(masm, 0);
4146   PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4147 }
4148 
4149 
GenerateIncremental(MacroAssembler * masm,Mode mode)4150 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4151   regs_.Save(masm);
4152 
4153   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4154     Label dont_need_remembered_set;
4155 
4156     __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4157     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
4158                            regs_.scratch0(),
4159                            &dont_need_remembered_set);
4160 
4161     __ CheckPageFlag(regs_.object(),
4162                      regs_.scratch0(),
4163                      1 << MemoryChunk::SCAN_ON_SCAVENGE,
4164                      ne,
4165                      &dont_need_remembered_set);
4166 
4167     // First notify the incremental marker if necessary, then update the
4168     // remembered set.
4169     CheckNeedsToInformIncrementalMarker(
4170         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4171     InformIncrementalMarker(masm);
4172     regs_.Restore(masm);
4173     __ RememberedSetHelper(object(),
4174                            address(),
4175                            value(),
4176                            save_fp_regs_mode(),
4177                            MacroAssembler::kReturnAtEnd);
4178 
4179     __ bind(&dont_need_remembered_set);
4180   }
4181 
4182   CheckNeedsToInformIncrementalMarker(
4183       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4184   InformIncrementalMarker(masm);
4185   regs_.Restore(masm);
4186   __ Ret();
4187 }
4188 
4189 
InformIncrementalMarker(MacroAssembler * masm)4190 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4191   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4192   int argument_count = 3;
4193   __ PrepareCallCFunction(argument_count, regs_.scratch0());
4194   Register address =
4195       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4196   DCHECK(!address.is(regs_.object()));
4197   DCHECK(!address.is(a0));
4198   __ Move(address, regs_.address());
4199   __ Move(a0, regs_.object());
4200   __ Move(a1, address);
4201   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4202 
4203   AllowExternalCallThatCantCauseGC scope(masm);
4204   __ CallCFunction(
4205       ExternalReference::incremental_marking_record_write_function(isolate()),
4206       argument_count);
4207   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4208 }
4209 
4210 
CheckNeedsToInformIncrementalMarker(MacroAssembler * masm,OnNoNeedToInformIncrementalMarker on_no_need,Mode mode)4211 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4212     MacroAssembler* masm,
4213     OnNoNeedToInformIncrementalMarker on_no_need,
4214     Mode mode) {
4215   Label on_black;
4216   Label need_incremental;
4217   Label need_incremental_pop_scratch;
4218 
4219   __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4220   __ lw(regs_.scratch1(),
4221         MemOperand(regs_.scratch0(),
4222                    MemoryChunk::kWriteBarrierCounterOffset));
4223   __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4224   __ sw(regs_.scratch1(),
4225          MemOperand(regs_.scratch0(),
4226                     MemoryChunk::kWriteBarrierCounterOffset));
4227   __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4228 
4229   // Let's look at the color of the object:  If it is not black we don't have
4230   // to inform the incremental marker.
4231   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4232 
4233   regs_.Restore(masm);
4234   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4235     __ RememberedSetHelper(object(),
4236                            address(),
4237                            value(),
4238                            save_fp_regs_mode(),
4239                            MacroAssembler::kReturnAtEnd);
4240   } else {
4241     __ Ret();
4242   }
4243 
4244   __ bind(&on_black);
4245 
4246   // Get the value from the slot.
4247   __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4248 
4249   if (mode == INCREMENTAL_COMPACTION) {
4250     Label ensure_not_white;
4251 
4252     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
4253                      regs_.scratch1(),  // Scratch.
4254                      MemoryChunk::kEvacuationCandidateMask,
4255                      eq,
4256                      &ensure_not_white);
4257 
4258     __ CheckPageFlag(regs_.object(),
4259                      regs_.scratch1(),  // Scratch.
4260                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4261                      eq,
4262                      &need_incremental);
4263 
4264     __ bind(&ensure_not_white);
4265   }
4266 
4267   // We need extra registers for this, so we push the object and the address
4268   // register temporarily.
4269   __ Push(regs_.object(), regs_.address());
4270   __ JumpIfWhite(regs_.scratch0(),  // The value.
4271                  regs_.scratch1(),  // Scratch.
4272                  regs_.object(),    // Scratch.
4273                  regs_.address(),   // Scratch.
4274                  &need_incremental_pop_scratch);
4275   __ Pop(regs_.object(), regs_.address());
4276 
4277   regs_.Restore(masm);
4278   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4279     __ RememberedSetHelper(object(),
4280                            address(),
4281                            value(),
4282                            save_fp_regs_mode(),
4283                            MacroAssembler::kReturnAtEnd);
4284   } else {
4285     __ Ret();
4286   }
4287 
4288   __ bind(&need_incremental_pop_scratch);
4289   __ Pop(regs_.object(), regs_.address());
4290 
4291   __ bind(&need_incremental);
4292 
4293   // Fall through when we need to inform the incremental marker.
4294 }
4295 
4296 
Generate(MacroAssembler * masm)4297 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4298   CEntryStub ces(isolate(), 1, kSaveFPRegs);
4299   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4300   int parameter_count_offset =
4301       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4302   __ lw(a1, MemOperand(fp, parameter_count_offset));
4303   if (function_mode() == JS_FUNCTION_STUB_MODE) {
4304     __ Addu(a1, a1, Operand(1));
4305   }
4306   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4307   __ sll(a1, a1, kPointerSizeLog2);
4308   __ Ret(USE_DELAY_SLOT);
4309   __ Addu(sp, sp, a1);
4310 }
4311 
4312 
Generate(MacroAssembler * masm)4313 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4314   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
4315   LoadICStub stub(isolate(), state());
4316   stub.GenerateForTrampoline(masm);
4317 }
4318 
4319 
Generate(MacroAssembler * masm)4320 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4321   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
4322   KeyedLoadICStub stub(isolate(), state());
4323   stub.GenerateForTrampoline(masm);
4324 }
4325 
4326 
Generate(MacroAssembler * masm)4327 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4328   __ EmitLoadTypeFeedbackVector(a2);
4329   CallICStub stub(isolate(), state());
4330   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4331 }
4332 
4333 
Generate(MacroAssembler * masm)4334 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
4335 
4336 
GenerateForTrampoline(MacroAssembler * masm)4337 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4338   GenerateImpl(masm, true);
4339 }
4340 
4341 
HandleArrayCases(MacroAssembler * masm,Register feedback,Register receiver_map,Register scratch1,Register scratch2,bool is_polymorphic,Label * miss)4342 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
4343                              Register receiver_map, Register scratch1,
4344                              Register scratch2, bool is_polymorphic,
4345                              Label* miss) {
4346   // feedback initially contains the feedback array
4347   Label next_loop, prepare_next;
4348   Label start_polymorphic;
4349 
4350   Register cached_map = scratch1;
4351 
4352   __ lw(cached_map,
4353         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4354   __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4355   __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
4356   // found, now call handler.
4357   Register handler = feedback;
4358   __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4359   __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4360   __ Jump(t9);
4361 
4362 
4363   Register length = scratch2;
4364   __ bind(&start_polymorphic);
4365   __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
4366   if (!is_polymorphic) {
4367     // If the IC could be monomorphic we have to make sure we don't go past the
4368     // end of the feedback array.
4369     __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
4370   }
4371 
4372   Register too_far = length;
4373   Register pointer_reg = feedback;
4374 
4375   // +-----+------+------+-----+-----+ ... ----+
4376   // | map | len  | wm0  | h0  | wm1 |      hN |
4377   // +-----+------+------+-----+-----+ ... ----+
4378   //                 0      1     2        len-1
4379   //                              ^              ^
4380   //                              |              |
4381   //                         pointer_reg      too_far
4382   //                         aka feedback     scratch2
4383   // also need receiver_map
4384   // use cached_map (scratch1) to look in the weak map values.
4385   __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
4386   __ Addu(too_far, feedback, Operand(at));
4387   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4388   __ Addu(pointer_reg, feedback,
4389           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
4390 
4391   __ bind(&next_loop);
4392   __ lw(cached_map, MemOperand(pointer_reg));
4393   __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4394   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
4395   __ lw(handler, MemOperand(pointer_reg, kPointerSize));
4396   __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4397   __ Jump(t9);
4398 
4399   __ bind(&prepare_next);
4400   __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
4401   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
4402 
4403   // We exhausted our array of map handler pairs.
4404   __ jmp(miss);
4405 }
4406 
4407 
HandleMonomorphicCase(MacroAssembler * masm,Register receiver,Register receiver_map,Register feedback,Register vector,Register slot,Register scratch,Label * compare_map,Label * load_smi_map,Label * try_array)4408 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4409                                   Register receiver_map, Register feedback,
4410                                   Register vector, Register slot,
4411                                   Register scratch, Label* compare_map,
4412                                   Label* load_smi_map, Label* try_array) {
4413   __ JumpIfSmi(receiver, load_smi_map);
4414   __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4415   __ bind(compare_map);
4416   Register cached_map = scratch;
4417   // Move the weak map into the weak_cell register.
4418   __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
4419   __ Branch(try_array, ne, cached_map, Operand(receiver_map));
4420   Register handler = feedback;
4421 
4422   __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4423   __ Addu(handler, vector, Operand(at));
4424   __ lw(handler,
4425         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4426   __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4427   __ Jump(t9);
4428 }
4429 
4430 
GenerateImpl(MacroAssembler * masm,bool in_frame)4431 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4432   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
4433   Register name = LoadWithVectorDescriptor::NameRegister();          // a2
4434   Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
4435   Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
4436   Register feedback = t0;
4437   Register receiver_map = t1;
4438   Register scratch1 = t4;
4439 
4440   __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4441   __ Addu(feedback, vector, Operand(at));
4442   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4443 
4444   // Try to quickly handle the monomorphic case without knowing for sure
4445   // if we have a weak cell in feedback. We do know it's safe to look
4446   // at WeakCell::kValueOffset.
4447   Label try_array, load_smi_map, compare_map;
4448   Label not_array, miss;
4449   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4450                         scratch1, &compare_map, &load_smi_map, &try_array);
4451 
4452   // Is it a fixed array?
4453   __ bind(&try_array);
4454   __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4455   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4456   __ Branch(&not_array, ne, at, Operand(scratch1));
4457   HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
4458 
4459   __ bind(&not_array);
4460   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4461   __ Branch(&miss, ne, at, Operand(feedback));
4462   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4463       Code::ComputeHandlerFlags(Code::LOAD_IC));
4464   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
4465                                                receiver, name, feedback,
4466                                                receiver_map, scratch1, t5);
4467 
4468   __ bind(&miss);
4469   LoadIC::GenerateMiss(masm);
4470 
4471   __ bind(&load_smi_map);
4472   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4473   __ jmp(&compare_map);
4474 }
4475 
4476 
Generate(MacroAssembler * masm)4477 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
4478   GenerateImpl(masm, false);
4479 }
4480 
4481 
GenerateForTrampoline(MacroAssembler * masm)4482 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4483   GenerateImpl(masm, true);
4484 }
4485 
4486 
GenerateImpl(MacroAssembler * masm,bool in_frame)4487 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4488   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
4489   Register key = LoadWithVectorDescriptor::NameRegister();           // a2
4490   Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
4491   Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
4492   Register feedback = t0;
4493   Register receiver_map = t1;
4494   Register scratch1 = t4;
4495 
4496   __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4497   __ Addu(feedback, vector, Operand(at));
4498   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4499 
4500   // Try to quickly handle the monomorphic case without knowing for sure
4501   // if we have a weak cell in feedback. We do know it's safe to look
4502   // at WeakCell::kValueOffset.
4503   Label try_array, load_smi_map, compare_map;
4504   Label not_array, miss;
4505   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4506                         scratch1, &compare_map, &load_smi_map, &try_array);
4507 
4508   __ bind(&try_array);
4509   // Is it a fixed array?
4510   __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4511   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4512   __ Branch(&not_array, ne, at, Operand(scratch1));
4513   // We have a polymorphic element handler.
4514   __ JumpIfNotSmi(key, &miss);
4515 
4516   Label polymorphic, try_poly_name;
4517   __ bind(&polymorphic);
4518   HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
4519 
4520   __ bind(&not_array);
4521   // Is it generic?
4522   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4523   __ Branch(&try_poly_name, ne, at, Operand(feedback));
4524   Handle<Code> megamorphic_stub =
4525       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
4526   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
4527 
4528   __ bind(&try_poly_name);
4529   // We might have a name in feedback, and a fixed array in the next slot.
4530   __ Branch(&miss, ne, key, Operand(feedback));
4531   // If the name comparison succeeded, we know we have a fixed array with
4532   // at least one map/handler pair.
4533   __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4534   __ Addu(feedback, vector, Operand(at));
4535   __ lw(feedback,
4536         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
4537   HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
4538 
4539   __ bind(&miss);
4540   KeyedLoadIC::GenerateMiss(masm);
4541 
4542   __ bind(&load_smi_map);
4543   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4544   __ jmp(&compare_map);
4545 }
4546 
4547 
Generate(MacroAssembler * masm)4548 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4549   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
4550   VectorStoreICStub stub(isolate(), state());
4551   stub.GenerateForTrampoline(masm);
4552 }
4553 
4554 
Generate(MacroAssembler * masm)4555 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4556   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
4557   VectorKeyedStoreICStub stub(isolate(), state());
4558   stub.GenerateForTrampoline(masm);
4559 }
4560 
4561 
Generate(MacroAssembler * masm)4562 void VectorStoreICStub::Generate(MacroAssembler* masm) {
4563   GenerateImpl(masm, false);
4564 }
4565 
4566 
GenerateForTrampoline(MacroAssembler * masm)4567 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4568   GenerateImpl(masm, true);
4569 }
4570 
4571 
GenerateImpl(MacroAssembler * masm,bool in_frame)4572 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4573   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
4574   Register key = VectorStoreICDescriptor::NameRegister();           // a2
4575   Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
4576   Register slot = VectorStoreICDescriptor::SlotRegister();          // t0
4577   DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
4578   Register feedback = t1;
4579   Register receiver_map = t2;
4580   Register scratch1 = t5;
4581 
4582   __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
4583   __ Addu(feedback, vector, Operand(scratch1));
4584   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4585 
4586   // Try to quickly handle the monomorphic case without knowing for sure
4587   // if we have a weak cell in feedback. We do know it's safe to look
4588   // at WeakCell::kValueOffset.
4589   Label try_array, load_smi_map, compare_map;
4590   Label not_array, miss;
4591   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4592                         scratch1, &compare_map, &load_smi_map, &try_array);
4593 
4594   // Is it a fixed array?
4595   __ bind(&try_array);
4596   __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4597   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4598   __ Branch(&not_array, ne, scratch1, Operand(at));
4599 
4600   Register scratch2 = t4;
4601   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
4602                    &miss);
4603 
4604   __ bind(&not_array);
4605   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4606   __ Branch(&miss, ne, feedback, Operand(at));
4607   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4608       Code::ComputeHandlerFlags(Code::STORE_IC));
4609   masm->isolate()->stub_cache()->GenerateProbe(
4610       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
4611       scratch1, scratch2);
4612 
4613   __ bind(&miss);
4614   StoreIC::GenerateMiss(masm);
4615 
4616   __ bind(&load_smi_map);
4617   __ Branch(USE_DELAY_SLOT, &compare_map);
4618   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
4619 }
4620 
4621 
Generate(MacroAssembler * masm)4622 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
4623   GenerateImpl(masm, false);
4624 }
4625 
4626 
GenerateForTrampoline(MacroAssembler * masm)4627 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4628   GenerateImpl(masm, true);
4629 }
4630 
4631 
HandlePolymorphicStoreCase(MacroAssembler * masm,Register feedback,Register receiver_map,Register scratch1,Register scratch2,Label * miss)4632 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
4633                                        Register receiver_map, Register scratch1,
4634                                        Register scratch2, Label* miss) {
4635   // feedback initially contains the feedback array
4636   Label next_loop, prepare_next;
4637   Label start_polymorphic;
4638   Label transition_call;
4639 
4640   Register cached_map = scratch1;
4641   Register too_far = scratch2;
4642   Register pointer_reg = feedback;
4643   __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
4644 
4645   // +-----+------+------+-----+-----+-----+ ... ----+
4646   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
4647   // +-----+------+------+-----+-----+ ----+ ... ----+
4648   //                 0      1     2              len-1
4649   //                 ^                                 ^
4650   //                 |                                 |
4651   //             pointer_reg                        too_far
4652   //             aka feedback                       scratch2
4653   // also need receiver_map
4654   // use cached_map (scratch1) to look in the weak map values.
4655   __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
4656   __ Addu(too_far, feedback, Operand(scratch1));
4657   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4658   __ Addu(pointer_reg, feedback,
4659           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
4660 
4661   __ bind(&next_loop);
4662   __ lw(cached_map, MemOperand(pointer_reg));
4663   __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4664   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
4665   // Is it a transitioning store?
4666   __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
4667   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4668   __ Branch(&transition_call, ne, too_far, Operand(at));
4669   __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
4670   __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
4671   __ Jump(t9);
4672 
4673   __ bind(&transition_call);
4674   __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
4675   __ JumpIfSmi(too_far, miss);
4676 
4677   __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
4678 
4679   // Load the map into the correct register.
4680   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
4681   __ mov(feedback, too_far);
4682 
4683   __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
4684   __ Jump(t9);
4685 
4686   __ bind(&prepare_next);
4687   __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
4688   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
4689 
4690   // We exhausted our array of map handler pairs.
4691   __ jmp(miss);
4692 }
4693 
4694 
GenerateImpl(MacroAssembler * masm,bool in_frame)4695 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4696   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
4697   Register key = VectorStoreICDescriptor::NameRegister();           // a2
4698   Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
4699   Register slot = VectorStoreICDescriptor::SlotRegister();          // t0
4700   DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
4701   Register feedback = t1;
4702   Register receiver_map = t2;
4703   Register scratch1 = t5;
4704 
4705   __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
4706   __ Addu(feedback, vector, Operand(scratch1));
4707   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4708 
4709   // Try to quickly handle the monomorphic case without knowing for sure
4710   // if we have a weak cell in feedback. We do know it's safe to look
4711   // at WeakCell::kValueOffset.
4712   Label try_array, load_smi_map, compare_map;
4713   Label not_array, miss;
4714   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4715                         scratch1, &compare_map, &load_smi_map, &try_array);
4716 
4717   __ bind(&try_array);
4718   // Is it a fixed array?
4719   __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4720   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4721   __ Branch(&not_array, ne, scratch1, Operand(at));
4722 
4723   // We have a polymorphic element handler.
4724   Label polymorphic, try_poly_name;
4725   __ bind(&polymorphic);
4726 
4727   Register scratch2 = t4;
4728 
4729   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
4730                              &miss);
4731 
4732   __ bind(&not_array);
4733   // Is it generic?
4734   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4735   __ Branch(&try_poly_name, ne, feedback, Operand(at));
4736   Handle<Code> megamorphic_stub =
4737       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
4738   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
4739 
4740   __ bind(&try_poly_name);
4741   // We might have a name in feedback, and a fixed array in the next slot.
4742   __ Branch(&miss, ne, key, Operand(feedback));
4743   // If the name comparison succeeded, we know we have a fixed array with
4744   // at least one map/handler pair.
4745   __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
4746   __ Addu(feedback, vector, Operand(scratch1));
4747   __ lw(feedback,
4748         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
4749   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
4750                    &miss);
4751 
4752   __ bind(&miss);
4753   KeyedStoreIC::GenerateMiss(masm);
4754 
4755   __ bind(&load_smi_map);
4756   __ Branch(USE_DELAY_SLOT, &compare_map);
4757   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
4758 }
4759 
4760 
MaybeCallEntryHook(MacroAssembler * masm)4761 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4762   if (masm->isolate()->function_entry_hook() != NULL) {
4763     ProfileEntryHookStub stub(masm->isolate());
4764     __ push(ra);
4765     __ CallStub(&stub);
4766     __ pop(ra);
4767   }
4768 }
4769 
4770 
Generate(MacroAssembler * masm)4771 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4772   // The entry hook is a "push ra" instruction, followed by a call.
4773   // Note: on MIPS "push" is 2 instruction
4774   const int32_t kReturnAddressDistanceFromFunctionStart =
4775       Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4776 
4777   // This should contain all kJSCallerSaved registers.
4778   const RegList kSavedRegs =
4779      kJSCallerSaved |  // Caller saved registers.
4780      s5.bit();         // Saved stack pointer.
4781 
4782   // We also save ra, so the count here is one higher than the mask indicates.
4783   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4784 
4785   // Save all caller-save registers as this may be called from anywhere.
4786   __ MultiPush(kSavedRegs | ra.bit());
4787 
4788   // Compute the function's address for the first argument.
4789   __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4790 
4791   // The caller's return address is above the saved temporaries.
4792   // Grab that for the second argument to the hook.
4793   __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4794 
4795   // Align the stack if necessary.
4796   int frame_alignment = masm->ActivationFrameAlignment();
4797   if (frame_alignment > kPointerSize) {
4798     __ mov(s5, sp);
4799     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4800     __ And(sp, sp, Operand(-frame_alignment));
4801   }
4802   __ Subu(sp, sp, kCArgsSlotsSize);
4803 #if defined(V8_HOST_ARCH_MIPS)
4804   int32_t entry_hook =
4805       reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4806   __ li(t9, Operand(entry_hook));
4807 #else
4808   // Under the simulator we need to indirect the entry hook through a
4809   // trampoline function at a known address.
4810   // It additionally takes an isolate as a third parameter.
4811   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4812 
4813   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4814   __ li(t9, Operand(ExternalReference(&dispatcher,
4815                                       ExternalReference::BUILTIN_CALL,
4816                                       isolate())));
4817 #endif
4818   // Call C function through t9 to conform ABI for PIC.
4819   __ Call(t9);
4820 
4821   // Restore the stack pointer if needed.
4822   if (frame_alignment > kPointerSize) {
4823     __ mov(sp, s5);
4824   } else {
4825     __ Addu(sp, sp, kCArgsSlotsSize);
4826   }
4827 
4828   // Also pop ra to get Ret(0).
4829   __ MultiPop(kSavedRegs | ra.bit());
4830   __ Ret();
4831 }
4832 
4833 
4834 template<class T>
CreateArrayDispatch(MacroAssembler * masm,AllocationSiteOverrideMode mode)4835 static void CreateArrayDispatch(MacroAssembler* masm,
4836                                 AllocationSiteOverrideMode mode) {
4837   if (mode == DISABLE_ALLOCATION_SITES) {
4838     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4839     __ TailCallStub(&stub);
4840   } else if (mode == DONT_OVERRIDE) {
4841     int last_index = GetSequenceIndexFromFastElementsKind(
4842         TERMINAL_FAST_ELEMENTS_KIND);
4843     for (int i = 0; i <= last_index; ++i) {
4844       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4845       T stub(masm->isolate(), kind);
4846       __ TailCallStub(&stub, eq, a3, Operand(kind));
4847     }
4848 
4849     // If we reached this point there is a problem.
4850     __ Abort(kUnexpectedElementsKindInArrayConstructor);
4851   } else {
4852     UNREACHABLE();
4853   }
4854 }
4855 
4856 
CreateArrayDispatchOneArgument(MacroAssembler * masm,AllocationSiteOverrideMode mode)4857 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4858                                            AllocationSiteOverrideMode mode) {
4859   // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4860   // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4861   // a0 - number of arguments
4862   // a1 - constructor?
4863   // sp[0] - last argument
4864   Label normal_sequence;
4865   if (mode == DONT_OVERRIDE) {
4866     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4867     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4868     STATIC_ASSERT(FAST_ELEMENTS == 2);
4869     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4870     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4871     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4872 
4873     // is the low bit set? If so, we are holey and that is good.
4874     __ And(at, a3, Operand(1));
4875     __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4876   }
4877 
4878   // look at the first argument
4879   __ lw(t1, MemOperand(sp, 0));
4880   __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4881 
4882   if (mode == DISABLE_ALLOCATION_SITES) {
4883     ElementsKind initial = GetInitialFastElementsKind();
4884     ElementsKind holey_initial = GetHoleyElementsKind(initial);
4885 
4886     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4887                                                   holey_initial,
4888                                                   DISABLE_ALLOCATION_SITES);
4889     __ TailCallStub(&stub_holey);
4890 
4891     __ bind(&normal_sequence);
4892     ArraySingleArgumentConstructorStub stub(masm->isolate(),
4893                                             initial,
4894                                             DISABLE_ALLOCATION_SITES);
4895     __ TailCallStub(&stub);
4896   } else if (mode == DONT_OVERRIDE) {
4897     // We are going to create a holey array, but our kind is non-holey.
4898     // Fix kind and retry (only if we have an allocation site in the slot).
4899     __ Addu(a3, a3, Operand(1));
4900 
4901     if (FLAG_debug_code) {
4902       __ lw(t1, FieldMemOperand(a2, 0));
4903       __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4904       __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4905     }
4906 
4907     // Save the resulting elements kind in type info. We can't just store a3
4908     // in the AllocationSite::transition_info field because elements kind is
4909     // restricted to a portion of the field...upper bits need to be left alone.
4910     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4911     __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4912     __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4913     __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4914 
4915 
4916     __ bind(&normal_sequence);
4917     int last_index = GetSequenceIndexFromFastElementsKind(
4918         TERMINAL_FAST_ELEMENTS_KIND);
4919     for (int i = 0; i <= last_index; ++i) {
4920       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4921       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4922       __ TailCallStub(&stub, eq, a3, Operand(kind));
4923     }
4924 
4925     // If we reached this point there is a problem.
4926     __ Abort(kUnexpectedElementsKindInArrayConstructor);
4927   } else {
4928     UNREACHABLE();
4929   }
4930 }
4931 
4932 
4933 template<class T>
ArrayConstructorStubAheadOfTimeHelper(Isolate * isolate)4934 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4935   int to_index = GetSequenceIndexFromFastElementsKind(
4936       TERMINAL_FAST_ELEMENTS_KIND);
4937   for (int i = 0; i <= to_index; ++i) {
4938     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4939     T stub(isolate, kind);
4940     stub.GetCode();
4941     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4942       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4943       stub1.GetCode();
4944     }
4945   }
4946 }
4947 
4948 
GenerateStubsAheadOfTime(Isolate * isolate)4949 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4950   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4951       isolate);
4952   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4953       isolate);
4954   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4955       isolate);
4956 }
4957 
4958 
GenerateStubsAheadOfTime(Isolate * isolate)4959 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4960     Isolate* isolate) {
4961   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4962   for (int i = 0; i < 2; i++) {
4963     // For internal arrays we only need a few things.
4964     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4965     stubh1.GetCode();
4966     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4967     stubh2.GetCode();
4968     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4969     stubh3.GetCode();
4970   }
4971 }
4972 
4973 
GenerateDispatchToArrayStub(MacroAssembler * masm,AllocationSiteOverrideMode mode)4974 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4975     MacroAssembler* masm,
4976     AllocationSiteOverrideMode mode) {
4977   if (argument_count() == ANY) {
4978     Label not_zero_case, not_one_case;
4979     __ And(at, a0, a0);
4980     __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
4981     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4982 
4983     __ bind(&not_zero_case);
4984     __ Branch(&not_one_case, gt, a0, Operand(1));
4985     CreateArrayDispatchOneArgument(masm, mode);
4986 
4987     __ bind(&not_one_case);
4988     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4989   } else if (argument_count() == NONE) {
4990     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4991   } else if (argument_count() == ONE) {
4992     CreateArrayDispatchOneArgument(masm, mode);
4993   } else if (argument_count() == MORE_THAN_ONE) {
4994     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4995   } else {
4996     UNREACHABLE();
4997   }
4998 }
4999 
5000 
Generate(MacroAssembler * masm)5001 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5002   // ----------- S t a t e -------------
5003   //  -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
5004   //  -- a1 : constructor
5005   //  -- a2 : AllocationSite or undefined
5006   //  -- a3 : Original constructor
5007   //  -- sp[0] : last argument
5008   // -----------------------------------
5009 
5010   if (FLAG_debug_code) {
5011     // The array construct code is only set for the global and natives
5012     // builtin Array functions which always have maps.
5013 
5014     // Initial map for the builtin Array function should be a map.
5015     __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5016     // Will both indicate a NULL and a Smi.
5017     __ SmiTst(t0, at);
5018     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5019         at, Operand(zero_reg));
5020     __ GetObjectType(t0, t0, t1);
5021     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5022         t1, Operand(MAP_TYPE));
5023 
5024     // We should either have undefined in a2 or a valid AllocationSite
5025     __ AssertUndefinedOrAllocationSite(a2, t0);
5026   }
5027 
5028   // Enter the context of the Array function.
5029   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5030 
5031   Label subclassing;
5032   __ Branch(&subclassing, ne, a1, Operand(a3));
5033 
5034   Label no_info;
5035   // Get the elements kind and case on that.
5036   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5037   __ Branch(&no_info, eq, a2, Operand(at));
5038 
5039   __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5040   __ SmiUntag(a3);
5041   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5042   __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5043   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5044 
5045   __ bind(&no_info);
5046   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5047 
5048   // Subclassing.
5049   __ bind(&subclassing);
5050   switch (argument_count()) {
5051     case ANY:
5052     case MORE_THAN_ONE:
5053       __ sll(at, a0, kPointerSizeLog2);
5054       __ addu(at, sp, at);
5055       __ sw(a1, MemOperand(at));
5056       __ li(at, Operand(3));
5057       __ addu(a0, a0, at);
5058       break;
5059     case NONE:
5060       __ sw(a1, MemOperand(sp, 0 * kPointerSize));
5061       __ li(a0, Operand(3));
5062       break;
5063     case ONE:
5064       __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5065       __ li(a0, Operand(4));
5066       break;
5067   }
5068   __ Push(a3, a2);
5069   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
5070 }
5071 
5072 
GenerateCase(MacroAssembler * masm,ElementsKind kind)5073 void InternalArrayConstructorStub::GenerateCase(
5074     MacroAssembler* masm, ElementsKind kind) {
5075 
5076   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5077   __ TailCallStub(&stub0, lo, a0, Operand(1));
5078 
5079   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5080   __ TailCallStub(&stubN, hi, a0, Operand(1));
5081 
5082   if (IsFastPackedElementsKind(kind)) {
5083     // We might need to create a holey array
5084     // look at the first argument.
5085     __ lw(at, MemOperand(sp, 0));
5086 
5087     InternalArraySingleArgumentConstructorStub
5088         stub1_holey(isolate(), GetHoleyElementsKind(kind));
5089     __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5090   }
5091 
5092   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5093   __ TailCallStub(&stub1);
5094 }
5095 
5096 
Generate(MacroAssembler * masm)5097 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5098   // ----------- S t a t e -------------
5099   //  -- a0 : argc
5100   //  -- a1 : constructor
5101   //  -- sp[0] : return address
5102   //  -- sp[4] : last argument
5103   // -----------------------------------
5104 
5105   if (FLAG_debug_code) {
5106     // The array construct code is only set for the global and natives
5107     // builtin Array functions which always have maps.
5108 
5109     // Initial map for the builtin Array function should be a map.
5110     __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5111     // Will both indicate a NULL and a Smi.
5112     __ SmiTst(a3, at);
5113     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5114         at, Operand(zero_reg));
5115     __ GetObjectType(a3, a3, t0);
5116     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5117         t0, Operand(MAP_TYPE));
5118   }
5119 
5120   // Figure out the right elements kind.
5121   __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5122 
5123   // Load the map's "bit field 2" into a3. We only need the first byte,
5124   // but the following bit field extraction takes care of that anyway.
5125   __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5126   // Retrieve elements_kind from bit field 2.
5127   __ DecodeField<Map::ElementsKindBits>(a3);
5128 
5129   if (FLAG_debug_code) {
5130     Label done;
5131     __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5132     __ Assert(
5133         eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5134         a3, Operand(FAST_HOLEY_ELEMENTS));
5135     __ bind(&done);
5136   }
5137 
5138   Label fast_elements_case;
5139   __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5140   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5141 
5142   __ bind(&fast_elements_case);
5143   GenerateCase(masm, FAST_ELEMENTS);
5144 }
5145 
5146 
Generate(MacroAssembler * masm)5147 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
5148   Register context_reg = cp;
5149   Register slot_reg = a2;
5150   Register result_reg = v0;
5151   Label slow_case;
5152 
5153   // Go up context chain to the script context.
5154   for (int i = 0; i < depth(); ++i) {
5155     __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
5156     context_reg = result_reg;
5157   }
5158 
5159   // Load the PropertyCell value at the specified slot.
5160   __ sll(at, slot_reg, kPointerSizeLog2);
5161   __ Addu(at, at, Operand(context_reg));
5162   __ lw(result_reg, ContextMemOperand(at, 0));
5163   __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
5164 
5165   // Check that value is not the_hole.
5166   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5167   __ Branch(&slow_case, eq, result_reg, Operand(at));
5168   __ Ret();
5169 
5170   // Fallback to the runtime.
5171   __ bind(&slow_case);
5172   __ SmiTag(slot_reg);
5173   __ Push(slot_reg);
5174   __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
5175 }
5176 
5177 
Generate(MacroAssembler * masm)5178 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
5179   Register context_reg = cp;
5180   Register slot_reg = a2;
5181   Register value_reg = a0;
5182   Register cell_reg = t0;
5183   Register cell_value_reg = t1;
5184   Register cell_details_reg = t2;
5185   Label fast_heapobject_case, fast_smi_case, slow_case;
5186 
5187   if (FLAG_debug_code) {
5188     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5189     __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
5190   }
5191 
5192   // Go up context chain to the script context.
5193   for (int i = 0; i < depth(); ++i) {
5194     __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
5195     context_reg = cell_reg;
5196   }
5197 
5198   // Load the PropertyCell at the specified slot.
5199   __ sll(at, slot_reg, kPointerSizeLog2);
5200   __ Addu(at, at, Operand(context_reg));
5201   __ lw(cell_reg, ContextMemOperand(at, 0));
5202 
5203   // Load PropertyDetails for the cell (actually only the cell_type and kind).
5204   __ lw(cell_details_reg,
5205         FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
5206   __ SmiUntag(cell_details_reg);
5207   __ And(cell_details_reg, cell_details_reg,
5208          PropertyDetails::PropertyCellTypeField::kMask |
5209              PropertyDetails::KindField::kMask |
5210              PropertyDetails::kAttributesReadOnlyMask);
5211 
5212   // Check if PropertyCell holds mutable data.
5213   Label not_mutable_data;
5214   __ Branch(&not_mutable_data, ne, cell_details_reg,
5215             Operand(PropertyDetails::PropertyCellTypeField::encode(
5216                         PropertyCellType::kMutable) |
5217                     PropertyDetails::KindField::encode(kData)));
5218   __ JumpIfSmi(value_reg, &fast_smi_case);
5219   __ bind(&fast_heapobject_case);
5220   __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5221   __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
5222                       cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
5223                       EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5224   // RecordWriteField clobbers the value register, so we need to reload.
5225   __ Ret(USE_DELAY_SLOT);
5226   __ lw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5227   __ bind(&not_mutable_data);
5228 
5229   // Check if PropertyCell value matches the new value (relevant for Constant,
5230   // ConstantType and Undefined cells).
5231   Label not_same_value;
5232   __ lw(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5233   __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
5234   // Make sure the PropertyCell is not marked READ_ONLY.
5235   __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
5236   __ Branch(&slow_case, ne, at, Operand(zero_reg));
5237   if (FLAG_debug_code) {
5238     Label done;
5239     // This can only be true for Constant, ConstantType and Undefined cells,
5240     // because we never store the_hole via this stub.
5241     __ Branch(&done, eq, cell_details_reg,
5242               Operand(PropertyDetails::PropertyCellTypeField::encode(
5243                           PropertyCellType::kConstant) |
5244                       PropertyDetails::KindField::encode(kData)));
5245     __ Branch(&done, eq, cell_details_reg,
5246               Operand(PropertyDetails::PropertyCellTypeField::encode(
5247                           PropertyCellType::kConstantType) |
5248                       PropertyDetails::KindField::encode(kData)));
5249     __ Check(eq, kUnexpectedValue, cell_details_reg,
5250              Operand(PropertyDetails::PropertyCellTypeField::encode(
5251                          PropertyCellType::kUndefined) |
5252                      PropertyDetails::KindField::encode(kData)));
5253     __ bind(&done);
5254   }
5255   __ Ret();
5256   __ bind(&not_same_value);
5257 
5258   // Check if PropertyCell contains data with constant type (and is not
5259   // READ_ONLY).
5260   __ Branch(&slow_case, ne, cell_details_reg,
5261             Operand(PropertyDetails::PropertyCellTypeField::encode(
5262                         PropertyCellType::kConstantType) |
5263                     PropertyDetails::KindField::encode(kData)));
5264 
5265   // Now either both old and new values must be SMIs or both must be heap
5266   // objects with same map.
5267   Label value_is_heap_object;
5268   __ JumpIfNotSmi(value_reg, &value_is_heap_object);
5269   __ JumpIfNotSmi(cell_value_reg, &slow_case);
5270   // Old and new values are SMIs, no need for a write barrier here.
5271   __ bind(&fast_smi_case);
5272   __ Ret(USE_DELAY_SLOT);
5273   __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5274   __ bind(&value_is_heap_object);
5275   __ JumpIfSmi(cell_value_reg, &slow_case);
5276   Register cell_value_map_reg = cell_value_reg;
5277   __ lw(cell_value_map_reg,
5278         FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
5279   __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
5280             FieldMemOperand(value_reg, HeapObject::kMapOffset));
5281 
5282   // Fallback to the runtime.
5283   __ bind(&slow_case);
5284   __ SmiTag(slot_reg);
5285   __ Push(slot_reg, value_reg);
5286   __ TailCallRuntime(is_strict(language_mode())
5287                          ? Runtime::kStoreGlobalViaContext_Strict
5288                          : Runtime::kStoreGlobalViaContext_Sloppy);
5289 }
5290 
5291 
AddressOffset(ExternalReference ref0,ExternalReference ref1)5292 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5293   return ref0.address() - ref1.address();
5294 }
5295 
5296 
5297 // Calls an API function.  Allocates HandleScope, extracts returned value
5298 // from handle and propagates exceptions.  Restores context.  stack_space
5299 // - space to be unwound on exit (includes the call JS arguments space and
5300 // the additional space allocated for the fast call).
CallApiFunctionAndReturn(MacroAssembler * masm,Register function_address,ExternalReference thunk_ref,int stack_space,int32_t stack_space_offset,MemOperand return_value_operand,MemOperand * context_restore_operand)5301 static void CallApiFunctionAndReturn(
5302     MacroAssembler* masm, Register function_address,
5303     ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
5304     MemOperand return_value_operand, MemOperand* context_restore_operand) {
5305   Isolate* isolate = masm->isolate();
5306   ExternalReference next_address =
5307       ExternalReference::handle_scope_next_address(isolate);
5308   const int kNextOffset = 0;
5309   const int kLimitOffset = AddressOffset(
5310       ExternalReference::handle_scope_limit_address(isolate), next_address);
5311   const int kLevelOffset = AddressOffset(
5312       ExternalReference::handle_scope_level_address(isolate), next_address);
5313 
5314   DCHECK(function_address.is(a1) || function_address.is(a2));
5315 
5316   Label profiler_disabled;
5317   Label end_profiler_check;
5318   __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
5319   __ lb(t9, MemOperand(t9, 0));
5320   __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
5321 
5322   // Additional parameter is the address of the actual callback.
5323   __ li(t9, Operand(thunk_ref));
5324   __ jmp(&end_profiler_check);
5325 
5326   __ bind(&profiler_disabled);
5327   __ mov(t9, function_address);
5328   __ bind(&end_profiler_check);
5329 
5330   // Allocate HandleScope in callee-save registers.
5331   __ li(s3, Operand(next_address));
5332   __ lw(s0, MemOperand(s3, kNextOffset));
5333   __ lw(s1, MemOperand(s3, kLimitOffset));
5334   __ lw(s2, MemOperand(s3, kLevelOffset));
5335   __ Addu(s2, s2, Operand(1));
5336   __ sw(s2, MemOperand(s3, kLevelOffset));
5337 
5338   if (FLAG_log_timer_events) {
5339     FrameScope frame(masm, StackFrame::MANUAL);
5340     __ PushSafepointRegisters();
5341     __ PrepareCallCFunction(1, a0);
5342     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5343     __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5344                      1);
5345     __ PopSafepointRegisters();
5346   }
5347 
5348   // Native call returns to the DirectCEntry stub which redirects to the
5349   // return address pushed on stack (could have moved after GC).
5350   // DirectCEntry stub itself is generated early and never moves.
5351   DirectCEntryStub stub(isolate);
5352   stub.GenerateCall(masm, t9);
5353 
5354   if (FLAG_log_timer_events) {
5355     FrameScope frame(masm, StackFrame::MANUAL);
5356     __ PushSafepointRegisters();
5357     __ PrepareCallCFunction(1, a0);
5358     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5359     __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5360                      1);
5361     __ PopSafepointRegisters();
5362   }
5363 
5364   Label promote_scheduled_exception;
5365   Label delete_allocated_handles;
5366   Label leave_exit_frame;
5367   Label return_value_loaded;
5368 
5369   // Load value from ReturnValue.
5370   __ lw(v0, return_value_operand);
5371   __ bind(&return_value_loaded);
5372 
5373   // No more valid handles (the result handle was the last one). Restore
5374   // previous handle scope.
5375   __ sw(s0, MemOperand(s3, kNextOffset));
5376   if (__ emit_debug_code()) {
5377     __ lw(a1, MemOperand(s3, kLevelOffset));
5378     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
5379   }
5380   __ Subu(s2, s2, Operand(1));
5381   __ sw(s2, MemOperand(s3, kLevelOffset));
5382   __ lw(at, MemOperand(s3, kLimitOffset));
5383   __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
5384 
5385   // Leave the API exit frame.
5386   __ bind(&leave_exit_frame);
5387 
5388   bool restore_context = context_restore_operand != NULL;
5389   if (restore_context) {
5390     __ lw(cp, *context_restore_operand);
5391   }
5392   if (stack_space_offset != kInvalidStackOffset) {
5393     // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
5394     // so this must be accounted for.
5395     __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
5396   } else {
5397     __ li(s0, Operand(stack_space));
5398   }
5399   __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
5400                     stack_space_offset != kInvalidStackOffset);
5401 
5402   // Check if the function scheduled an exception.
5403   __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
5404   __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
5405   __ lw(t1, MemOperand(at));
5406   __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
5407 
5408   __ Ret();
5409 
5410   // Re-throw by promoting a scheduled exception.
5411   __ bind(&promote_scheduled_exception);
5412   __ TailCallRuntime(Runtime::kPromoteScheduledException);
5413 
5414   // HandleScope limit has changed. Delete allocated extensions.
5415   __ bind(&delete_allocated_handles);
5416   __ sw(s1, MemOperand(s3, kLimitOffset));
5417   __ mov(s0, v0);
5418   __ mov(a0, v0);
5419   __ PrepareCallCFunction(1, s1);
5420   __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5421   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5422                    1);
5423   __ mov(v0, s0);
5424   __ jmp(&leave_exit_frame);
5425 }
5426 
5427 
CallApiFunctionStubHelper(MacroAssembler * masm,const ParameterCount & argc,bool return_first_arg,bool call_data_undefined)5428 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5429                                       const ParameterCount& argc,
5430                                       bool return_first_arg,
5431                                       bool call_data_undefined) {
5432   // ----------- S t a t e -------------
5433   //  -- a0                  : callee
5434   //  -- t0                  : call_data
5435   //  -- a2                  : holder
5436   //  -- a1                  : api_function_address
5437   //  -- a3                  : number of arguments if argc is a register
5438   //  -- cp                  : context
5439   //  --
5440   //  -- sp[0]               : last argument
5441   //  -- ...
5442   //  -- sp[(argc - 1)* 4]   : first argument
5443   //  -- sp[argc * 4]        : receiver
5444   // -----------------------------------
5445 
5446   Register callee = a0;
5447   Register call_data = t0;
5448   Register holder = a2;
5449   Register api_function_address = a1;
5450   Register context = cp;
5451 
5452   typedef FunctionCallbackArguments FCA;
5453 
5454   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5455   STATIC_ASSERT(FCA::kCalleeIndex == 5);
5456   STATIC_ASSERT(FCA::kDataIndex == 4);
5457   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5458   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5459   STATIC_ASSERT(FCA::kIsolateIndex == 1);
5460   STATIC_ASSERT(FCA::kHolderIndex == 0);
5461   STATIC_ASSERT(FCA::kArgsLength == 7);
5462 
5463   DCHECK(argc.is_immediate() || a3.is(argc.reg()));
5464 
5465   // Save context, callee and call data.
5466   __ Push(context, callee, call_data);
5467   // Load context from callee.
5468   __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5469 
5470   Register scratch = call_data;
5471   if (!call_data_undefined) {
5472     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5473   }
5474   // Push return value and default return value.
5475   __ Push(scratch, scratch);
5476   __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5477   // Push isolate and holder.
5478   __ Push(scratch, holder);
5479 
5480   // Prepare arguments.
5481   __ mov(scratch, sp);
5482 
5483   // Allocate the v8::Arguments structure in the arguments' space since
5484   // it's not controlled by GC.
5485   const int kApiStackSpace = 4;
5486 
5487   FrameScope frame_scope(masm, StackFrame::MANUAL);
5488   __ EnterExitFrame(false, kApiStackSpace);
5489 
5490   DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
5491   // a0 = FunctionCallbackInfo&
5492   // Arguments is after the return address.
5493   __ Addu(a0, sp, Operand(1 * kPointerSize));
5494   // FunctionCallbackInfo::implicit_args_
5495   __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
5496   if (argc.is_immediate()) {
5497     // FunctionCallbackInfo::values_
5498     __ Addu(at, scratch,
5499             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5500     __ sw(at, MemOperand(a0, 1 * kPointerSize));
5501     // FunctionCallbackInfo::length_ = argc
5502     __ li(at, Operand(argc.immediate()));
5503     __ sw(at, MemOperand(a0, 2 * kPointerSize));
5504     // FunctionCallbackInfo::is_construct_call_ = 0
5505     __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
5506   } else {
5507     // FunctionCallbackInfo::values_
5508     __ sll(at, argc.reg(), kPointerSizeLog2);
5509     __ Addu(at, at, scratch);
5510     __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
5511     __ sw(at, MemOperand(a0, 1 * kPointerSize));
5512     // FunctionCallbackInfo::length_ = argc
5513     __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
5514     // FunctionCallbackInfo::is_construct_call_
5515     __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
5516     __ sll(at, argc.reg(), kPointerSizeLog2);
5517     __ sw(at, MemOperand(a0, 3 * kPointerSize));
5518   }
5519 
5520   ExternalReference thunk_ref =
5521       ExternalReference::invoke_function_callback(masm->isolate());
5522 
5523   AllowExternalCallThatCantCauseGC scope(masm);
5524   MemOperand context_restore_operand(
5525       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5526   // Stores return the first js argument.
5527   int return_value_offset = 0;
5528   if (return_first_arg) {
5529     return_value_offset = 2 + FCA::kArgsLength;
5530   } else {
5531     return_value_offset = 2 + FCA::kReturnValueOffset;
5532   }
5533   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5534   int stack_space = 0;
5535   int32_t stack_space_offset = 4 * kPointerSize;
5536   if (argc.is_immediate()) {
5537     stack_space = argc.immediate() + FCA::kArgsLength + 1;
5538     stack_space_offset = kInvalidStackOffset;
5539   }
5540   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5541                            stack_space_offset, return_value_operand,
5542                            &context_restore_operand);
5543 }
5544 
5545 
Generate(MacroAssembler * masm)5546 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5547   bool call_data_undefined = this->call_data_undefined();
5548   CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
5549                             call_data_undefined);
5550 }
5551 
5552 
Generate(MacroAssembler * masm)5553 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5554   bool is_store = this->is_store();
5555   int argc = this->argc();
5556   bool call_data_undefined = this->call_data_undefined();
5557   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5558                             call_data_undefined);
5559 }
5560 
5561 
Generate(MacroAssembler * masm)5562 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5563   // ----------- S t a t e -------------
5564   //  -- sp[0]                  : name
5565   //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
5566   //  -- ...
5567   //  -- a2                     : api_function_address
5568   // -----------------------------------
5569 
5570   Register api_function_address = ApiGetterDescriptor::function_address();
5571   DCHECK(api_function_address.is(a2));
5572 
5573   __ mov(a0, sp);  // a0 = Handle<Name>
5574   __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA
5575 
5576   const int kApiStackSpace = 1;
5577   FrameScope frame_scope(masm, StackFrame::MANUAL);
5578   __ EnterExitFrame(false, kApiStackSpace);
5579 
5580   // Create PropertyAccessorInfo instance on the stack above the exit frame with
5581   // a1 (internal::Object** args_) as the data.
5582   __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5583   __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&
5584 
5585   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5586 
5587   ExternalReference thunk_ref =
5588       ExternalReference::invoke_accessor_getter_callback(isolate());
5589   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5590                            kStackUnwindSpace, kInvalidStackOffset,
5591                            MemOperand(fp, 6 * kPointerSize), NULL);
5592 }
5593 
5594 
5595 #undef __
5596 
5597 }  // namespace internal
5598 }  // namespace v8
5599 
5600 #endif  // V8_TARGET_ARCH_MIPS
5601