// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetEnvironmentParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetEnvironmentParameterRegister(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);
  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low, MemOperand(input_reg, double_offset));
  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
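
// A minimal C++ sketch of the manual truncation path above, illustrative
// only (plain ISO C++, not V8 API); it assumes IEEE-754 doubles and models
// ECMAScript ToInt32's modulo-2^32 semantics rather than the stub's exact
// register allocation:
//
//   #include <cstdint>
//   #include <cstring>
//
//   static int32_t TruncateDoubleToInt32(double d) {
//     uint64_t bits;
//     std::memcpy(&bits, &d, sizeof bits);
//     int biased = static_cast<int>((bits >> 52) & 0x7FF);
//     if (biased == 0x7FF) return 0;       // Infinity or NaN -> 0.
//     int exponent = biased - 1023;
//     if (exponent < 0) return 0;          // |d| < 1 truncates to 0.
//     if (exponent > 83) return 0;         // All 32 low bits shifted away.
//     uint64_t mantissa =
//         (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
//     uint32_t magnitude = exponent >= 52
//         ? static_cast<uint32_t>(mantissa << (exponent - 52))
//         : static_cast<uint32_t>(mantissa >> (52 - exponent));
//     bool negative = (bits >> 63) != 0;
//     return static_cast<int32_t>(negative ? 0u - magnitude : magnitude);
//   }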


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
  stub1.GetCode();
  stub2.GetCode();
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign(), the_int(), Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));

  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch(), Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch(), scratch(), sign());
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int());
  __ Movn(the_int(), at, sign());
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int(), shift_distance);
  __ or_(scratch(), scratch(), at);
  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                   HeapNumber::kExponentOffset));
  __ sll(scratch(), the_int(), 32 - shift_distance);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                   HeapNumber::kMantissaOffset));

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch(),
        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
  __ mov(scratch(), zero_reg);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch(),
        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
}
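
// An illustrative C++ sketch of the encoding above (not V8 API): a non-Smi
// int32 always has 2^30 <= |value| < 2^31, so its biased exponent is
// 1023 + 30 (or 1023 + 31 for INT32_MIN, whose stored mantissa is zero):
//
//   static uint64_t EncodeNonSmiInt32AsDouble(int32_t value) {
//     uint32_t sign = value < 0 ? 0x80000000u : 0u;
//     if (value == INT32_MIN) {            // -2^31: exponent 31, mantissa 0.
//       return uint64_t{sign | ((1023u + 31u) << 20)} << 32;
//     }
//     uint32_t magnitude = sign ? 0u - static_cast<uint32_t>(value)
//                               : static_cast<uint32_t>(value);
//     uint32_t mantissa30 = magnitude & ((1u << 30) - 1u);  // Drop implicit 1.
//     uint32_t high = sign | ((1023u + 30u) << 20) | (mantissa30 >> 10);
//     uint32_t low = mantissa30 << 22;
//     return (uint64_t{high} << 32) | low;
//   }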


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t1;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if (cc == less || cc == greater) {
    __ GetObjectType(a0, t0, t0);
    __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
  } else {
    __ GetObjectType(a0, t0, t0);
    __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(a6));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(a7, a6, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, a7, Operand(a6));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
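
// The NaN test above in plain C++ (illustrative, not V8 API): a double is
// NaN exactly when all eleven exponent bits are set and the 52-bit mantissa
// is non-zero (a zero mantissa with a full exponent is +/-Infinity instead):
//
//   static bool IsNaNBits(uint64_t bits) {
//     const uint64_t kExpMask = uint64_t{0x7FF} << 52;
//     const uint64_t kMantissaMask = (uint64_t{1} << 52) - 1;
//     return (bits & kExpMask) == kExpMask && (bits & kMantissaMask) != 0;
//   }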


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_SPEC_OBJECT_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
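
// A sketch of the internalized-string trick above (illustrative, not V8
// API): since kInternalizedTag == 0 and kStringTag == 0, OR-ing the two
// instance types lets a single mask test both operands at once; the masked
// result is zero only when both are internalized strings:
//
//   static bool BothInternalizedStrings(uint32_t lhs_type, uint32_t rhs_type) {
//     uint32_t combined = lhs_type | rhs_type;
//     return (combined & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
//   }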


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is the object type of lhs on entry (see the caller).
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));   // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal.  Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
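
// A sketch of the undetectable-object result above (illustrative, not V8
// API): the xori in the return delay slot flips the tested bit, so v0 is
// 0 (meaning equal) exactly when both maps have the undetectable bit set:
//
//   static int UndetectableCompareResult(uint8_t lhs_bits, uint8_t rhs_bits) {
//     int both = lhs_bits & rhs_bits & (1 << Map::kIsUndetectable);
//     return both ^ (1 << Map::kIsUndetectable);  // 0 iff both undetectable.
//   }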


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(0, Smi::FromInt(0));
  __ And(a6, lhs, Operand(rhs));
  __ JumpIfNotSmi(a6, &not_smis, a4);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Label nan;
  __ li(a4, Operand(LESS));
  __ li(a5, Operand(GREATER));
  __ li(a6, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (kArchVariant != kMips64r6) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, a4);
    // Use the previous check to conditionally store the opposite condition
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected in the
    // next check.
    __ Movf(v0, a5);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, a6);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, a4);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, a6);  // Return EQUAL as result.

    __ mov(v0, a5);  // Return GREATER as result.
    __ bind(&skip);
  }
  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
                                                    a5);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers: lhs (a1) first,
  // rhs (a0) second.
  __ Push(lhs, rhs);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;
  if (cc == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}
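
// A sketch of the Smi fast path at the top of GenerateGeneric (illustrative;
// it assumes V8's 64-bit Smi tagging on mips64, where the 32-bit payload
// lives in the upper word, i.e. an untagging shift of 32):
//
//   static int64_t SmiCompare(int64_t lhs_tagged, int64_t rhs_tagged) {
//     const int kSmiShift = 32;               // Assumed tagging scheme.
//     int64_t lhs = lhs_tagged >> kSmiShift;  // SmiUntag.
//     int64_t rhs = rhs_tagged >> kSmiShift;
//     return lhs - rhs;  // Negative, zero, or positive, like the stub's v0.
//   }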


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = a1;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const Register heapnumbermap = a5;
  const Register heapnumber = v0;
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ld(base, MemOperand(sp, 1 * kPointerSize));
    __ ld(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ mtc1(scratch, single_scratch);
    __ cvt_d_w(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    if (exponent_type() == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ Move(double_scratch, 0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &not_plus_half,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ neg_d(double_result, double_scratch);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ sqrt_d(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ Move(double_scratch, -0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &call_runtime,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ Move(double_result, kDoubleRegZero);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ Move(double_result, 1);
      __ sqrt_d(double_scratch, double_scratch);
      __ div_d(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ sdc1(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(v0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ DropAndRet(2);
  } else {
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}
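
// The integer-exponent loop above is exponentiation by squaring; a hedged
// C++ sketch (illustrative, not V8 API; the stub additionally bails out to
// the C runtime when a negative exponent yields zero, because with
// subnormals x^-y == (1/x)^y does not always hold):
//
//   static double PowInteger(double base, int exponent) {
//     double result = 1.0;
//     unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
//                               : static_cast<unsigned>(exponent);
//     for (double b = base; e != 0; e >>= 1) {
//       if (e & 1) result *= b;  // Multiply in the current bit's power.
//       b *= b;                  // Square the base for the next bit.
//     }
//     return exponent < 0 ? 1.0 / result : result;
//   }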
1047 
1048 
NeedsImmovableCode()1049 bool CEntryStub::NeedsImmovableCode() {
1050   return true;
1051 }
1052 
1053 
GenerateStubsAheadOfTime(Isolate * isolate)1054 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1055   CEntryStub::GenerateAheadOfTime(isolate);
1056   WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1057   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1058   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1059   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1060   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1061   BinaryOpICStub::GenerateAheadOfTime(isolate);
1062   StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1063   RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1064   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1065 }
1066 
1067 
GenerateAheadOfTime(Isolate * isolate)1068 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1069   StoreRegistersStateStub stub(isolate);
1070   stub.GetCode();
1071 }
1072 
1073 
GenerateAheadOfTime(Isolate * isolate)1074 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1075   RestoreRegistersStateStub stub(isolate);
1076   stub.GetCode();
1077 }
1078 
1079 
GenerateFPStubs(Isolate * isolate)1080 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1081   // Generate if not already in cache.
1082   SaveFPRegsMode mode = kSaveFPRegs;
1083   CEntryStub(isolate, 1, mode).GetCode();
1084   StoreBufferOverflowStub(isolate, mode).GetCode();
1085   isolate->set_fp_stubs_generated(true);
1086 }
1087 
1088 
GenerateAheadOfTime(Isolate * isolate)1089 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1090   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1091   stub.GetCode();
1092 }
1093 
1094 
Generate(MacroAssembler * masm)1095 void CEntryStub::Generate(MacroAssembler* masm) {
1096   // Called from JavaScript; parameters are on stack as if calling JS function
1097   // s0: number of arguments including receiver
1098   // s1: size of arguments excluding receiver
1099   // s2: pointer to builtin function
1100   // fp: frame pointer    (restored after C call)
1101   // sp: stack pointer    (restored as callee's sp after C call)
1102   // cp: current context  (C callee-saved)
1103 
1104   ProfileEntryHookStub::MaybeCallEntryHook(masm);
1105 
1106   // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1107   // The reason for this is that these arguments would need to be saved anyway
1108   // so it's faster to set them up directly.
1109   // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1110 
1111   // Compute the argv pointer in a callee-saved register.
1112   __ Daddu(s1, sp, s1);
1113 
1114   // Enter the exit frame that transitions from JavaScript to C++.
1115   FrameScope scope(masm, StackFrame::MANUAL);
1116   __ EnterExitFrame(save_doubles());
1117 
1118   // s0: number of arguments  including receiver (C callee-saved)
1119   // s1: pointer to first argument (C callee-saved)
1120   // s2: pointer to builtin function (C callee-saved)
1121 
1122   // Prepare arguments for C routine.
1123   // a0 = argc
1124   __ mov(a0, s0);
1125   // a1 = argv (set in the delay slot after find_ra below).
1126 
1127   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1128   // also need to reserve the 4 argument slots on the stack.
1129 
1130   __ AssertStackIsAligned();
1131 
1132   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1133 
1134   // To let the GC traverse the return address of the exit frames, we need to
1135   // know where the return address is. The CEntryStub is unmovable, so
1136   // we can store the address on the stack to be able to find it again and
1137   // we never have to restore it, because it will not change.
1138   { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1139     // This branch-and-link sequence is needed to find the current PC on mips,
1140     // saved to the ra register.
1141     // Use masm-> here instead of the double-underscore macro since extra
1142     // coverage code can interfere with the proper calculation of ra.
1143     Label find_ra;
1144     masm->bal(&find_ra);  // bal exposes branch delay slot.
1145     masm->mov(a1, s1);
1146     masm->bind(&find_ra);
1147 
1148     // Adjust the value in ra to point to the correct return location, 2nd
1149     // instruction past the real call into C code (the jalr(t9)), and push it.
1150     // This is the return address of the exit frame.
1151     const int kNumInstructionsToJump = 5;
1152     masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
1153     masm->sd(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
1154     // Stack space reservation moved to the branch delay slot below.
1155     // Stack is still aligned.
1156 
1157     // Call the C routine.
1158     masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
1159     masm->jalr(t9);
1160     // Set up sp in the delay slot.
1161     masm->daddiu(sp, sp, -kCArgsSlotsSize);
1162     // Make sure the stored 'ra' points to this position.
1163     DCHECK_EQ(kNumInstructionsToJump,
1164               masm->InstructionsGeneratedSince(&find_ra));
1165   }
1166 
1167   // Runtime functions should not return 'the hole'.  Allowing it to escape may
1168   // lead to crashes in the IC code later.
1169   if (FLAG_debug_code) {
1170     Label okay;
1171     __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1172     __ Branch(&okay, ne, v0, Operand(a4));
1173     __ stop("The hole escaped");
1174     __ bind(&okay);
1175   }
1176 
1177   // Check result for exception sentinel.
1178   Label exception_returned;
1179   __ LoadRoot(a4, Heap::kExceptionRootIndex);
1180   __ Branch(&exception_returned, eq, a4, Operand(v0));
1181 
1182   ExternalReference pending_exception_address(
1183       Isolate::kPendingExceptionAddress, isolate());
1184 
1185   // Check that there is no pending exception, otherwise we
1186   // should have returned the exception sentinel.
1187   if (FLAG_debug_code) {
1188     Label okay;
1189     __ li(a2, Operand(pending_exception_address));
1190     __ ld(a2, MemOperand(a2));
1191     __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1192     // Cannot use check here as it attempts to generate call into runtime.
1193     __ Branch(&okay, eq, a4, Operand(a2));
1194     __ stop("Unexpected pending exception");
1195     __ bind(&okay);
1196   }
1197 
1198   // Exit C frame and return.
1199   // v0:v1: result
1200   // sp: stack pointer
1201   // fp: frame pointer
1202   // s0: still holds argc (callee-saved).
1203   __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1204 
1205   // Handling of exception.
1206   __ bind(&exception_returned);
1207 
1208   // Retrieve the pending exception.
1209   __ li(a2, Operand(pending_exception_address));
1210   __ ld(v0, MemOperand(a2));
1211 
1212   // Clear the pending exception.
1213   __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1214   __ sd(a3, MemOperand(a2));
1215 
1216   // Special handling of termination exceptions which are uncatchable
1217   // by javascript code.
1218   Label throw_termination_exception;
1219   __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
1220   __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
1221 
1222   // Handle normal exception.
1223   __ Throw(v0);
1224 
1225   __ bind(&throw_termination_exception);
1226   __ ThrowUncatchable(v0);
1227 }
1228 
1229 
Generate(MacroAssembler * masm)1230 void JSEntryStub::Generate(MacroAssembler* masm) {
1231   Label invoke, handler_entry, exit;
1232   Isolate* isolate = masm->isolate();
1233 
1234   // TODO(plind): unify the ABI description here.
1235   // Registers:
1236   // a0: entry address
1237   // a1: function
1238   // a2: receiver
1239   // a3: argc
1240   // a4 (a4): on mips64
1241 
1242   // Stack:
1243   // 0 arg slots on mips64 (4 args slots on mips)
1244   // args -- in a4/a4 on mips64, on stack on mips
1245 
1246   ProfileEntryHookStub::MaybeCallEntryHook(masm);
1247 
1248   // Save callee saved registers on the stack.
1249   __ MultiPush(kCalleeSaved | ra.bit());
1250 
1251   // Save callee-saved FPU registers.
1252   __ MultiPushFPU(kCalleeSavedFPU);
1253   // Set up the reserved register for 0.0.
1254   __ Move(kDoubleRegZero, 0.0);
1255 
1256   // Load argv in s0 register.
1257   if (kMipsAbi == kN64) {
1258     __ mov(s0, a4);  // 5th parameter in mips64 a4 (a4) register.
1259   } else {  // Abi O32.
1260     // 5th parameter on stack for O32 abi.
1261     int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1262     offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1263     __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1264   }
1265 
1266   __ InitializeRootRegister();
1267 
1268   // We build an EntryFrame.
1269   __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
1270   int marker = type();
1271   __ li(a6, Operand(Smi::FromInt(marker)));
1272   __ li(a5, Operand(Smi::FromInt(marker)));
1273   ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
1274   __ li(a4, Operand(c_entry_fp));
1275   __ ld(a4, MemOperand(a4));
1276   __ Push(a7, a6, a5, a4);
1277   // Set up frame pointer for the frame to be pushed.
1278   __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1279 
1280   // Registers:
1281   // a0: entry_address
1282   // a1: function
1283   // a2: receiver_pointer
1284   // a3: argc
1285   // s0: argv
1286   //
1287   // Stack:
1288   // caller fp          |
1289   // function slot      | entry frame
1290   // context slot       |
1291   // bad fp (0xff...f)  |
1292   // callee saved registers + ra
1293   // [ O32: 4 args slots]
1294   // args
1295 
1296   // If this is the outermost JS call, set js_entry_sp value.
1297   Label non_outermost_js;
1298   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1299   __ li(a5, Operand(ExternalReference(js_entry_sp)));
1300   __ ld(a6, MemOperand(a5));
1301   __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
1302   __ sd(fp, MemOperand(a5));
1303   __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1304   Label cont;
1305   __ b(&cont);
1306   __ nop();   // Branch delay slot nop.
1307   __ bind(&non_outermost_js);
1308   __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1309   __ bind(&cont);
1310   __ push(a4);
1311 
1312   // Jump to a faked try block that does the invoke, with a faked catch
1313   // block that sets the pending exception.
1314   __ jmp(&invoke);
1315   __ bind(&handler_entry);
1316   handler_offset_ = handler_entry.pos();
1317   // Caught exception: Store result (exception) in the pending exception
1318   // field in the JSEnv and return a failure sentinel.  Coming in here the
1319   // fp will be invalid because the PushTryHandler below sets it to 0 to
1320   // signal the existence of the JSEntry frame.
1321   __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1322                                       isolate)));
1323   __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
1324   __ LoadRoot(v0, Heap::kExceptionRootIndex);
1325   __ b(&exit);  // b exposes branch delay slot.
1326   __ nop();   // Branch delay slot nop.
1327 
1328   // Invoke: Link this frame into the handler chain.  There's only one
1329   // handler block in this code object, so its index is 0.
1330   __ bind(&invoke);
1331   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1332   // If an exception not caught by another handler occurs, this handler
1333   // returns control to the code after the bal(&invoke) above, which
1334   // restores all kCalleeSaved registers (including cp and fp) to their
1335   // saved values before returning a failure to C.
1336 
1337   // Clear any pending exceptions.
1338   __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
1339   __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1340                                       isolate)));
1341   __ sd(a5, MemOperand(a4));
1342 
1343   // Invoke the function by calling through JS entry trampoline builtin.
1344   // Notice that we cannot store a reference to the trampoline code directly in
1345   // this stub, because runtime stubs are not traversed when doing GC.
1346 
1347   // Registers:
1348   // a0: entry_address
1349   // a1: function
1350   // a2: receiver_pointer
1351   // a3: argc
1352   // s0: argv
1353   //
1354   // Stack:
1355   // handler frame
1356   // entry frame
1357   // callee saved registers + ra
1358   // [ O32: 4 args slots]
1359   // args
1360 
1361   if (type() == StackFrame::ENTRY_CONSTRUCT) {
1362     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1363                                       isolate);
1364     __ li(a4, Operand(construct_entry));
1365   } else {
1366     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1367     __ li(a4, Operand(entry));
1368   }
1369   __ ld(t9, MemOperand(a4));  // Deref address.
1370   // Call JSEntryTrampoline.
1371   __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1372   __ Call(t9);
1373 
1374   // Unlink this frame from the handler chain.
1375   __ PopTryHandler();
1376 
1377   __ bind(&exit);  // v0 holds result
1378   // Check if the current stack frame is marked as the outermost JS frame.
1379   Label non_outermost_js_2;
1380   __ pop(a5);
1381   __ Branch(&non_outermost_js_2,
1382             ne,
1383             a5,
1384             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1385   __ li(a5, Operand(ExternalReference(js_entry_sp)));
1386   __ sd(zero_reg, MemOperand(a5));
1387   __ bind(&non_outermost_js_2);
1388 
1389   // Restore the top frame descriptors from the stack.
1390   __ pop(a5);
1391   __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1392                                       isolate)));
1393   __ sd(a5, MemOperand(a4));
1394 
1395   // Reset the stack to the callee saved registers.
1396   __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1397 
1398   // Restore callee-saved fpu registers.
1399   __ MultiPopFPU(kCalleeSavedFPU);
1400 
1401   // Restore callee saved registers from the stack.
1402   __ MultiPop(kCalleeSaved | ra.bit());
1403   // Return.
1404   __ Jump(ra);
1405 }
1406 
1407 
1408 // Uses registers a0 to a4.
1409 // Expected input (depending on whether args are in registers or on the stack):
1410 // * object: a0 or at sp + 1 * kPointerSize.
1411 // * function: a1 or at sp.
1412 //
1413 // An inlined call site may have been generated before calling this stub.
1414 // In this case the offset to the inline site to patch is passed on the stack,
1415 // in the safepoint slot for register a4.
1416 void InstanceofStub::Generate(MacroAssembler* masm) {
1417   // Call site inlining and patching implies arguments in registers.
1418   DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1419   // ReturnTrueFalse is only implemented for inlined call sites.
1420   DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1421 
1422   // Fixed register usage throughout the stub:
1423   const Register object = a0;  // Object (lhs).
1424   Register map = a3;  // Map of the object.
1425   const Register function = a1;  // Function (rhs).
1426   const Register prototype = a4;  // Prototype of the function.
1427   const Register inline_site = t1;
1428   const Register scratch = a2;
1429 
1430   const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
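  // This delta encodes an assumption about the inlined call-site layout
  // emitted by the optimizing compiler: the instruction that materializes
  // the boolean result is expected to sit exactly 7 instructions past the
  // patched map load (see the PatchRelocatedValue calls below).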
1431 
1432   Label slow, loop, is_instance, is_not_instance, not_js_object;
1433 
1434   if (!HasArgsInRegisters()) {
1435     __ ld(object, MemOperand(sp, 1 * kPointerSize));
1436     __ ld(function, MemOperand(sp, 0));
1437   }
1438 
1439   // Check that the left hand is a JS object and load map.
1440   __ JumpIfSmi(object, &not_js_object);
1441   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1442 
1443   // If there is a call site cache don't look in the global cache, but do the
1444   // real lookup and update the call site cache.
1445   if (!HasCallSiteInlineCheck()) {
1446     Label miss;
1447     __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1448     __ Branch(&miss, ne, function, Operand(at));
1449     __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1450     __ Branch(&miss, ne, map, Operand(at));
1451     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1452     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1453 
1454     __ bind(&miss);
1455   }
1456 
1457   // Get the prototype of the function.
1458   __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1459 
1460   // Check that the function prototype is a JS object.
1461   __ JumpIfSmi(prototype, &slow);
1462   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1463 
1464   // Update the global instanceof or call site inlined cache with the current
1465   // map and function. The cached answer will be set when it is known below.
1466   if (!HasCallSiteInlineCheck()) {
1467     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1468     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1469   } else {
1470     DCHECK(HasArgsInRegisters());
1471     // Patch the (relocated) inlined map check.
1472 
1473     // The offset was stored in a4 safepoint slot.
1474     // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1475     __ LoadFromSafepointRegisterSlot(scratch, a4);
1476     __ Dsubu(inline_site, ra, scratch);
1477     // Get the map location in scratch and patch it.
1478     __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
1479     __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
1480   }
1481 
1482   // Register mapping: a3 is object map and a4 is function prototype.
1483   // Get prototype of object into a2.
1484   __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1485 
1486   // We don't need map any more. Use it as a scratch register.
1487   Register scratch2 = map;
1488   map = no_reg;
1489 
1490   // Loop through the prototype chain looking for the function prototype.
1491   __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1492   __ bind(&loop);
1493   __ Branch(&is_instance, eq, scratch, Operand(prototype));
1494   __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1495   __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1496   __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1497   __ Branch(&loop);
1498 
1499   __ bind(&is_instance);
1500   DCHECK(Smi::FromInt(0) == 0);
1501   if (!HasCallSiteInlineCheck()) {
1502     __ mov(v0, zero_reg);
1503     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1504   } else {
1505     // Patch the call site to return true.
1506     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1507     __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1508     // Get the boolean result location in scratch and patch it.
1509     __ PatchRelocatedValue(inline_site, scratch, v0);
1510 
1511     if (!ReturnTrueFalseObject()) {
1512       DCHECK_EQ(Smi::FromInt(0), 0);
1513       __ mov(v0, zero_reg);
1514     }
1515   }
1516   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1517 
1518   __ bind(&is_not_instance);
1519   if (!HasCallSiteInlineCheck()) {
1520     __ li(v0, Operand(Smi::FromInt(1)));
1521     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1522   } else {
1523     // Patch the call site to return false.
1524     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1525     __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1526     // Get the boolean result location in scratch and patch it.
1527     __ PatchRelocatedValue(inline_site, scratch, v0);
1528 
1529     if (!ReturnTrueFalseObject()) {
1530       __ li(v0, Operand(Smi::FromInt(1)));
1531     }
1532   }
1533 
1534   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1535 
1536   Label object_not_null, object_not_null_or_smi;
1537   __ bind(&not_js_object);
1538   // Before the null, smi and string value checks, check that the rhs is a
1539   // function; for a non-function rhs an exception needs to be thrown.
1540   __ JumpIfSmi(function, &slow);
1541   __ GetObjectType(function, scratch2, scratch);
1542   __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1543 
1544   // Null is not instance of anything.
1545   __ Branch(&object_not_null,
1546             ne,
1547             scratch,
1548             Operand(isolate()->factory()->null_value()));
1549   __ li(v0, Operand(Smi::FromInt(1)));
1550   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1551 
1552   __ bind(&object_not_null);
1553   // Smi values are not instances of anything.
1554   __ JumpIfNotSmi(object, &object_not_null_or_smi);
1555   __ li(v0, Operand(Smi::FromInt(1)));
1556   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1557 
1558   __ bind(&object_not_null_or_smi);
1559   // String values are not instances of anything.
1560   __ IsObjectJSStringType(object, scratch, &slow);
1561   __ li(v0, Operand(Smi::FromInt(1)));
1562   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1563 
1564   // Slow-case.  Tail call builtin.
1565   __ bind(&slow);
1566   if (!ReturnTrueFalseObject()) {
1567     if (HasArgsInRegisters()) {
1568       __ Push(a0, a1);
1569     }
1570     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1571   } else {
1572     {
1573       FrameScope scope(masm, StackFrame::INTERNAL);
1574       __ Push(a0, a1);
1575       __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1576     }
1577     __ mov(a0, v0);
1578     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1579     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1580     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1581     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1582   }
1583 }
1584 
1585 
1586 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1587   Label miss;
1588   Register receiver = LoadDescriptor::ReceiverRegister();
1589   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
1590                                                           a4, &miss);
1591   __ bind(&miss);
1592   PropertyAccessCompiler::TailCallBuiltin(
1593       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1594 }
1595 
1596 
1597 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1598   // The displacement is the offset of the last parameter (if any)
1599   // relative to the frame pointer.
1600   const int kDisplacement =
1601       StandardFrameConstants::kCallerSPOffset - kPointerSize;
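  // Address arithmetic sketch (assuming the standard frame layout, where
  // kCallerSPOffset == 2 * kPointerSize and thus kDisplacement ==
  // kPointerSize): for index i and parameter count n the element is read
  // from fp + (n - i) * kPointerSize + kDisplacement, i.e. from
  // caller_sp + (n - 1 - i) * kPointerSize.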
1602   DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1603   DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1604 
1605   // Check that the key is a smi.
1606   Label slow;
1607   __ JumpIfNotSmi(a1, &slow);
1608 
1609   // Check if the calling frame is an arguments adaptor frame.
1610   Label adaptor;
1611   __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1612   __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1613   __ Branch(&adaptor,
1614             eq,
1615             a3,
1616             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1617 
1618   // Check index (a1) against formal parameters count limit passed in
1619   // through register a0. Use unsigned comparison to get negative
1620   // check for free.
1621   __ Branch(&slow, hs, a1, Operand(a0));
1622 
1623   // Read the argument from the stack and return it.
1624   __ dsubu(a3, a0, a1);
1625   __ SmiScale(a7, a3, kPointerSizeLog2);
1626   __ Daddu(a3, fp, Operand(a7));
1627   __ Ret(USE_DELAY_SLOT);
1628   __ ld(v0, MemOperand(a3, kDisplacement));
1629 
1630   // Arguments adaptor case: Check index (a1) against actual arguments
1631   // limit found in the arguments adaptor frame. Use unsigned
1632   // comparison to get negative check for free.
1633   __ bind(&adaptor);
1634   __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1635   __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1636 
1637   // Read the argument from the adaptor frame and return it.
1638   __ dsubu(a3, a0, a1);
1639   __ SmiScale(a7, a3, kPointerSizeLog2);
1640   __ Daddu(a3, a2, Operand(a7));
1641   __ Ret(USE_DELAY_SLOT);
1642   __ ld(v0, MemOperand(a3, kDisplacement));
1643 
1644   // Slow-case: Handle non-smi or out-of-bounds access to arguments
1645   // by calling the runtime system.
1646   __ bind(&slow);
1647   __ push(a1);
1648   __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1649 }
1650 
1651 
1652 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1653   // sp[0] : number of parameters
1654   // sp[8] : receiver displacement
1655   // sp[16] : function
1656   // Check if the calling frame is an arguments adaptor frame.
1657   Label runtime;
1658   __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1659   __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1660   __ Branch(&runtime,
1661             ne,
1662             a2,
1663             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1664 
1665   // Patch the arguments.length and the parameters pointer in the current frame.
1666   __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1667   __ sd(a2, MemOperand(sp, 0 * kPointerSize));
1668   __ SmiScale(a7, a2, kPointerSizeLog2);
1669   __ Daddu(a3, a3, Operand(a7));
1670   __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
1671   __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1672 
1673   __ bind(&runtime);
1674   __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1675 }
1676 
1677 
1678 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1679   // Stack layout:
1680   //  sp[0] : number of parameters (tagged)
1681   //  sp[8] : address of receiver argument
1682   //  sp[16] : function
1683   // Registers used over whole function:
1684   //  a6 : allocated object (tagged)
1685   //  t1 : mapped parameter count (tagged)
1686 
1687   __ ld(a1, MemOperand(sp, 0 * kPointerSize));
1688   // a1 = parameter count (tagged)
1689 
1690   // Check if the calling frame is an arguments adaptor frame.
1691   Label runtime;
1692   Label adaptor_frame, try_allocate;
1693   __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1694   __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1695   __ Branch(&adaptor_frame,
1696             eq,
1697             a2,
1698             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1699 
1700   // No adaptor, parameter count = argument count.
1701   __ mov(a2, a1);
1702   __ Branch(&try_allocate);
1703 
1704   // We have an adaptor frame. Patch the parameters pointer.
1705   __ bind(&adaptor_frame);
1706   __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1707   __ SmiScale(t2, a2, kPointerSizeLog2);
1708   __ Daddu(a3, a3, Operand(t2));
1709   __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1710   __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1711 
1712   // a1 = parameter count (tagged)
1713   // a2 = argument count (tagged)
1714   // Compute the mapped parameter count = min(a1, a2) in a1.
1715   Label skip_min;
1716   __ Branch(&skip_min, lt, a1, Operand(a2));
1717   __ mov(a1, a2);
1718   __ bind(&skip_min);
1719 
1720   __ bind(&try_allocate);
1721 
1722   // Compute the sizes of backing store, parameter map, and arguments object.
1723   // 1. Parameter map, has 2 extra words containing context and backing store.
1724   const int kParameterMapHeaderSize =
1725       FixedArray::kHeaderSize + 2 * kPointerSize;
1726   // If there are no mapped parameters, we do not need the parameter_map.
1727   Label param_map_size;
1728   DCHECK_EQ(0, Smi::FromInt(0));
1729   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
1730   __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
1731   __ SmiScale(t1, a1, kPointerSizeLog2);
1732   __ daddiu(t1, t1, kParameterMapHeaderSize);
1733   __ bind(&param_map_size);
1734 
1735   // 2. Backing store.
1736   __ SmiScale(t2, a2, kPointerSizeLog2);
1737   __ Daddu(t1, t1, Operand(t2));
1738   __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
1739 
1740   // 3. Arguments object.
1741   __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
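  // Illustrative total (not normative): with 2 mapped parameters and 3
  // arguments, t1 now covers the parameter map (header + 2 extra words +
  // 2 slots), the backing store (header + 3 slots) and the fixed-size
  // sloppy arguments object, all allocated contiguously below.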
1742 
1743   // Do the allocation of all three objects in one go.
1744   __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
1745 
1746   // v0 = address of new object(s) (tagged)
1747   // a2 = argument count (smi-tagged)
1748   // Get the arguments boilerplate from the current native context into a4.
1749   const int kNormalOffset =
1750       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1751   const int kAliasedOffset =
1752       Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1753 
1754   __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1755   __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1756   Label skip2_ne, skip2_eq;
1757   __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1758   __ ld(a4, MemOperand(a4, kNormalOffset));
1759   __ bind(&skip2_ne);
1760 
1761   __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1762   __ ld(a4, MemOperand(a4, kAliasedOffset));
1763   __ bind(&skip2_eq);
1764 
1765   // v0 = address of new object (tagged)
1766   // a1 = mapped parameter count (tagged)
1767   // a2 = argument count (smi-tagged)
1768   // a4 = address of arguments map (tagged)
1769   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
1770   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1771   __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1772   __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1773 
1774   // Set up the callee in-object property.
1775   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1776   __ ld(a3, MemOperand(sp, 2 * kPointerSize));
1777   __ AssertNotSmi(a3);
1778   const int kCalleeOffset = JSObject::kHeaderSize +
1779       Heap::kArgumentsCalleeIndex * kPointerSize;
1780   __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
1781 
1782   // Use the length (smi tagged) and set that as an in-object property too.
1783   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1784   const int kLengthOffset = JSObject::kHeaderSize +
1785       Heap::kArgumentsLengthIndex * kPointerSize;
1786   __ sd(a2, FieldMemOperand(v0, kLengthOffset));
1787 
1788   // Set up the elements pointer in the allocated arguments object.
1789   // If we allocated a parameter map, a4 will point there, otherwise
1790   // it will point to the backing store.
1791   __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1792   __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
1793 
1794   // v0 = address of new object (tagged)
1795   // a1 = mapped parameter count (tagged)
1796   // a2 = argument count (tagged)
1797   // a4 = address of parameter map or backing store (tagged)
1798   // Initialize parameter map. If there are no mapped arguments, we're done.
1799   Label skip_parameter_map;
1800   Label skip3;
1801   __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1802   // Move backing store address to a3, because it is
1803   // expected there when filling in the unmapped arguments.
1804   __ mov(a3, a4);
1805   __ bind(&skip3);
1806 
1807   __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1808 
1809   __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
1810   __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
1811   __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
1812   __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
1813   __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
1814   __ SmiScale(t2, a1, kPointerSizeLog2);
1815   __ Daddu(a6, a4, Operand(t2));
1816   __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
1817   __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
1818 
1819   // Copy the parameter slots and the holes in the arguments.
1820   // We need to fill in mapped_parameter_count slots. They index the context,
1821   // where parameters are stored in reverse order, at
1822   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1823   // The mapped parameters thus need to get indices
1824   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
1825   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1826   // We loop from right to left.
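  // Worked example (illustrative only): with parameter_count == 3 and
  // mapped_parameter_count == 2, the copied slots receive the context
  // indices MIN_CONTEXT_SLOTS + 2 and MIN_CONTEXT_SLOTS + 1, and the
  // matching backing-store slots are filled with the hole.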
1827   Label parameters_loop, parameters_test;
1828   __ mov(a6, a1);
1829   __ ld(t1, MemOperand(sp, 0 * kPointerSize));
1830   __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1831   __ Dsubu(t1, t1, Operand(a1));
1832   __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
1833   __ SmiScale(t2, a6, kPointerSizeLog2);
1834   __ Daddu(a3, a4, Operand(t2));
1835   __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
1836 
1837   // a6 = loop variable (tagged)
1838   // a1 = mapping index (tagged)
1839   // a3 = address of backing store (tagged)
1840   // a4 = address of parameter map (tagged)
1841   // a5 = temporary scratch (e.g., for address calculation)
1842   // a7 = the hole value
1843   __ jmp(&parameters_test);
1844 
1845   __ bind(&parameters_loop);
1846 
1847   __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
1848   __ SmiScale(a5, a6, kPointerSizeLog2);
1849   __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1850   __ Daddu(t2, a4, a5);
1851   __ sd(t1, MemOperand(t2));
1852   __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1853   __ Daddu(t2, a3, a5);
1854   __ sd(a7, MemOperand(t2));
1855   __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1856   __ bind(&parameters_test);
1857   __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
1858 
1859   __ bind(&skip_parameter_map);
1860   // a2 = argument count (tagged)
1861   // a3 = address of backing store (tagged)
1862   // a5 = scratch
1863   // Copy arguments header and remaining slots (if there are any).
1864   __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
1865   __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
1866   __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1867 
1868   Label arguments_loop, arguments_test;
1869   __ mov(t1, a1);
1870   __ ld(a4, MemOperand(sp, 1 * kPointerSize));
1871   __ SmiScale(t2, t1, kPointerSizeLog2);
1872   __ Dsubu(a4, a4, Operand(t2));
1873   __ jmp(&arguments_test);
1874 
1875   __ bind(&arguments_loop);
1876   __ Dsubu(a4, a4, Operand(kPointerSize));
1877   __ ld(a6, MemOperand(a4, 0));
1878   __ SmiScale(t2, t1, kPointerSizeLog2);
1879   __ Daddu(a5, a3, Operand(t2));
1880   __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
1881   __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1882 
1883   __ bind(&arguments_test);
1884   __ Branch(&arguments_loop, lt, t1, Operand(a2));
1885 
1886   // Return and remove the on-stack parameters.
1887   __ DropAndRet(3);
1888 
1889   // Do the runtime call to allocate the arguments object.
1890   // a2 = argument count (tagged)
1891   __ bind(&runtime);
1892   __ sd(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
1893   __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1894 }
1895 
1896 
1897 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1898   // Return address is in ra.
1899   Label slow;
1900 
1901   Register receiver = LoadDescriptor::ReceiverRegister();
1902   Register key = LoadDescriptor::NameRegister();
1903 
1904   // Check that the key is an array index, that is Uint32.
1905   __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1906   __ Branch(&slow, ne, t0, Operand(zero_reg));
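  // The single mask test above checks two things at once: the Smi tag bits
  // must be zero (the key is a Smi) and the sign bit must be clear (the
  // index is non-negative), i.e. the key is a valid uint32 array index.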
1907 
1908   // Everything is fine, call runtime.
1909   __ Push(receiver, key);  // Receiver, key.
1910 
1911   // Perform tail call to the entry.
1912   __ TailCallExternalReference(
1913       ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1914                         masm->isolate()),
1915       2, 1);
1916 
1917   __ bind(&slow);
1918   PropertyAccessCompiler::TailCallBuiltin(
1919       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1920 }
1921 
1922 
1923 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1924   // sp[0] : number of parameters
1925   // sp[8] : receiver displacement
1926   // sp[16] : function
1927   // Check if the calling frame is an arguments adaptor frame.
1928   Label adaptor_frame, try_allocate, runtime;
1929   __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1930   __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1931   __ Branch(&adaptor_frame,
1932             eq,
1933             a3,
1934             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1935 
1936   // Get the length from the frame.
1937   __ ld(a1, MemOperand(sp, 0));
1938   __ Branch(&try_allocate);
1939 
1940   // Patch the arguments.length and the parameters pointer.
1941   __ bind(&adaptor_frame);
1942   __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1943   __ sd(a1, MemOperand(sp, 0));
1944   __ SmiScale(at, a1, kPointerSizeLog2);
1945 
1946   __ Daddu(a3, a2, Operand(at));
1947 
1948   __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1949   __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1950 
1951   // Try the new space allocation. Start out with computing the size
1952   // of the arguments object and the elements array in words.
1953   Label add_arguments_object;
1954   __ bind(&try_allocate);
1955   __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1956   __ SmiUntag(a1);
1957 
1958   __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1959   __ bind(&add_arguments_object);
1960   __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1961 
1962   // Do the allocation of both objects in one go.
1963   __ Allocate(a1, v0, a2, a3, &runtime,
1964               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1965 
1966   // Get the arguments boilerplate from the current native context.
1967   __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1968   __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1969   __ ld(a4, MemOperand(a4, Context::SlotOffset(
1970       Context::STRICT_ARGUMENTS_MAP_INDEX)));
1971 
1972   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
1973   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1974   __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1975   __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1976 
1977   // Get the length (smi tagged) and set that as an in-object property too.
1978   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1979   __ ld(a1, MemOperand(sp, 0 * kPointerSize));
1980   __ AssertSmi(a1);
1981   __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
1982       Heap::kArgumentsLengthIndex * kPointerSize));
1983 
1984   Label done;
1985   __ Branch(&done, eq, a1, Operand(zero_reg));
1986 
1987   // Get the parameters pointer from the stack.
1988   __ ld(a2, MemOperand(sp, 1 * kPointerSize));
1989 
1990   // Set up the elements pointer in the allocated arguments object and
1991   // initialize the header in the elements fixed array.
1992   __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
1993   __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
1994   __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
1995   __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
1996   __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
1997   // Untag the length for the loop.
1998   __ SmiUntag(a1);
1999 
2001   // Copy the fixed array slots.
2002   Label loop;
2003   // Set up a4 to point to the first array slot.
2004   __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2005   __ bind(&loop);
2006   // Pre-decrement a2 with kPointerSize on each iteration.
2007   // Pre-decrement in order to skip receiver.
2008   __ Daddu(a2, a2, Operand(-kPointerSize));
2009   __ ld(a3, MemOperand(a2));
2010   // Post-increment a4 with kPointerSize on each iteration.
2011   __ sd(a3, MemOperand(a4));
2012   __ Daddu(a4, a4, Operand(kPointerSize));
2013   __ Dsubu(a1, a1, Operand(1));
2014   __ Branch(&loop, ne, a1, Operand(zero_reg));
2015 
2016   // Return and remove the on-stack parameters.
2017   __ bind(&done);
2018   __ DropAndRet(3);
2019 
2020   // Do the runtime call to allocate the arguments object.
2021   __ bind(&runtime);
2022   __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2023 }
2024 
2025 
2026 void RegExpExecStub::Generate(MacroAssembler* masm) {
2027   // Just jump directly to the runtime if native RegExp is not selected at
2028   // compile time, or if the regexp entry in generated code is turned off by
2029   // a runtime switch or at compilation.
2030 #ifdef V8_INTERPRETED_REGEXP
2031   __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2032 #else  // V8_INTERPRETED_REGEXP
2033 
2034   // Stack frame on entry.
2035   //  sp[0]: last_match_info (expected JSArray)
2036   //  sp[8]: previous index
2037   //  sp[16]: subject string
2038   //  sp[24]: JSRegExp object
2039 
2040   const int kLastMatchInfoOffset = 0 * kPointerSize;
2041   const int kPreviousIndexOffset = 1 * kPointerSize;
2042   const int kSubjectOffset = 2 * kPointerSize;
2043   const int kJSRegExpOffset = 3 * kPointerSize;
2044 
2045   Label runtime;
2046   // Allocation of registers for this function. These are in callee save
2047   // registers and will be preserved by the call to the native RegExp code, as
2048   // this code is called using the normal C calling convention. When calling
2049   // directly from generated code the native RegExp code will not do a GC and
2050   // therefore the content of these registers are safe to use after the call.
2051   // MIPS - using s0..s2, since we are not using CEntryStub.
2052   Register subject = s0;
2053   Register regexp_data = s1;
2054   Register last_match_info_elements = s2;
2055 
2056   // Ensure that a RegExp stack is allocated.
2057   ExternalReference address_of_regexp_stack_memory_address =
2058       ExternalReference::address_of_regexp_stack_memory_address(
2059           isolate());
2060   ExternalReference address_of_regexp_stack_memory_size =
2061       ExternalReference::address_of_regexp_stack_memory_size(isolate());
2062   __ li(a0, Operand(address_of_regexp_stack_memory_size));
2063   __ ld(a0, MemOperand(a0, 0));
2064   __ Branch(&runtime, eq, a0, Operand(zero_reg));
2065 
2066   // Check that the first argument is a JSRegExp object.
2067   __ ld(a0, MemOperand(sp, kJSRegExpOffset));
2068   STATIC_ASSERT(kSmiTag == 0);
2069   __ JumpIfSmi(a0, &runtime);
2070   __ GetObjectType(a0, a1, a1);
2071   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2072 
2073   // Check that the RegExp has been compiled (data contains a fixed array).
2074   __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2075   if (FLAG_debug_code) {
2076     __ SmiTst(regexp_data, a4);
2077     __ Check(nz,
2078              kUnexpectedTypeForRegExpDataFixedArrayExpected,
2079              a4,
2080              Operand(zero_reg));
2081     __ GetObjectType(regexp_data, a0, a0);
2082     __ Check(eq,
2083              kUnexpectedTypeForRegExpDataFixedArrayExpected,
2084              a0,
2085              Operand(FIXED_ARRAY_TYPE));
2086   }
2087 
2088   // regexp_data: RegExp data (FixedArray)
2089   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2090   __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2091   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2092 
2093   // regexp_data: RegExp data (FixedArray)
2094   // Check that the number of captures fit in the static offsets vector buffer.
2095   __ ld(a2,
2096          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2097   // Check (number_of_captures + 1) * 2 <= offsets vector size
2098   // Or          number_of_captures * 2 <= offsets vector size - 2
2099   // Or          number_of_captures     <= offsets vector size / 2 - 1
2100   // Multiplying by 2 comes for free since a2 is smi-tagged.
2101   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2102   int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
2103   __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
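  // Illustrative numbers only: if the static offsets vector held, say, 50
  // ints, the check above would admit at most 50 / 2 - 1 == 24 captures;
  // anything larger takes the runtime path.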
2104 
2105   // Reset offset for possibly sliced string.
2106   __ mov(t0, zero_reg);
2107   __ ld(subject, MemOperand(sp, kSubjectOffset));
2108   __ JumpIfSmi(subject, &runtime);
2109   __ mov(a3, subject);  // Make a copy of the original subject string.
2110   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2111   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2112   // subject: subject string
2113   // a3: subject string
2114   // a0: subject string instance type
2115   // regexp_data: RegExp data (FixedArray)
2116   // Handle subject string according to its encoding and representation:
2117   // (1) Sequential string?  If yes, go to (5).
2118   // (2) Anything but sequential or cons?  If yes, go to (6).
2119   // (3) Cons string.  If the string is flat, replace subject with first string.
2120   //     Otherwise bail out.
2121   // (4) Is subject external?  If yes, go to (7).
2122   // (5) Sequential string.  Load regexp code according to encoding.
2123   // (E) Carry on.
2124   /// [...]
2125 
2126   // Deferred code at the end of the stub:
2127   // (6) Not a long external string?  If yes, go to (8).
2128   // (7) External string.  Make it, offset-wise, look like a sequential string.
2129   //     Go to (5).
2130   // (8) Short external string or not a string?  If yes, bail out to runtime.
2131   // (9) Sliced string.  Replace subject with parent.  Go to (4).
2132 
2133   Label check_underlying;   // (4)
2134   Label seq_string;         // (5)
2135   Label not_seq_nor_cons;   // (6)
2136   Label external_string;    // (7)
2137   Label not_long_external;  // (8)
2138 
2139   // (1) Sequential string?  If yes, go to (5).
2140   __ And(a1,
2141          a0,
2142          Operand(kIsNotStringMask |
2143                  kStringRepresentationMask |
2144                  kShortExternalStringMask));
2145   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2146   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
2147 
2148   // (2) Anything but sequential or cons?  If yes, go to (6).
2149   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2150   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2151   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2152   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2153   // Go to (6).
2154   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2155 
2156   // (3) Cons string.  Check that it's flat.
2157   // Replace subject with first string and reload instance type.
2158   __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2159   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2160   __ Branch(&runtime, ne, a0, Operand(a1));
2161   __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2162 
2163   // (4) Is subject external?  If yes, go to (7).
2164   __ bind(&check_underlying);
2165   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2166   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2167   STATIC_ASSERT(kSeqStringTag == 0);
2168   __ And(at, a0, Operand(kStringRepresentationMask));
2169   // The underlying external string is never a short external string.
2170   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2171   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2172   __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
2173 
2174   // (5) Sequential string.  Load regexp code according to encoding.
2175   __ bind(&seq_string);
2176   // subject: sequential subject string (or look-alike, external string)
2177   // a3: original subject string
2178   // Load previous index and check range before a3 is overwritten.  We have to
2179   // use a3 instead of subject here because subject might have been only made
2180   // to look like a sequential string when it actually is an external string.
2181   __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
2182   __ JumpIfNotSmi(a1, &runtime);
2183   __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
2184   __ Branch(&runtime, ls, a3, Operand(a1));
2185   __ SmiUntag(a1);
2186 
2187   STATIC_ASSERT(kStringEncodingMask == 4);
2188   STATIC_ASSERT(kOneByteStringTag == 4);
2189   STATIC_ASSERT(kTwoByteStringTag == 0);
2190   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
2191   __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2192   __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
2193   __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2194   __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
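  // Movz is a branchless conditional move: t9 is overwritten with a5 only
  // when a0 == 0, i.e. only for two-byte subjects; one-byte subjects keep
  // the one-byte code object loaded above.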
2195 
2196   // (E) Carry on.  String handling is done.
2197   // t9: irregexp code
2198   // Check that the irregexp code has been generated for the actual string
2199   // encoding. If it has, the field contains a code object; otherwise it
2200   // contains a smi (code flushing support).
2201   __ JumpIfSmi(t9, &runtime);
2202 
2203   // a1: previous index
2204   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2205   // t9: code
2206   // subject: Subject string
2207   // regexp_data: RegExp data (FixedArray)
2208   // All checks done. Now push arguments for native regexp code.
2209   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2210                       1, a0, a2);
2211 
2212   // Isolates: note we add an additional parameter here (isolate pointer).
2213   const int kRegExpExecuteArguments = 9;
2214   const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
2215   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2216 
2217   // Stack pointer now points to cell where return address is to be written.
2218   // Arguments are before that on the stack or in registers, meaning we
2219   // treat the return address as argument 5. Thus every argument after that
2220   // needs to be shifted back by 1. Since DirectCEntryStub will handle
2221   // allocating space for the c argument slots, we don't need to calculate
2222   // that into the argument positions on the stack. This is how the stack will
2223   // look (sp meaning the value of sp at this moment):
2224   // Abi n64:
2225   //   [sp + 1] - Argument 9
2226   //   [sp + 0] - saved ra
2227   // Abi O32:
2228   //   [sp + 5] - Argument 9
2229   //   [sp + 4] - Argument 8
2230   //   [sp + 3] - Argument 7
2231   //   [sp + 2] - Argument 6
2232   //   [sp + 1] - Argument 5
2233   //   [sp + 0] - saved ra
2234 
2235   if (kMipsAbi == kN64) {
2236     // Argument 9: Pass current isolate address.
2237     __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2238     __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2239 
2240     // Argument 8: Indicate that this is a direct call from JavaScript.
2241     __ li(a7, Operand(1));
2242 
2243     // Argument 7: Start (high end) of backtracking stack memory area.
2244     __ li(a0, Operand(address_of_regexp_stack_memory_address));
2245     __ ld(a0, MemOperand(a0, 0));
2246     __ li(a2, Operand(address_of_regexp_stack_memory_size));
2247     __ ld(a2, MemOperand(a2, 0));
2248     __ daddu(a6, a0, a2);
2249 
2250     // Argument 6: Set the number of capture registers to zero to force global
2251     // regexps to behave as non-global. This does not affect non-global regexps.
2252     __ mov(a5, zero_reg);
2253 
2254     // Argument 5: static offsets vector buffer.
2255     __ li(a4, Operand(
2256           ExternalReference::address_of_static_offsets_vector(isolate())));
2257   } else {  // O32.
2258     DCHECK(kMipsAbi == kO32);
2259 
2260     // Argument 9: Pass current isolate address.
2261     // CFunctionArgumentOperand handles MIPS stack argument slots.
2262     __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2263     __ sd(a0, MemOperand(sp, 5 * kPointerSize));
2264 
2265     // Argument 8: Indicate that this is a direct call from JavaScript.
2266     __ li(a0, Operand(1));
2267     __ sd(a0, MemOperand(sp, 4 * kPointerSize));
2268 
2269     // Argument 7: Start (high end) of backtracking stack memory area.
2270     __ li(a0, Operand(address_of_regexp_stack_memory_address));
2271     __ ld(a0, MemOperand(a0, 0));
2272     __ li(a2, Operand(address_of_regexp_stack_memory_size));
2273     __ ld(a2, MemOperand(a2, 0));
2274     __ daddu(a0, a0, a2);
2275     __ sd(a0, MemOperand(sp, 3 * kPointerSize));
2276 
2277     // Argument 6: Set the number of capture registers to zero to force global
2278     // regexps to behave as non-global. This does not affect non-global regexps.
2279     __ mov(a0, zero_reg);
2280     __ sd(a0, MemOperand(sp, 2 * kPointerSize));
2281 
2282     // Argument 5: static offsets vector buffer.
2283     __ li(a0, Operand(
2284           ExternalReference::address_of_static_offsets_vector(isolate())));
2285     __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2286   }
2287 
2288   // For arguments 4 and 3 get string length, calculate start of string data
2289   // and calculate the shift of the index (0 for one_byte and 1 for two byte).
2290   __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2291   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
2292   // Load the length from the original subject string from the previous stack
2293   // frame. Therefore we have to use fp, which points exactly to two pointer
2294   // sizes below the previous sp. (Because creating a new stack frame pushes
2295   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2296   __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2297   // If slice offset is not 0, load the length from the original sliced string.
2298   // Argument 4, a3: End of string data
2299   // Argument 3, a2: Start of string data
2300   // Prepare start and end index of the input.
2301   __ dsllv(t1, t0, a3);
2302   __ daddu(t0, t2, t1);
2303   __ dsllv(t1, a1, a3);
2304   __ daddu(a2, t0, t1);
2305 
2306   __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
2307 
2308   __ SmiUntag(t2);
2309   __ dsllv(t1, t2, a3);
2310   __ daddu(a3, t0, t1);
2311   // Argument 2 (a1): Previous index.
2312   // Already there
2313 
2314   // Argument 1 (a0): Subject string.
2315   __ mov(a0, subject);
2316 
2317   // Locate the code entry and call it.
2318   __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2319   DirectCEntryStub stub(isolate());
2320   stub.GenerateCall(masm, t9);
2321 
2322   __ LeaveExitFrame(false, no_reg, true);
2323 
2324   // v0: result
2325   // subject: subject string (callee saved)
2326   // regexp_data: RegExp data (callee saved)
2327   // last_match_info_elements: Last match info elements (callee saved)
2328   // Check the result.
2329   Label success;
2330   __ Branch(&success, eq, v0, Operand(1));
2331   // We expect exactly one result since we force the called regexp to behave
2332   // as non-global.
2333   Label failure;
2334   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2335   // If not an exception, it can only be retry; handle that in the runtime.
2336   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2337   // The result must now be an exception. If there is no pending exception,
2338   // a stack overflow (on the backtrack stack) was detected in RegExp code
2339   // but the exception has not been created yet. Handle it in the runtime.
2340   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2341   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2342   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2343                                       isolate())));
2344   __ ld(v0, MemOperand(a2, 0));
2345   __ Branch(&runtime, eq, v0, Operand(a1));
2346 
2347   __ sd(a1, MemOperand(a2, 0));  // Clear pending exception.
2348 
2349   // Check if the exception is a termination. If so, throw as uncatchable.
2350   __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2351   Label termination_exception;
2352   __ Branch(&termination_exception, eq, v0, Operand(a0));
2353 
2354   __ Throw(v0);
2355 
2356   __ bind(&termination_exception);
2357   __ ThrowUncatchable(v0);
2358 
2359   __ bind(&failure);
2360   // For failure and exception return null.
2361   __ li(v0, Operand(isolate()->factory()->null_value()));
2362   __ DropAndRet(4);
2363 
2364   // Process the result from the native regexp code.
2365   __ bind(&success);
2366 
2367   __ lw(a1, UntagSmiFieldMemOperand(
2368       regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2369   // Calculate number of capture registers (number_of_captures + 1) * 2.
2370   __ Daddu(a1, a1, Operand(1));
2371   __ dsll(a1, a1, 1);  // Multiply by 2.
2372 
2373   __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
2374   __ JumpIfSmi(a0, &runtime);
2375   __ GetObjectType(a0, a2, a2);
2376   __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2377   // Check that the JSArray is in fast case.
2378   __ ld(last_match_info_elements,
2379         FieldMemOperand(a0, JSArray::kElementsOffset));
2380   __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2381   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2382   __ Branch(&runtime, ne, a0, Operand(at));
2383   // Check that the last match info has space for the capture registers and the
2384   // additional information.
2385   __ ld(a0,
2386         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2387   __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2388 
2389   __ SmiUntag(at, a0);
2390   __ Branch(&runtime, gt, a2, Operand(at));
2391 
2392   // a1: number of capture registers
2393   // subject: subject string
2394   // Store the capture count.
2395   __ SmiTag(a2, a1);  // To smi.
2396   __ sd(a2, FieldMemOperand(last_match_info_elements,
2397                              RegExpImpl::kLastCaptureCountOffset));
2398   // Store last subject and last input.
2399   __ sd(subject,
2400          FieldMemOperand(last_match_info_elements,
2401                          RegExpImpl::kLastSubjectOffset));
2402   __ mov(a2, subject);
2403   __ RecordWriteField(last_match_info_elements,
2404                       RegExpImpl::kLastSubjectOffset,
2405                       subject,
2406                       a7,
2407                       kRAHasNotBeenSaved,
2408                       kDontSaveFPRegs);
2409   __ mov(subject, a2);
2410   __ sd(subject,
2411          FieldMemOperand(last_match_info_elements,
2412                          RegExpImpl::kLastInputOffset));
2413   __ RecordWriteField(last_match_info_elements,
2414                       RegExpImpl::kLastInputOffset,
2415                       subject,
2416                       a7,
2417                       kRAHasNotBeenSaved,
2418                       kDontSaveFPRegs);
2419 
2420   // Get the static offsets vector filled by the native regexp code.
2421   ExternalReference address_of_static_offsets_vector =
2422       ExternalReference::address_of_static_offsets_vector(isolate());
2423   __ li(a2, Operand(address_of_static_offsets_vector));
2424 
2425   // a1: number of capture registers
2426   // a2: offsets vector
2427   Label next_capture, done;
2428   // Capture register counter starts from number of capture registers and
2429   // counts down until wrapping after zero.
2430   __ Daddu(a0,
2431          last_match_info_elements,
2432          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2433   __ bind(&next_capture);
2434   __ Dsubu(a1, a1, Operand(1));
2435   __ Branch(&done, lt, a1, Operand(zero_reg));
2436   // Read the value from the static offsets vector buffer.
2437   __ lw(a3, MemOperand(a2, 0));
2438   __ daddiu(a2, a2, kIntSize);
2439   // Store the smi value in the last match info.
2440   __ SmiTag(a3);
2441   __ sd(a3, MemOperand(a0, 0));
2442   __ Branch(&next_capture, USE_DELAY_SLOT);
2443   __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
2444 
2445   __ bind(&done);
2446 
2447   // Return last match info.
2448   __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
2449   __ DropAndRet(4);
2450 
2451   // Do the runtime call to execute the regexp.
2452   __ bind(&runtime);
2453   __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2454 
2455   // Deferred code for string handling.
2456   // (6) Not a long external string?  If yes, go to (8).
2457   __ bind(&not_seq_nor_cons);
2458   // Go to (8).
2459   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2460 
2461   // (7) External string.  Make it, offset-wise, look like a sequential string.
2462   __ bind(&external_string);
2463   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2464   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2465   if (FLAG_debug_code) {
2466     // Assert that we do not have a cons or slice (indirect strings) here.
2467     // Sequential strings have already been ruled out.
2468     __ And(at, a0, Operand(kIsIndirectStringMask));
2469     __ Assert(eq,
2470               kExternalStringExpectedButNotFound,
2471               at,
2472               Operand(zero_reg));
2473   }
2474   __ ld(subject,
2475         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2476   // Move the pointer so that offset-wise, it looks like a sequential string.
2477   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2478   __ Dsubu(subject,
2479           subject,
2480           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2481   __ jmp(&seq_string);    // Go to (5).
2482 
2483   // (8) Short external string or not a string?  If yes, bail out to runtime.
2484   __ bind(&not_long_external);
2485   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2486   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2487   __ Branch(&runtime, ne, at, Operand(zero_reg));
2488 
2489   // (9) Sliced string.  Replace subject with parent.  Go to (4).
2490   // Load offset into t0 and replace subject string with parent.
2491   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2492   __ SmiUntag(t0);
2493   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2494   __ jmp(&check_underlying);  // Go to (4).
2495 #endif  // V8_INTERPRETED_REGEXP
2496 }
2497 
2498 
2499 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2500   // Cache the called function in a feedback vector slot.  Cache states
2501   // are uninitialized, monomorphic (indicated by a JSFunction), and
2502   // megamorphic.
2503   // a0 : number of arguments to the construct function
2504   // a1 : the function to call
2505   // a2 : Feedback vector
2506   // a3 : slot in feedback vector (Smi)
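  // Summary of the transitions implemented below (no additional behaviour):
  // uninitialized -> monomorphic on the first call; any later miss ->
  // megamorphic; when the callee is the Array() function an AllocationSite
  // is stored in the slot instead of the function itself.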
2507   Label initialize, done, miss, megamorphic, not_array_function;
2508 
2509   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2510             masm->isolate()->heap()->megamorphic_symbol());
2511   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2512             masm->isolate()->heap()->uninitialized_symbol());
2513 
2514   // Load the cache state into a4.
2515   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2516   __ Daddu(a4, a2, Operand(a4));
2517   __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
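  // The dsrl/Daddu pair above converts the Smi-tagged slot index into a
  // byte offset. A C-level sketch of the same computation, assuming
  // MIPS64's 32-bit Smis held in the upper word of the register:
  //   offset = static_cast<uint64_t>(raw_smi) >> (32 - kPointerSizeLog2);
  // i.e. (index << 32) >> 29 == index * kPointerSize.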
2518 
2519   // A monomorphic cache hit or an already megamorphic state: invoke the
2520   // function without changing the state.
2521   __ Branch(&done, eq, a4, Operand(a1));
2522 
2523   if (!FLAG_pretenuring_call_new) {
2524     // If we came here, we need to see if we are the array function.
2525     // If we didn't have a matching function, and we didn't find the
2526     // megamorphic sentinel, then the slot holds either some other function
2527     // or an AllocationSite. Do a map check on the object in a4.
2528     __ ld(a5, FieldMemOperand(a4, 0));
2529     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2530     __ Branch(&miss, ne, a5, Operand(at));
2531 
2532     // Make sure the function is the Array() function.
2533     __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2534     __ Branch(&megamorphic, ne, a1, Operand(a4));
2535     __ jmp(&done);
2536   }
2537 
2538   __ bind(&miss);
2539 
2540   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2541   // megamorphic.
2542   __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2543   __ Branch(&initialize, eq, a4, Operand(at));
2544   // MegamorphicSentinel is an immortal immovable object (undefined) so no
2545   // write-barrier is needed.
2546   __ bind(&megamorphic);
2547   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2548   __ Daddu(a4, a2, Operand(a4));
2549   __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2550   __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2551   __ jmp(&done);
2552 
2553   // An uninitialized cache is patched with the function.
2554   __ bind(&initialize);
2555   if (!FLAG_pretenuring_call_new) {
2556     // Make sure the function is the Array() function.
2557     __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2558     __ Branch(&not_array_function, ne, a1, Operand(a4));
2559 
2560     // The target function is the Array constructor.
2561     // Create an AllocationSite if we don't already have it, and store it
2562     // in the slot.
2563     {
2564       FrameScope scope(masm, StackFrame::INTERNAL);
2565       const RegList kSavedRegs =
2566           1 << 4  |  // a0
2567           1 << 5  |  // a1
2568           1 << 6  |  // a2
2569           1 << 7;    // a3
2570 
2571       // Arguments register must be smi-tagged to call out.
2572       __ SmiTag(a0);
2573       __ MultiPush(kSavedRegs);
2574 
2575       CreateAllocationSiteStub create_stub(masm->isolate());
2576       __ CallStub(&create_stub);
2577 
2578       __ MultiPop(kSavedRegs);
2579       __ SmiUntag(a0);
2580     }
2581     __ Branch(&done);
2582 
2583     __ bind(&not_array_function);
2584   }
2585 
2586   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2587   __ Daddu(a4, a2, Operand(a4));
2588   __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2589   __ sd(a1, MemOperand(a4, 0));
2590 
2591   __ Push(a4, a2, a1);
2592   __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2593                  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2594   __ Pop(a4, a2, a1);
2595 
2596   __ bind(&done);
2597 }
2598 
2599 
2600 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2601   __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2602 
2603   // Do not transform the receiver for strict mode functions.
2604   int32_t strict_mode_function_mask =
2605       1 << SharedFunctionInfo::kStrictModeBitWithinByte;
2606   // Do not transform the receiver for native (compiler hints already in a3).
2607   int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
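  // Sketch of the test performed below (assuming the bit layout implied by
  // the named constants): the receiver is left untransformed iff
  //   (strict_mode_byte & strict_mode_function_mask) != 0 ||
  //   (native_byte & native_mask) != 0
  // where the two bytes are loaded from the SharedFunctionInfo.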
2608 
2609   __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
2610   __ And(at, a4, Operand(strict_mode_function_mask));
2611   __ Branch(cont, ne, at, Operand(zero_reg));
2612   __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
2613   __ And(at, a4, Operand(native_mask));
2614   __ Branch(cont, ne, at, Operand(zero_reg));
2615 }
2616 
2617 
2618 static void EmitSlowCase(MacroAssembler* masm,
2619                          int argc,
2620                          Label* non_function) {
2621   // Check for function proxy.
2622   __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2623   __ push(a1);  // put proxy as additional argument
2624   __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2625   __ mov(a2, zero_reg);
2626   __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2627   {
2628     Handle<Code> adaptor =
2629         masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2630     __ Jump(adaptor, RelocInfo::CODE_TARGET);
2631   }
2632 
2633   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2634   // of the original receiver from the call site).
2635   __ bind(non_function);
2636   __ sd(a1, MemOperand(sp, argc * kPointerSize));
2637   __ li(a0, Operand(argc));  // Set up the number of arguments.
2638   __ mov(a2, zero_reg);
2639   __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2640   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2641           RelocInfo::CODE_TARGET);
2642 }
2643 
2644 
2645 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2646   // Wrap the receiver and patch it back onto the stack.
2647   { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2648     __ Push(a1, a3);
2649     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2650     __ pop(a1);
2651   }
2652   __ Branch(USE_DELAY_SLOT, cont);
2653   __ sd(v0, MemOperand(sp, argc * kPointerSize));
2654 }
2655 
2656 
2657 static void CallFunctionNoFeedback(MacroAssembler* masm,
2658                                    int argc, bool needs_checks,
2659                                    bool call_as_method) {
2660   // a1 : the function to call
2661   Label slow, non_function, wrap, cont;
2662 
2663   if (needs_checks) {
2664     // Check that the function is really a JavaScript function.
2665     // a1: pushed function (to be verified)
2666     __ JumpIfSmi(a1, &non_function);
2667 
2668     // Go to the slow case if we do not have a function.
2669     __ GetObjectType(a1, a4, a4);
2670     __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2671   }
2672 
2673   // Fast-case: Invoke the function now.
2674   // a1: pushed function
2675   ParameterCount actual(argc);
2676 
2677   if (call_as_method) {
2678     if (needs_checks) {
2679       EmitContinueIfStrictOrNative(masm, &cont);
2680     }
2681 
2682     // Compute the receiver in sloppy mode.
2683     __ ld(a3, MemOperand(sp, argc * kPointerSize));
2684 
2685     if (needs_checks) {
2686       __ JumpIfSmi(a3, &wrap);
2687       __ GetObjectType(a3, a4, a4);
2688       __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2689     } else {
2690       __ jmp(&wrap);
2691     }
2692 
2693     __ bind(&cont);
2694   }
2695   __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2696 
2697   if (needs_checks) {
2698     // Slow-case: Non-function called.
2699     __ bind(&slow);
2700     EmitSlowCase(masm, argc, &non_function);
2701   }
2702 
2703   if (call_as_method) {
2704     __ bind(&wrap);
2705     // Wrap the receiver and patch it back onto the stack.
2706     EmitWrapCase(masm, argc, &cont);
2707   }
2708 }
2709 
2710 
2711 void CallFunctionStub::Generate(MacroAssembler* masm) {
2712   CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2713 }
2714 
2715 
2716 void CallConstructStub::Generate(MacroAssembler* masm) {
2717   // a0 : number of arguments
2718   // a1 : the function to call
2719   // a2 : feedback vector
2720   // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2721   Label slow, non_function_call;
2722   // Check that the function is not a smi.
2723   __ JumpIfSmi(a1, &non_function_call);
2724   // Check that the function is a JSFunction.
2725   __ GetObjectType(a1, a4, a4);
2726   __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2727 
2728   if (RecordCallTarget()) {
2729     GenerateRecordCallTarget(masm);
2730 
2731     __ dsrl(at, a3, 32 - kPointerSizeLog2);
2732     __ Daddu(a5, a2, at);
2733     if (FLAG_pretenuring_call_new) {
2734       // Put the AllocationSite from the feedback vector into a2.
2735       // By adding kPointerSize we encode that we know the AllocationSite
2736       // entry is at the feedback vector slot given by a3 + 1.
2737       __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
2738     } else {
2739       Label feedback_register_initialized;
2740       // Put the AllocationSite from the feedback vector into a2, or undefined.
2741       __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
2742       __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
2743       __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2744       __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
2745       __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2746       __ bind(&feedback_register_initialized);
2747     }
2748 
2749     __ AssertUndefinedOrAllocationSite(a2, a5);
2750   }
2751 
2752   // Jump to the function-specific construct stub.
2753   Register jmp_reg = a4;
2754   __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2755   __ ld(jmp_reg, FieldMemOperand(jmp_reg,
2756                                  SharedFunctionInfo::kConstructStubOffset));
2757   __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2758   __ Jump(at);
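  // The construct stub loaded above is a Code object; adding
  // (Code::kHeaderSize - kHeapObjectTag) converts the tagged Code pointer
  // into the address of its first instruction before jumping to it.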
2759 
2760   // a0: number of arguments
2761   // a1: called object
2762   // a4: object type
2763   Label do_call;
2764   __ bind(&slow);
2765   __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2766   __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2767   __ jmp(&do_call);
2768 
2769   __ bind(&non_function_call);
2770   __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2771   __ bind(&do_call);
2772   // Set expected number of arguments to zero (not changing a0).
2773   __ li(a2, Operand(0, RelocInfo::NONE32));
2774   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2775            RelocInfo::CODE_TARGET);
2776 }
2777 
2778 
2779 // StringCharCodeAtGenerator.
2780 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2781   DCHECK(!a4.is(index_));
2782   DCHECK(!a4.is(result_));
2783   DCHECK(!a4.is(object_));
2784 
2785   // If the receiver is a smi trigger the non-string case.
2786   __ JumpIfSmi(object_, receiver_not_string_);
2787 
2788   // Fetch the instance type of the receiver into result register.
2789   __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2790   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2791   // If the receiver is not a string trigger the non-string case.
2792   __ And(a4, result_, Operand(kIsNotStringMask));
2793   __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
2794 
2795   // If the index is non-smi trigger the non-smi case.
2796   __ JumpIfNotSmi(index_, &index_not_smi_);
2797 
2798   __ bind(&got_smi_index_);
2799 
2800   // Check for index out of range.
2801   __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
2802   __ Branch(index_out_of_range_, ls, a4, Operand(index_));
2803 
2804   __ SmiUntag(index_);
2805 
2806   StringCharLoadGenerator::Generate(masm,
2807                                     object_,
2808                                     index_,
2809                                     result_,
2810                                     &call_runtime_);
2811 
2812   __ SmiTag(result_);
2813   __ bind(&exit_);
2814 }
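// The fast path above corresponds roughly to this sketch (illustrative C++
// only; the helpers stand in for the emitted checks):
//   if (!object->IsString() || !index->IsSmi()) goto slow_path;
//   if (static_cast<uint64_t>(index) >= length) goto index_out_of_range;
//   result = Smi::FromInt(char_at(object, index));  // StringCharLoadGenerator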
2815 
2816 
2817 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2818   __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2819   __ ld(vector, FieldMemOperand(vector,
2820                                 JSFunction::kSharedFunctionInfoOffset));
2821   __ ld(vector, FieldMemOperand(vector,
2822                                 SharedFunctionInfo::kFeedbackVectorOffset));
2823 }
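// The three loads above amount to the following chain (a sketch with
// pseudo-accessors, not emitted code), starting from the JSFunction stored
// in the current frame:
//   vector = frame->function()->shared()->feedback_vector();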
2824 
2825 
2826 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2827   // a1 - function
2828   // a3 - slot id
2829   Label miss;
2830 
2831   EmitLoadTypeFeedbackVector(masm, a2);
2832 
2833   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2834   __ Branch(&miss, ne, a1, Operand(at));
2835 
2836   __ li(a0, Operand(arg_count()));
2837   __ dsrl(at, a3, 32 - kPointerSizeLog2);
2838   __ Daddu(at, a2, Operand(at));
2839   __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
2840 
2841   // Verify that a4 contains an AllocationSite
2842   __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
2843   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2844   __ Branch(&miss, ne, a5, Operand(at));
2845 
2846   __ mov(a2, a4);
2847   ArrayConstructorStub stub(masm->isolate(), arg_count());
2848   __ TailCallStub(&stub);
2849 
2850   __ bind(&miss);
2851   GenerateMiss(masm);
2852 
2853   // The slow case; we need this no matter what to complete a call after a miss.
2854   CallFunctionNoFeedback(masm,
2855                          arg_count(),
2856                          true,
2857                          CallAsMethod());
2858 
2859   // Unreachable.
2860   __ stop("Unexpected code address");
2861 }
2862 
2863 
2864 void CallICStub::Generate(MacroAssembler* masm) {
2865   // a1 - function
2866   // a3 - slot id (Smi)
2867   Label extra_checks_or_miss, slow_start;
2868   Label slow, non_function, wrap, cont;
2869   Label have_js_function;
2870   int argc = arg_count();
2871   ParameterCount actual(argc);
2872 
2873   EmitLoadTypeFeedbackVector(masm, a2);
2874 
2875   // The checks. First, does a1 match the recorded monomorphic target?
2876   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2877   __ Daddu(a4, a2, Operand(a4));
2878   __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
2879   __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
2880 
2881   __ bind(&have_js_function);
2882   if (CallAsMethod()) {
2883     EmitContinueIfStrictOrNative(masm, &cont);
2884     // Compute the receiver in sloppy mode.
2885     __ ld(a3, MemOperand(sp, argc * kPointerSize));
2886 
2887     __ JumpIfSmi(a3, &wrap);
2888     __ GetObjectType(a3, a4, a4);
2889     __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2890 
2891     __ bind(&cont);
2892   }
2893 
2894   __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2895 
2896   __ bind(&slow);
2897   EmitSlowCase(masm, argc, &non_function);
2898 
2899   if (CallAsMethod()) {
2900     __ bind(&wrap);
2901     EmitWrapCase(masm, argc, &cont);
2902   }
2903 
2904   __ bind(&extra_checks_or_miss);
2905   Label miss;
2906 
2907   __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2908   __ Branch(&slow_start, eq, a4, Operand(at));
2909   __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2910   __ Branch(&miss, eq, a4, Operand(at));
2911 
2912   if (!FLAG_trace_ic) {
2913     // We are going megamorphic. If the feedback is a JSFunction, it is fine
2914     // to handle it here. More complex cases are dealt with in the runtime.
2915     __ AssertNotSmi(a4);
2916     __ GetObjectType(a4, a5, a5);
2917     __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
2918     __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2919     __ Daddu(a4, a2, Operand(a4));
2920     __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2921     __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2922     __ Branch(&slow_start);
2923   }
2924 
2925   // We are here because tracing is on or we are going monomorphic.
2926   __ bind(&miss);
2927   GenerateMiss(masm);
2928 
2929   // The slow case.
2930   __ bind(&slow_start);
2931   // Check that the function is really a JavaScript function.
2932   // a1: pushed function (to be verified)
2933   __ JumpIfSmi(a1, &non_function);
2934 
2935   // Go to the slow case if we do not have a function.
2936   __ GetObjectType(a1, a4, a4);
2937   __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2938   __ Branch(&have_js_function);
2939 }
2940 
2941 
2942 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2943   // Get the receiver of the function from the stack; 1 ~ return address.
2944   __ ld(a4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
2945 
2946   {
2947     FrameScope scope(masm, StackFrame::INTERNAL);
2948 
2949     // Push the receiver and the function and feedback info.
2950     __ Push(a4, a1, a2, a3);
2951 
2952     // Call the entry.
2953     IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2954                                                : IC::kCallIC_Customization_Miss;
2955 
2956     ExternalReference miss = ExternalReference(IC_Utility(id),
2957                                                masm->isolate());
2958     __ CallExternalReference(miss, 4);
2959 
2960     // Move result to a1 and exit the internal frame.
2961     __ mov(a1, v0);
2962   }
2963 }
2964 
2965 
2966 void StringCharCodeAtGenerator::GenerateSlow(
2967     MacroAssembler* masm,
2968     const RuntimeCallHelper& call_helper) {
2969   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2970 
2971   // Index is not a smi.
2972   __ bind(&index_not_smi_);
2973   // If index is a heap number, try converting it to an integer.
2974   __ CheckMap(index_,
2975               result_,
2976               Heap::kHeapNumberMapRootIndex,
2977               index_not_number_,
2978               DONT_DO_SMI_CHECK);
2979   call_helper.BeforeCall(masm);
2980   // Consumed by runtime conversion function:
2981   __ Push(object_, index_);
2982   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2983     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2984   } else {
2985     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2986     // NumberToSmi discards numbers that are not exact integers.
2987     __ CallRuntime(Runtime::kNumberToSmi, 1);
2988   }
2989 
2990   // Save the conversion result before the pop instructions below
2991   // have a chance to overwrite it.
2992 
2993   __ Move(index_, v0);
2994   __ pop(object_);
2995   // Reload the instance type.
2996   __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2997   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2998   call_helper.AfterCall(masm);
2999   // If index is still not a smi, it must be out of range.
3000   __ JumpIfNotSmi(index_, index_out_of_range_);
3001   // Otherwise, return to the fast path.
3002   __ Branch(&got_smi_index_);
3003 
3004   // Call runtime. We get here when the receiver is a string and the
3005   // index is a number, but the code for getting the actual character
3006   // is too complex (e.g., when the string needs to be flattened).
3007   __ bind(&call_runtime_);
3008   call_helper.BeforeCall(masm);
3009   __ SmiTag(index_);
3010   __ Push(object_, index_);
3011   __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3012 
3013   __ Move(result_, v0);
3014 
3015   call_helper.AfterCall(masm);
3016   __ jmp(&exit_);
3017 
3018   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3019 }
3020 
3021 
3022 // -------------------------------------------------------------------------
3023 // StringCharFromCodeGenerator
3024 
3025 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3026   // Fast case of Heap::LookupSingleCharacterStringFromCode.
3027 
3028   DCHECK(!a4.is(result_));
3029   DCHECK(!a4.is(code_));
3030 
3031   STATIC_ASSERT(kSmiTag == 0);
3032   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3033   __ And(a4,
3034          code_,
3035          Operand(kSmiTagMask |
3036                  ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3037   __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
3038 
3039 
3040   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3041   // At this point code register contains smi tagged one_byte char code.
3042   STATIC_ASSERT(kSmiTag == 0);
3043   __ SmiScale(a4, code_, kPointerSizeLog2);
3044   __ Daddu(result_, result_, a4);
3045   __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3046   __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
3047   __ Branch(&slow_case_, eq, result_, Operand(a4));
3048   __ bind(&exit_);
3049 }
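// The fast path above is a table lookup into the single-character string
// cache; a rough sketch of the equivalent logic (illustrative C++ only):
//   Object* entry = single_character_string_cache[char_code];
//   if (entry == undefined) goto slow_case;  // string not materialized yet
//   result = entry;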
3050 
3051 
3052 void StringCharFromCodeGenerator::GenerateSlow(
3053     MacroAssembler* masm,
3054     const RuntimeCallHelper& call_helper) {
3055   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3056 
3057   __ bind(&slow_case_);
3058   call_helper.BeforeCall(masm);
3059   __ push(code_);
3060   __ CallRuntime(Runtime::kCharFromCode, 1);
3061   __ Move(result_, v0);
3062 
3063   call_helper.AfterCall(masm);
3064   __ Branch(&exit_);
3065 
3066   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3067 }
3068 
3069 
3070 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3071 
3072 
3073 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3074                                           Register dest,
3075                                           Register src,
3076                                           Register count,
3077                                           Register scratch,
3078                                           String::Encoding encoding) {
3079   if (FLAG_debug_code) {
3080     // Check that destination is word aligned.
3081     __ And(scratch, dest, Operand(kPointerAlignmentMask));
3082     __ Check(eq,
3083              kDestinationOfCopyNotAligned,
3084              scratch,
3085              Operand(zero_reg));
3086   }
3087 
3088   // Assumes word reads and writes are little endian.
3089   // Nothing to do for zero characters.
3090   Label done;
3091 
3092   if (encoding == String::TWO_BYTE_ENCODING) {
3093     __ Daddu(count, count, count);
3094   }
3095 
3096   Register limit = count;  // Read until dest equals this.
3097   __ Daddu(limit, dest, Operand(count));
3098 
3099   Label loop_entry, loop;
3100   // Copy bytes from src to dest until dest hits limit.
3101   __ Branch(&loop_entry);
3102   __ bind(&loop);
3103   __ lbu(scratch, MemOperand(src));
3104   __ daddiu(src, src, 1);
3105   __ sb(scratch, MemOperand(dest));
3106   __ daddiu(dest, dest, 1);
3107   __ bind(&loop_entry);
3108   __ Branch(&loop, lt, dest, Operand(limit));
3109 
3110   __ bind(&done);
3111 }
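// The loop above is a plain byte copy; in C++ it amounts to the following
// sketch (count has already been doubled for two-byte strings):
//   uint8_t* limit = dest + count;
//   while (dest < limit) *dest++ = *src++;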
3112 
3113 
3114 void SubStringStub::Generate(MacroAssembler* masm) {
3115   Label runtime;
3116   // Stack frame on entry.
3117   //  ra: return address
3118   //  sp[0]: to
3119   //  sp[8]: from
3120   //  sp[16]: string
3121 
3122   // This stub is called from the native-call %_SubString(...), so
3123   // nothing can be assumed about the arguments. It is tested that:
3124   //  "string" is a sequential string,
3125   //  both "from" and "to" are smis, and
3126   //  0 <= from <= to <= string.length.
3127   // If any of these assumptions fail, we call the runtime system.
3128 
3129   const int kToOffset = 0 * kPointerSize;
3130   const int kFromOffset = 1 * kPointerSize;
3131   const int kStringOffset = 2 * kPointerSize;
3132 
3133   __ ld(a2, MemOperand(sp, kToOffset));
3134   __ ld(a3, MemOperand(sp, kFromOffset));
3135 // Not needed?
3136 //  STATIC_ASSERT(kFromOffset == kToOffset + 4);
3137   STATIC_ASSERT(kSmiTag == 0);
3138 // Not needed?
3139 // STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3140 
3141   // Utilize delay slots. SmiUntag doesn't emit a jump, and everything else
3142   // is safe in this case.
3143   __ JumpIfNotSmi(a2, &runtime);
3144   __ JumpIfNotSmi(a3, &runtime);
3145   // Untag a2 and a3; afterwards both hold plain integers.
3146 
3147   __ SmiUntag(a2, a2);
3148   __ SmiUntag(a3, a3);
3149   __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
3150 
3151   __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
3152   __ Dsubu(a2, a2, a3);
3153 
3154   // Make sure first argument is a string.
3155   __ ld(v0, MemOperand(sp, kStringOffset));
3156   __ JumpIfSmi(v0, &runtime);
3157   __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3158   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3159   __ And(a4, a1, Operand(kIsNotStringMask));
3160 
3161   __ Branch(&runtime, ne, a4, Operand(zero_reg));
3162 
3163   Label single_char;
3164   __ Branch(&single_char, eq, a2, Operand(1));
3165 
3166   // Short-cut for the case of trivial substring.
3167   Label return_v0;
3168   // v0: original string
3169   // a2: result string length
3170   __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
3171   __ SmiUntag(a4);
3172   // Return original string.
3173   __ Branch(&return_v0, eq, a2, Operand(a4));
3174   // Longer than original string's length or negative: unsafe arguments.
3175   __ Branch(&runtime, hi, a2, Operand(a4));
3176   // Shorter than original string's length: an actual substring.
3177 
3178   // Deal with different string types: update the index if necessary
3179   // and put the underlying string into a5.
3180   // v0: original string
3181   // a1: instance type
3182   // a2: length
3183   // a3: from index (untagged)
3184   Label underlying_unpacked, sliced_string, seq_or_external_string;
3185   // If the string is not indirect, it can only be sequential or external.
3186   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3187   STATIC_ASSERT(kIsIndirectStringMask != 0);
3188   __ And(a4, a1, Operand(kIsIndirectStringMask));
3189   __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
3190   // a4 is used as a scratch register and can be overwritten in either case.
3191   __ And(a4, a1, Operand(kSlicedNotConsMask));
3192   __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
3193   // Cons string.  Check whether it is flat, then fetch first part.
3194   __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
3195   __ LoadRoot(a4, Heap::kempty_stringRootIndex);
3196   __ Branch(&runtime, ne, a5, Operand(a4));
3197   __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
3198   // Update instance type.
3199   __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3200   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3201   __ jmp(&underlying_unpacked);
3202 
3203   __ bind(&sliced_string);
3204   // Sliced string.  Fetch parent and correct start index by offset.
3205   __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3206   __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3207   __ SmiUntag(a4);  // Add offset to index.
3208   __ Daddu(a3, a3, a4);
3209   // Update instance type.
3210   __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3211   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3212   __ jmp(&underlying_unpacked);
3213 
3214   __ bind(&seq_or_external_string);
3215   // Sequential or external string.  Just move string to the expected register.
3216   __ mov(a5, v0);
3217 
3218   __ bind(&underlying_unpacked);
3219 
3220   if (FLAG_string_slices) {
3221     Label copy_routine;
3222     // a5: underlying subject string
3223     // a1: instance type of underlying subject string
3224     // a2: length
3225     // a3: adjusted start index (untagged)
3226     // Short slice.  Copy instead of slicing.
3227     __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3228     // Allocate new sliced string.  At this point we do not reload the instance
3229     // type including the string encoding because we simply rely on the info
3230     // provided by the original string.  It does not matter if the original
3231     // string's encoding is wrong because we always have to recheck encoding of
3232     // the newly created string's parent anyway due to externalized strings.
3233     Label two_byte_slice, set_slice_header;
3234     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3235     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3236     __ And(a4, a1, Operand(kStringEncodingMask));
3237     __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
3238     __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
3239     __ jmp(&set_slice_header);
3240     __ bind(&two_byte_slice);
3241     __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
3242     __ bind(&set_slice_header);
3243     __ SmiTag(a3);
3244     __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3245     __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3246     __ jmp(&return_v0);
3247 
3248     __ bind(&copy_routine);
3249   }
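  // A sliced string is just a (parent, offset) view of an existing string;
  // the two stores above correspond roughly to (illustrative sketch):
  //   slice->parent = underlying_string;  // a5
  //   slice->offset = from_index;         // a3, re-tagged as a Smi
  // so no characters are copied for substrings of length >= kMinLength.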
3250 
3251   // a5: underlying subject string
3252   // a1: instance type of underlying subject string
3253   // a2: length
3254   // a3: adjusted start index (untagged)
3255   Label two_byte_sequential, sequential_string, allocate_result;
3256   STATIC_ASSERT(kExternalStringTag != 0);
3257   STATIC_ASSERT(kSeqStringTag == 0);
3258   __ And(a4, a1, Operand(kExternalStringTag));
3259   __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
3260 
3261   // Handle external string.
3262   // Rule out short external strings.
3263   STATIC_ASSERT(kShortExternalStringTag != 0);
3264   __ And(a4, a1, Operand(kShortExternalStringTag));
3265   __ Branch(&runtime, ne, a4, Operand(zero_reg));
3266   __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
3267   // a5 already points to the first character of underlying string.
3268   __ jmp(&allocate_result);
3269 
3270   __ bind(&sequential_string);
3271   // Locate first character of underlying subject string.
3272   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3273   __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3274 
3275   __ bind(&allocate_result);
3276   // Sequential one-byte string.  Allocate the result.
3277   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3278   __ And(a4, a1, Operand(kStringEncodingMask));
3279   __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
3280 
3281   // Allocate and copy the resulting one_byte string.
3282   __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
3283 
3284   // Locate first character of substring to copy.
3285   __ Daddu(a5, a5, a3);
3286 
3287   // Locate first character of result.
3288   __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3289 
3290   // v0: result string
3291   // a1: first character of result string
3292   // a2: result string length
3293   // a5: first character of substring to copy
3294   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3295   StringHelper::GenerateCopyCharacters(
3296       masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
3297   __ jmp(&return_v0);
3298 
3299   // Allocate and copy the resulting two-byte string.
3300   __ bind(&two_byte_sequential);
3301   __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
3302 
3303   // Locate first character of substring to copy.
3304   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3305   __ dsll(a4, a3, 1);
3306   __ Daddu(a5, a5, a4);
3307   // Locate first character of result.
3308   __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3309 
3310   // v0: result string.
3311   // a1: first character of result.
3312   // a2: result length.
3313   // a5: first character of substring to copy.
3314   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3315   StringHelper::GenerateCopyCharacters(
3316       masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
3317 
3318   __ bind(&return_v0);
3319   Counters* counters = isolate()->counters();
3320   __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
3321   __ DropAndRet(3);
3322 
3323   // Just jump to runtime to create the substring.
3324   __ bind(&runtime);
3325   __ TailCallRuntime(Runtime::kSubString, 3, 1);
3326 
3327   __ bind(&single_char);
3328   // v0: original string
3329   // a1: instance type
3330   // a2: length
3331   // a3: from index (untagged)
3332   StringCharAtGenerator generator(
3333       v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3334   generator.GenerateFast(masm);
3335   __ DropAndRet(3);
3336   generator.SkipSlow(masm, &runtime);
3337 }
3338 
3339 
3340 void StringHelper::GenerateFlatOneByteStringEquals(
3341     MacroAssembler* masm, Register left, Register right, Register scratch1,
3342     Register scratch2, Register scratch3) {
3343   Register length = scratch1;
3344 
3345   // Compare lengths.
3346   Label strings_not_equal, check_zero_length;
3347   __ ld(length, FieldMemOperand(left, String::kLengthOffset));
3348   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3349   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3350   __ bind(&strings_not_equal);
3351   // Cannot put li in the delay slot; it expands to multiple instructions.
3352   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3353   __ Ret();
3354 
3355   // Check if the length is zero.
3356   Label compare_chars;
3357   __ bind(&check_zero_length);
3358   STATIC_ASSERT(kSmiTag == 0);
3359   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3360   DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
3361   __ Ret(USE_DELAY_SLOT);
3362   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3363 
3364   // Compare characters.
3365   __ bind(&compare_chars);
3366 
3367   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3368                                   v0, &strings_not_equal);
3369 
3370   // Characters are equal.
3371   __ Ret(USE_DELAY_SLOT);
3372   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3373 }
3374 
3375 
3376 void StringHelper::GenerateCompareFlatOneByteStrings(
3377     MacroAssembler* masm, Register left, Register right, Register scratch1,
3378     Register scratch2, Register scratch3, Register scratch4) {
3379   Label result_not_equal, compare_lengths;
3380   // Find minimum length and length difference.
3381   __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
3382   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3383   __ Dsubu(scratch3, scratch1, Operand(scratch2));
3384   Register length_delta = scratch3;
3385   __ slt(scratch4, scratch2, scratch1);
3386   __ Movn(scratch1, scratch2, scratch4);
3387   Register min_length = scratch1;
3388   STATIC_ASSERT(kSmiTag == 0);
3389   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3390 
3391   // Compare loop.
3392   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3393                                   scratch4, v0, &result_not_equal);
3394 
3395   // Compare lengths - strings up to min-length are equal.
3396   __ bind(&compare_lengths);
3397   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3398   // Use length_delta as result if it's zero.
3399   __ mov(scratch2, length_delta);
3400   __ mov(scratch4, zero_reg);
3401   __ mov(v0, zero_reg);
3402 
3403   __ bind(&result_not_equal);
3404   // Conditionally update the result based on either length_delta or
3405   // the last comparison performed in the loop above.
3406   Label ret;
3407   __ Branch(&ret, eq, scratch2, Operand(scratch4));
3408   __ li(v0, Operand(Smi::FromInt(GREATER)));
3409   __ Branch(&ret, gt, scratch2, Operand(scratch4));
3410   __ li(v0, Operand(Smi::FromInt(LESS)));
3411   __ bind(&ret);
3412   __ Ret();
3413 }
3414 
3415 
3416 void StringHelper::GenerateOneByteCharsCompareLoop(
3417     MacroAssembler* masm, Register left, Register right, Register length,
3418     Register scratch1, Register scratch2, Register scratch3,
3419     Label* chars_not_equal) {
3420   // Change index to run from -length to -1 by adding length to string
3421   // start. This means that the loop ends when index reaches zero, which
3422   // doesn't need an additional compare.
3423   __ SmiUntag(length);
3424   __ Daddu(scratch1, length,
3425           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3426   __ Daddu(left, left, Operand(scratch1));
3427   __ Daddu(right, right, Operand(scratch1));
3428   __ Dsubu(length, zero_reg, length);
3429   Register index = length;  // index = -length;
3430 
3431 
3432   // Compare loop.
3433   Label loop;
3434   __ bind(&loop);
3435   __ Daddu(scratch3, left, index);
3436   __ lbu(scratch1, MemOperand(scratch3));
3437   __ Daddu(scratch3, right, index);
3438   __ lbu(scratch2, MemOperand(scratch3));
3439   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3440   __ Daddu(index, index, 1);
3441   __ Branch(&loop, ne, index, Operand(zero_reg));
3442 }
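// The negative-index trick above removes one comparison per iteration; a
// plain C++ sketch of the same loop (illustrative only):
//   const uint8_t* l = left_chars + length;   // one past the last character
//   const uint8_t* r = right_chars + length;
//   for (int64_t i = -length; i != 0; i++) {
//     if (l[i] != r[i]) goto chars_not_equal;
//   }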
3443 
3444 
3445 void StringCompareStub::Generate(MacroAssembler* masm) {
3446   Label runtime;
3447 
3448   Counters* counters = isolate()->counters();
3449 
3450   // Stack frame on entry.
3451   //  sp[0]: right string
3452   //  sp[8]: left string
3453   __ ld(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
3454   __ ld(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
3455 
3456   Label not_same;
3457   __ Branch(&not_same, ne, a0, Operand(a1));
3458   STATIC_ASSERT(EQUAL == 0);
3459   STATIC_ASSERT(kSmiTag == 0);
3460   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3461   __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3462   __ DropAndRet(2);
3463 
3464   __ bind(&not_same);
3465 
3466   // Check that both objects are sequential one_byte strings.
3467   __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3468 
3469   // Compare flat one_byte strings natively. Remove arguments from stack first.
3470   __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3471   __ Daddu(sp, sp, Operand(2 * kPointerSize));
3472   StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
3473 
3474   __ bind(&runtime);
3475   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3476 }
3477 
3478 
3479 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3480   // ----------- S t a t e -------------
3481   //  -- a1    : left
3482   //  -- a0    : right
3483   //  -- ra    : return address
3484   // -----------------------------------
3485 
3486   // Load a2 with the allocation site. We stick an undefined dummy value here
3487   // and replace it with the real allocation site later when we instantiate this
3488   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3489   __ li(a2, handle(isolate()->heap()->undefined_value()));
3490 
3491   // Make sure that we actually patched the allocation site.
3492   if (FLAG_debug_code) {
3493     __ And(at, a2, Operand(kSmiTagMask));
3494     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3495     __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
3496     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3497     __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
3498   }
3499 
3500   // Tail call into the stub that handles binary operations with allocation
3501   // sites.
3502   BinaryOpWithAllocationSiteStub stub(isolate(), state());
3503   __ TailCallStub(&stub);
3504 }
3505 
3506 
3507 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3508   DCHECK(state() == CompareICState::SMI);
3509   Label miss;
3510   __ Or(a2, a1, a0);
3511   __ JumpIfNotSmi(a2, &miss);
3512 
3513   if (GetCondition() == eq) {
3514     // For equality we do not care about the sign of the result.
3515     __ Ret(USE_DELAY_SLOT);
3516     __ Dsubu(v0, a0, a1);
3517   } else {
3518     // Untag before subtracting to avoid handling overflow.
3519     __ SmiUntag(a1);
3520     __ SmiUntag(a0);
3521     __ Ret(USE_DELAY_SLOT);
3522     __ Dsubu(v0, a1, a0);
3523   }
3524 
3525   __ bind(&miss);
3526   GenerateMiss(masm);
3527 }
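// Why subtraction works here (illustrative sketch): for equality only zero
// versus non-zero matters, so the tagged values can be subtracted directly.
// For ordered comparisons both operands are untagged first, since
// subtracting two 32-bit payloads in a 64-bit register cannot overflow:
//   int64_t result = left_value - right_value;
//   // result < 0, == 0, > 0 encodes LESS, EQUAL, GREATER.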
3528 
3529 
3530 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3531   DCHECK(state() == CompareICState::NUMBER);
3532 
3533   Label generic_stub;
3534   Label unordered, maybe_undefined1, maybe_undefined2;
3535   Label miss;
3536 
3537   if (left() == CompareICState::SMI) {
3538     __ JumpIfNotSmi(a1, &miss);
3539   }
3540   if (right() == CompareICState::SMI) {
3541     __ JumpIfNotSmi(a0, &miss);
3542   }
3543 
3544   // Inline the double comparison and fall back to the general compare
3545   // stub if NaN is involved.
3546   // Load left and right operand.
3547   Label done, left, left_smi, right_smi;
3548   __ JumpIfSmi(a0, &right_smi);
3549   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3550               DONT_DO_SMI_CHECK);
3551   __ Dsubu(a2, a0, Operand(kHeapObjectTag));
3552   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3553   __ Branch(&left);
3554   __ bind(&right_smi);
3555   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
3556   FPURegister single_scratch = f6;
3557   __ mtc1(a2, single_scratch);
3558   __ cvt_d_w(f2, single_scratch);
3559 
3560   __ bind(&left);
3561   __ JumpIfSmi(a1, &left_smi);
3562   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3563               DONT_DO_SMI_CHECK);
3564   __ Dsubu(a2, a1, Operand(kHeapObjectTag));
3565   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3566   __ Branch(&done);
3567   __ bind(&left_smi);
3568   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
3569   single_scratch = f8;
3570   __ mtc1(a2, single_scratch);
3571   __ cvt_d_w(f0, single_scratch);
3572 
3573   __ bind(&done);
3574 
3575   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3576   Label fpu_eq, fpu_lt;
3577   // Test if equal, and also handle the unordered/NaN case.
3578   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3579 
3580   // Test if less (unordered case is already handled).
3581   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3582 
3583   // Otherwise it's greater, so just fall through and return.
3584   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3585   __ Ret(USE_DELAY_SLOT);
3586   __ li(v0, Operand(GREATER));
3587 
3588   __ bind(&fpu_eq);
3589   __ Ret(USE_DELAY_SLOT);
3590   __ li(v0, Operand(EQUAL));
3591 
3592   __ bind(&fpu_lt);
3593   __ Ret(USE_DELAY_SLOT);
3594   __ li(v0, Operand(LESS));
3595 
3596   __ bind(&unordered);
3597   __ bind(&generic_stub);
3598   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3599                      CompareICState::GENERIC, CompareICState::GENERIC);
3600   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3601 
3602   __ bind(&maybe_undefined1);
3603   if (Token::IsOrderedRelationalCompareOp(op())) {
3604     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3605     __ Branch(&miss, ne, a0, Operand(at));
3606     __ JumpIfSmi(a1, &unordered);
3607     __ GetObjectType(a1, a2, a2);
3608     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3609     __ jmp(&unordered);
3610   }
3611 
3612   __ bind(&maybe_undefined2);
3613   if (Token::IsOrderedRelationalCompareOp(op())) {
3614     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3615     __ Branch(&unordered, eq, a1, Operand(at));
3616   }
3617 
3618   __ bind(&miss);
3619   GenerateMiss(masm);
3620 }
3621 
3622 
3623 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3624   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3625   Label miss;
3626 
3627   // Registers containing left and right operands respectively.
3628   Register left = a1;
3629   Register right = a0;
3630   Register tmp1 = a2;
3631   Register tmp2 = a3;
3632 
3633   // Check that both operands are heap objects.
3634   __ JumpIfEitherSmi(left, right, &miss);
3635 
3636   // Check that both operands are internalized strings.
3637   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3638   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3639   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3640   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3641   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3642   __ Or(tmp1, tmp1, Operand(tmp2));
3643   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3644   __ Branch(&miss, ne, at, Operand(zero_reg));
3645 
3646   // Make sure a0 is non-zero. At this point input operands are
3647   // guaranteed to be non-zero.
3648   DCHECK(right.is(a0));
3649   STATIC_ASSERT(EQUAL == 0);
3650   STATIC_ASSERT(kSmiTag == 0);
3651   __ mov(v0, right);
3652   // Internalized strings are compared by identity.
3653   __ Ret(ne, left, Operand(right));
3654   DCHECK(is_int16(EQUAL));
3655   __ Ret(USE_DELAY_SLOT);
3656   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3657 
3658   __ bind(&miss);
3659   GenerateMiss(masm);
3660 }
3661 
3662 
3663 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3664   DCHECK(state() == CompareICState::UNIQUE_NAME);
3665   DCHECK(GetCondition() == eq);
3666   Label miss;
3667 
3668   // Registers containing left and right operands respectively.
3669   Register left = a1;
3670   Register right = a0;
3671   Register tmp1 = a2;
3672   Register tmp2 = a3;
3673 
3674   // Check that both operands are heap objects.
3675   __ JumpIfEitherSmi(left, right, &miss);
3676 
3677   // Check that both operands are unique names. This leaves the instance
3678   // types loaded in tmp1 and tmp2.
3679   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3680   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3681   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3682   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3683 
3684   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3685   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3686 
3687   // Use a0 as result
3688   __ mov(v0, a0);
3689 
3690   // Unique names are compared by identity.
3691   Label done;
3692   __ Branch(&done, ne, left, Operand(right));
3693   // Make sure a0 is non-zero. At this point input operands are
3694   // guaranteed to be non-zero.
3695   DCHECK(right.is(a0));
3696   STATIC_ASSERT(EQUAL == 0);
3697   STATIC_ASSERT(kSmiTag == 0);
3698   __ li(v0, Operand(Smi::FromInt(EQUAL)));
3699   __ bind(&done);
3700   __ Ret();
3701 
3702   __ bind(&miss);
3703   GenerateMiss(masm);
3704 }
3705 
3706 
3707 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3708   DCHECK(state() == CompareICState::STRING);
3709   Label miss;
3710 
3711   bool equality = Token::IsEqualityOp(op());
3712 
3713   // Registers containing left and right operands respectively.
3714   Register left = a1;
3715   Register right = a0;
3716   Register tmp1 = a2;
3717   Register tmp2 = a3;
3718   Register tmp3 = a4;
3719   Register tmp4 = a5;
3720   Register tmp5 = a6;
3721 
3722   // Check that both operands are heap objects.
3723   __ JumpIfEitherSmi(left, right, &miss);
3724 
3725   // Check that both operands are strings. This leaves the instance
3726   // types loaded in tmp1 and tmp2.
3727   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3728   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3729   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3730   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3731   STATIC_ASSERT(kNotStringTag != 0);
3732   __ Or(tmp3, tmp1, tmp2);
3733   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3734   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3735 
3736   // Fast check for identical strings.
3737   Label left_ne_right;
3738   STATIC_ASSERT(EQUAL == 0);
3739   STATIC_ASSERT(kSmiTag == 0);
3740   __ Branch(&left_ne_right, ne, left, Operand(right));
3741   __ Ret(USE_DELAY_SLOT);
3742   __ mov(v0, zero_reg);  // In the delay slot.
3743   __ bind(&left_ne_right);
3744 
3745   // Handle not identical strings.
3746 
3747   // Check that both strings are internalized strings. If they are, we're done
3748   // because we already know they are not identical. We know they are both
3749   // strings.
3750   if (equality) {
3751     DCHECK(GetCondition() == eq);
3752     STATIC_ASSERT(kInternalizedTag == 0);
3753     __ Or(tmp3, tmp1, Operand(tmp2));
3754     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3755     Label is_symbol;
3756     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3757     // Make sure a0 is non-zero. At this point input operands are
3758     // guaranteed to be non-zero.
3759     DCHECK(right.is(a0));
3760     __ Ret(USE_DELAY_SLOT);
3761     __ mov(v0, a0);  // In the delay slot.
3762     __ bind(&is_symbol);
3763   }
3764 
3765   // Check that both strings are sequential one_byte.
3766   Label runtime;
3767   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3768                                                     &runtime);
3769 
3770   // Compare flat one_byte strings. Returns when done.
3771   if (equality) {
3772     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3773                                                   tmp3);
3774   } else {
3775     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3776                                                     tmp2, tmp3, tmp4);
3777   }
3778 
3779   // Handle more complex cases in runtime.
3780   __ bind(&runtime);
3781   __ Push(left, right);
3782   if (equality) {
3783     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3784   } else {
3785     __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3786   }
3787 
3788   __ bind(&miss);
3789   GenerateMiss(masm);
3790 }
3791 
3792 
3793 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3794   DCHECK(state() == CompareICState::OBJECT);
3795   Label miss;
3796   __ And(a2, a1, Operand(a0));
3797   __ JumpIfSmi(a2, &miss);
3798 
3799   __ GetObjectType(a0, a2, a2);
3800   __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3801   __ GetObjectType(a1, a2, a2);
3802   __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3803 
3804   DCHECK(GetCondition() == eq);
3805   __ Ret(USE_DELAY_SLOT);
3806   __ dsubu(v0, a0, a1);
3807 
3808   __ bind(&miss);
3809   GenerateMiss(masm);
3810 }
3811 
3812 
3813 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3814   Label miss;
3815   __ And(a2, a1, a0);
3816   __ JumpIfSmi(a2, &miss);
3817   __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3818   __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3819   __ Branch(&miss, ne, a2, Operand(known_map_));
3820   __ Branch(&miss, ne, a3, Operand(known_map_));
3821 
3822   __ Ret(USE_DELAY_SLOT);
3823   __ dsubu(v0, a0, a1);
3824 
3825   __ bind(&miss);
3826   GenerateMiss(masm);
3827 }
3828 
3829 
3830 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3831   {
3832     // Call the runtime system in a fresh internal frame.
3833     ExternalReference miss =
3834         ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3835     FrameScope scope(masm, StackFrame::INTERNAL);
3836     __ Push(a1, a0);
3837     __ Push(ra, a1, a0);
3838     __ li(a4, Operand(Smi::FromInt(op())));
3839     __ daddiu(sp, sp, -kPointerSize);
3840     __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3841     __ sd(a4, MemOperand(sp));  // In the delay slot.
3842     // Compute the entry point of the rewritten stub.
3843     __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3844     // Restore registers.
3845     __ Pop(a1, a0, ra);
3846   }
3847   __ Jump(a2);
3848 }
3849 
3850 
3851 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3852   // Make room for arguments to fit the C calling convention. Most callers
3853   // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
3854   // so they handle stack restoring and we don't have to do that here.
3855   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3856   // kCArgsSlotsSize stack space after the call.
3857   __ daddiu(sp, sp, -kCArgsSlotsSize);
3858   // Place the return address on the stack, making the call
3859   // GC safe. The RegExp backend also relies on this.
3860   __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
3861   __ Call(t9);  // Call the C++ function.
3862   __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
3863 
3864   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3865     // In case of an error the return address may point to a memory area
3866     // filled with kZapValue by the GC.
3867     // Dereference the address and check for this.
3868     __ Uld(a4, MemOperand(t9));
3869     __ Assert(ne, kReceivedInvalidReturnAddress, a4,
3870         Operand(reinterpret_cast<uint64_t>(kZapValue)));
3871   }
3872   __ Jump(t9);
3873 }
3874 
3875 
3876 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3877                                     Register target) {
3878   intptr_t loc =
3879       reinterpret_cast<intptr_t>(GetCode().location());
3880   __ Move(t9, target);
3881   __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3882   __ Call(ra);
3883 }
3884 
3885 
3886 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3887                                                       Label* miss,
3888                                                       Label* done,
3889                                                       Register receiver,
3890                                                       Register properties,
3891                                                       Handle<Name> name,
3892                                                       Register scratch0) {
3893   DCHECK(name->IsUniqueName());
3894   // If the names of the slots probed for the hash value (probes 1 to
3895   // kProbes - 1) are not equal to the name, and the kProbes-th slot is
3896   // unused (its name is the undefined value), then the hash table is
3897   // guaranteed not to contain the property. This holds even if some slots
3898   // represent deleted properties (their names are the hole value).
3899   for (int i = 0; i < kInlinedProbes; i++) {
3900     // scratch0 points to properties hash.
3901     // Compute the masked index: (hash + i + i * i) & mask.
3902     Register index = scratch0;
3903     // Capacity is smi 2^n.
3904     __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
3905     __ Dsubu(index, index, Operand(1));
3906     __ And(index, index,
3907            Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
3908 
3909     // Scale the index by multiplying by the entry size.
3910     DCHECK(NameDictionary::kEntrySize == 3);
3911     __ dsll(at, index, 1);
3912     __ Daddu(index, index, at);  // index *= 3.
3913 
3914     Register entity_name = scratch0;
3915     // Having undefined at this place means the name is not contained.
3916     DCHECK_EQ(kSmiTagSize, 1);
3917     Register tmp = properties;
3918 
3919     __ dsll(scratch0, index, kPointerSizeLog2);
3920     __ Daddu(tmp, properties, scratch0);
3921     __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3922 
3923     DCHECK(!tmp.is(entity_name));
3924     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3925     __ Branch(done, eq, entity_name, Operand(tmp));
3926 
3927     // Load the hole ready for use below:
3928     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3929 
3930     // Stop if we found the property.
3931     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3932 
3933     Label good;
3934     __ Branch(&good, eq, entity_name, Operand(tmp));
3935 
3936     // Check if the entry name is not a unique name.
3937     __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3938     __ lbu(entity_name,
3939            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3940     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3941     __ bind(&good);
3942 
3943     // Restore the properties.
3944     __ ld(properties,
3945           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3946   }
3947 
3948   const int spill_mask =
3949       (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
3950        a2.bit() | a1.bit() | a0.bit() | v0.bit());
3951 
3952   __ MultiPush(spill_mask);
3953   __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3954   __ li(a1, Operand(Handle<Name>(name)));
3955   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3956   __ CallStub(&stub);
3957   __ mov(at, v0);
3958   __ MultiPop(spill_mask);
3959 
3960   __ Branch(done, eq, at, Operand(zero_reg));
3961   __ Branch(miss, ne, at, Operand(zero_reg));
3962 }
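// The probes above and in GeneratePositiveLookup below use quadratic probing
// over a power-of-two table. A sketch of one probe step in plain C++
// (illustrative only):
//   int index = (hash + i + i * i) & mask;  // mask == capacity - 1
//   Object* key = elements[index * 3];      // NameDictionary::kEntrySize == 3
//   if (key == undefined_value) { /* name is absent */ }
//   else if (key == name) { /* found */ }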
3963 
3964 
3965 // Probe the name dictionary in the |elements| register. Jump to the
3966 // |done| label if a property with the given name is found. Jump to
3967 // the |miss| label otherwise.
3968 // If the lookup was successful, |scratch2| will be equal to elements + kPointerSize * index.
3969 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3970                                                       Label* miss,
3971                                                       Label* done,
3972                                                       Register elements,
3973                                                       Register name,
3974                                                       Register scratch1,
3975                                                       Register scratch2) {
3976   DCHECK(!elements.is(scratch1));
3977   DCHECK(!elements.is(scratch2));
3978   DCHECK(!name.is(scratch1));
3979   DCHECK(!name.is(scratch2));
3980 
3981   __ AssertName(name);
3982 
3983   // Compute the capacity mask.
3984   __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
3985   __ SmiUntag(scratch1);
3986   __ Dsubu(scratch1, scratch1, Operand(1));
3987 
3988   // Generate an unrolled loop that performs a few probes before
3989   // giving up. Measurements done on Gmail indicate that 2 probes
3990   // cover ~93% of loads from dictionaries.
3991   for (int i = 0; i < kInlinedProbes; i++) {
3992     // Compute the masked index: (hash + i + i * i) & mask.
3993     __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3994     if (i > 0) {
3995       // Add the probe offset (i + i * i) left shifted to avoid right shifting
3996       // the hash in a separate instruction. The value hash + i + i * i is
3997       // right shifted and masked by the instructions that follow.
3998       DCHECK(NameDictionary::GetProbeOffset(i) <
3999              1 << (32 - Name::kHashFieldOffset));
4000       __ Daddu(scratch2, scratch2, Operand(
4001           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4002     }
4003     __ dsrl(scratch2, scratch2, Name::kHashShift);
4004     __ And(scratch2, scratch1, scratch2);
4005 
4006     // Scale the index by multiplying by the element size.
4007     DCHECK(NameDictionary::kEntrySize == 3);
4008     // scratch2 = scratch2 * 3.
4009 
4010     __ dsll(at, scratch2, 1);
4011     __ Daddu(scratch2, scratch2, at);
4012 
4013     // Check if the key is identical to the name.
4014     __ dsll(at, scratch2, kPointerSizeLog2);
4015     __ Daddu(scratch2, elements, at);
4016     __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
4017     __ Branch(done, eq, name, Operand(at));
4018   }
4019 
4020   const int spill_mask =
4021       (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
4022        a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4023       ~(scratch1.bit() | scratch2.bit());
4024 
4025   __ MultiPush(spill_mask);
4026   if (name.is(a0)) {
4027     DCHECK(!elements.is(a1));
4028     __ Move(a1, name);
4029     __ Move(a0, elements);
4030   } else {
4031     __ Move(a0, elements);
4032     __ Move(a1, name);
4033   }
4034   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4035   __ CallStub(&stub);
4036   __ mov(scratch2, a2);
4037   __ mov(at, v0);
4038   __ MultiPop(spill_mask);
4039 
4040   __ Branch(done, ne, at, Operand(zero_reg));
4041   __ Branch(miss, eq, at, Operand(zero_reg));
4042 }
4043 
4044 
4045 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4046   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
4047   // we cannot call anything that could cause a GC from this stub.
4048   // Registers:
4049   //  result: holds the result of the lookup.
4050   //  a1: key
4051   //  dictionary: NameDictionary to probe.
4052   //  index: will hold the index of the entry if the lookup is successful;
4053   //         might alias with result.
4054   // Returns:
4055   //  result is zero if the lookup failed, non-zero otherwise.
4056 
4057   Register result = v0;
4058   Register dictionary = a0;
4059   Register key = a1;
4060   Register index = a2;
4061   Register mask = a3;
4062   Register hash = a4;
4063   Register undefined = a5;
4064   Register entry_key = a6;
4065 
4066   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4067 
4068   __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
4069   __ SmiUntag(mask);
4070   __ Dsubu(mask, mask, Operand(1));
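  // The capacity is a power of two stored as a smi, so capacity - 1 is the
  // bit mask that reduces a probe value to a valid table index.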

  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted by the dsrl below before being masked by the And.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Daddu(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ dsrl(index, index, Name::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    DCHECK(NameDictionary::kEntrySize == 3);
    // index *= 3, computed as (index << 1) + index.
    __ mov(at, index);
    __ dsll(index, index, 1);
    __ Daddu(index, index, at);

    DCHECK_EQ(kSmiTagSize, 1);
    __ dsll(index, index, kPointerSizeLog2);
    __ Daddu(index, index, dictionary);
    __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if we found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }
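  // A non-unique name in the probe chain cannot be compared by identity, so
  // a negative lookup cannot be decided here; the check above bails out to
  // maybe_in_dictionary instead of continuing to probe.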

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, then probing failure should be
  // treated as a lookup success. For a positive lookup, probing failure
  // should be treated as a lookup failure.
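  // Note: Ret(USE_DELAY_SLOT) emits the jump first; the instruction that
  // follows executes in the branch delay slot, so the result register is
  // written before the return completes.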
  if (mode() == POSITIVE_LOOKUP) {
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
// the value has just been written into the object; now this stub makes sure
// we keep the GC informed.  The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call.  We patch
  // them back and forth between a "bne zero_reg, zero_reg, ..." (a nop in
  // this position) and the "beq zero_reg, zero_reg, ..." when we start and
  // stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.

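  // Patch the two branches emitted at offsets 0 and 2 * kInstrSize above into
  // nops, so that a freshly generated stub takes the store-buffer-only fast
  // path until incremental marking patches the branches back in.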
  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  __ Move(a1, address);
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ ld(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
  __ sd(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
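  // Masking the object with ~kPageAlignmentMask yields its page header; the
  // page's write barrier counter is decremented on every recorded write, and
  // once it drops below zero the incremental marker is informed regardless
  // of the object's color.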

  // Let's look at the color of the object: if it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),    // Scratch.
                    regs_.address(),   // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : element value to store
  //  -- a3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[8] : array literal
  // clobbers a1, a2, a4
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ ld(a4, MemOperand(sp, 0 * kPointerSize));
  __ ld(a1, MemOperand(sp, 1 * kPointerSize));
  __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));

  __ CheckFastElements(a2, a5, &double_elements);
  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements.
  __ JumpIfSmi(a0, &smi_element);
  __ CheckFastSmiElements(a2, a5, &fast_elements);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.
  __ bind(&slow_elements);
  __ Push(a1, a3, a0);
  __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
  __ Push(a5, a4);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ SmiScale(a6, a3, kPointerSizeLog2);
  __ Daddu(a6, a5, a6);
  __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sd(a0, MemOperand(a6, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ SmiScale(a6, a3, kPointerSizeLog2);
  __ Daddu(a6, a5, a6);
  __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ ld(a1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ Daddu(a1, a1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ dsll(a1, a1, kPointerSizeLog2);
  __ Ret(USE_DELAY_SLOT);
  __ Daddu(sp, sp, a1);
}


void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorLoadStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorKeyedLoadStub stub(isolate());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction (two instructions on MIPS),
  // followed by a call.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
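  // From inside the hook stub, ra points just past the call site, which lies
  // kCallTargetAddressOffset bytes plus the two-instruction push beyond the
  // start of the instrumented function; subtracting this distance (see the
  // Dsubu below) recovers the function's entry address.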

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
      kJSCallerSaved |  // Caller saved registers.
      s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(s5, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
  }

  __ Dsubu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  int64_t entry_hook =
      reinterpret_cast<int64_t>(isolate()->function_entry_hook());
  __ li(t9, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(t9, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      isolate())));
#endif
  // Call the C function through t9 to conform to the ABI for PIC.
  __ Call(t9);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  } else {
    __ Daddu(sp, sp, kCArgsSlotsSize);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    DCHECK(FAST_SMI_ELEMENTS == 0);
    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
    DCHECK(FAST_ELEMENTS == 2);
    DCHECK(FAST_HOLEY_ELEMENTS == 3);
    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
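    // The fast kinds are laid out in packed/holey pairs, so an odd kind (low
    // bit set) is always the holey variant of the packed kind just below it.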

    // Is the low bit set? If so, we are holey and that is good.
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
  }
  // Look at the first argument.
  __ ld(a5, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ Daddu(a3, a3, Operand(1));

    if (FLAG_debug_code) {
      __ ld(a5, FieldMemOperand(a2, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
    }

    // Save the resulting elements kind in type info. We can't just store a3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field; the upper bits need to be left
    // alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
    __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
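    // Since packed kinds are even, adding the smi-tagged packed-to-holey
    // delta only sets the low bit of the elements kind and cannot carry into
    // the upper bits of transition_info.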

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
    stubh3.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count() == ANY) {
    Label not_zero_case, not_one_case;
    __ And(at, a0, a0);
    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ Branch(&not_one_case, gt, a0, Operand(1));
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count() == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count() == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count() == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc (only if argument_count() == ANY)
  //  -- a1 : constructor
  //  -- a2 : AllocationSite or undefined
  //  -- sp[0] : return address
  //  -- sp[8] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The SmiTst below catches both a NULL and a Smi.
    __ SmiTst(a4, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a4, a4, a5);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        a5, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(a2, a4);
  }

  Label no_info;
  // Get the elements kind and dispatch on that.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
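  // With kShift == 0 (checked above), masking with ElementsKindBits::kMask
  // leaves just the elements kind in a3 for the dispatch below.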
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ ld(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[8] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The SmiTst below catches both a NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, a4);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        a4, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- a4                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = a4;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  int argc = this->argc();
  bool is_store = this->is_store();
  bool call_data_undefined = this->call_data_undefined();

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  // Load context from callee.
  __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
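  // If call_data was undefined, its register already holds the undefined
  // value; otherwise the register is safely clobbered with undefined here,
  // because the original call_data was pushed above.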
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch,
        Operand(ExternalReference::isolate_address(isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space, since
  // it's not controlled by the GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The arguments are just above the return address.
  __ Daddu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
  __ sd(at, MemOperand(a0, 1 * kPointerSize));
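  // implicit_args_ points at the FCA slots pushed above; given the stack
  // layout in the state comment, values_ is set (kArgsLength - 1 + argc)
  // words above it, landing on the first JS argument.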
  // FunctionCallbackInfo::length_ = argc
  __ li(at, Operand(argc));
  __ sd(at, MemOperand(a0, 2 * kPointerSize));
  // FunctionCallbackInfo::is_construct_call = 0
  __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
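  // kStackUnwindSpace counts the JS arguments, the FCA implicit slots, and
  // one extra slot for the receiver at sp[argc * kPointerSize] (see the
  // state comment above).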
  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);

  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              return_value_operand,
                              &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
  //  -- ...
  //  -- a2                     : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(a2));

  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create a PropertyAccessorInfo instance on the stack above the exit frame
  // with a1 (internal::Object** args_) as the data.
  __ sd(a1, MemOperand(sp, 1 * kPointerSize));
  __ Daddu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
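  // As in CallApiFunctionStub, the +1 accounts for the name handle at sp[0]
  // in addition to the PropertyCallbackArguments slots.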

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              MemOperand(fp, 6 * kPointerSize),
                              NULL);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64